This commit is contained in:
Karl Wright 2017-12-08 05:41:33 -05:00
commit dcb84701a4
300 changed files with 2896 additions and 745 deletions

View File

@ -49,6 +49,9 @@ Optimizations
* LUCENE-8040: Optimize IndexSearcher.collectionStatistics, avoiding MultiFields/MultiTerms
(David Smiley, Robert Muir)
* LUCENE-4100: Disjunctions now support faster collection of top hits when the
total hit count is not required. (Stefan Pohl, Adrien Grand, Robert Muir)
======================= Lucene 7.3.0 =======================
API Changes
@ -56,6 +59,13 @@ API Changes
* LUCENE-8051: LevensteinDistance renamed to LevenshteinDistance.
(Pulak Ghosh via Adrien Grand)
Improvements
* LUCENE-8081: Allow IndexWriter to opt out of flushing on indexing threads
Index/Update Threads try to help out flushing pending document buffers to
disk. This change adds an expert setting to opt out of this behavior unless
flushing is falling behind. (Simon Willnauer)
======================= Lucene 7.2.0 =======================
API Changes
@ -158,15 +168,6 @@ Optimizations
caching as they could break memory accounting of the query cache.
(Adrien Grand)
Tests
* LUCENE-8035: Run tests with JDK-specific options: --illegal-access=deny
on Java 9+. (Uwe Schindler)
======================= Lucene 7.1.1 =======================
Bug Fixes
* LUCENE-8055: MemoryIndex.MemoryDocValuesIterator returns 2 documents
instead of 1. (Simon Willnauer)
@ -174,11 +175,17 @@ Bug Fixes
documents. Once this happens, Lucene refuses to open the index and throws a
CorruptIndexException. (Simon Willnauer, Yonik Seeley, Mike McCandless)
Tests
* LUCENE-8035: Run tests with JDK-specific options: --illegal-access=deny
on Java 9+. (Uwe Schindler)
Build
* LUCENE-6144: Upgrade Ivy to 2.4.0; 'ant ivy-bootstrap' now removes old Ivy
jars in ~/.ant/lib/. (Shawn Heisey, Steve Rowe)
======================= Lucene 7.1.0 =======================
Changes in Runtime Behavior

View File

@ -34,6 +34,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.util.DocIdSetBuilder;
import org.apache.lucene.util.StringHelper;
@ -261,7 +262,7 @@ abstract class RangeFieldQuery extends Query {
}
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
private IntersectVisitor getIntersectVisitor(DocIdSetBuilder result) {

View File

@ -30,6 +30,7 @@ import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
@ -92,7 +93,7 @@ abstract class SortedNumericDocValuesRangeQuery extends Query {
abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException;
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override

View File

@ -30,6 +30,7 @@ import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
@ -103,7 +104,7 @@ abstract class SortedSetDocValuesRangeQuery extends Query {
abstract SortedSetDocValues getValues(LeafReader reader, String field) throws IOException;
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

View File

@ -392,7 +392,8 @@ final class DocumentsWriter implements Closeable, Accountable {
private boolean preUpdate() throws IOException, AbortingException {
ensureOpen();
boolean hasEvents = false;
if (flushControl.anyStalledThreads() || flushControl.numQueuedFlushes() > 0) {
if (flushControl.anyStalledThreads() || (flushControl.numQueuedFlushes() > 0 && config.checkPendingFlushOnUpdate)) {
// Help out flushing any queued DWPTs so we can un-stall:
do {
// Try pick up pending threads here if possible
@ -412,7 +413,7 @@ final class DocumentsWriter implements Closeable, Accountable {
hasEvents |= applyAllDeletes(deleteQueue);
if (flushingDWPT != null) {
hasEvents |= doFlush(flushingDWPT);
} else {
} else if (config.checkPendingFlushOnUpdate) {
final DocumentsWriterPerThread nextPendingFlush = flushControl.nextPendingFlush();
if (nextPendingFlush != null) {
hasEvents |= doFlush(nextPendingFlush);

View File

@ -32,6 +32,7 @@ import org.apache.lucene.index.DocValuesUpdate.NumericDocValuesUpdate;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.ByteArrayDataInput;
@ -684,7 +685,7 @@ class FrozenBufferedUpdates {
}
final IndexSearcher searcher = new IndexSearcher(readerContext.reader());
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
final Scorer scorer = weight.scorer(readerContext);
if (scorer != null) {
final DocIdSetIterator it = scorer.iterator();

View File

@ -480,4 +480,9 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig {
return sb.toString();
}
@Override
public IndexWriterConfig setCheckPendingFlushUpdate(boolean checkPendingFlushOnUpdate) {
return (IndexWriterConfig) super.setCheckPendingFlushUpdate(checkPendingFlushOnUpdate);
}
}

View File

@ -103,6 +103,9 @@ public class LiveIndexWriterConfig {
/** The field names involved in the index sort */
protected Set<String> indexSortFields = Collections.emptySet();
/** if an indexing thread should check for pending flushes on update in order to help out on a full flush*/
protected volatile boolean checkPendingFlushOnUpdate = true;
// used by IndexWriterConfig
LiveIndexWriterConfig(Analyzer analyzer) {
this.analyzer = analyzer;
@ -426,6 +429,29 @@ public class LiveIndexWriterConfig {
return indexSortFields;
}
/**
* Expert: Returns if indexing threads check for pending flushes on update in order
* to help out flushing indexing buffers to disk
* @lucene.experimental
*/
public boolean isCheckPendingFlushOnUpdate() {
return checkPendingFlushOnUpdate;
}
/**
* Expert: sets if indexing threads check for pending flushes on update in order
* to help out flushing indexing buffers to disk. As a consequence, threads calling
* {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter)} or {@link IndexWriter#flush()} will
* be the only threads writing segments to disk unless flushes are falling behind. If indexing is stalled
* due to too many pending flushes, indexing threads will help out writing pending segment flushes to disk.
*
* @lucene.experimental
*/
public LiveIndexWriterConfig setCheckPendingFlushUpdate(boolean checkPendingFlushOnUpdate) {
this.checkPendingFlushOnUpdate = checkPendingFlushOnUpdate;
return this;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@ -448,6 +474,7 @@ public class LiveIndexWriterConfig {
sb.append("useCompoundFile=").append(getUseCompoundFile()).append("\n");
sb.append("commitOnClose=").append(getCommitOnClose()).append("\n");
sb.append("indexSort=").append(getIndexSort()).append("\n");
sb.append("checkPendingFlushOnUpdate=").append(isCheckPendingFlushOnUpdate()).append("\n");
return sb.toString();
}
}

View File

@ -464,6 +464,11 @@ final class Sorter {
public float score() throws IOException {
return score;
}
@Override
public float maxScore() {
return Float.POSITIVE_INFINITY;
}
};
}

View File

@ -31,20 +31,20 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
private final BooleanWeight weight;
private final Map<BooleanClause.Occur, Collection<ScorerSupplier>> subs;
private final boolean needsScores;
private final ScoreMode scoreMode;
private final int minShouldMatch;
private long cost = -1;
Boolean2ScorerSupplier(BooleanWeight weight,
Map<Occur, Collection<ScorerSupplier>> subs,
boolean needsScores, int minShouldMatch) {
ScoreMode scoreMode, int minShouldMatch) {
if (minShouldMatch < 0) {
throw new IllegalArgumentException("minShouldMatch must be positive, but got: " + minShouldMatch);
}
if (minShouldMatch != 0 && minShouldMatch >= subs.get(Occur.SHOULD).size()) {
throw new IllegalArgumentException("minShouldMatch must be strictly less than the number of SHOULD clauses");
}
if (needsScores == false && minShouldMatch == 0 && subs.get(Occur.SHOULD).size() > 0
if (scoreMode.needsScores() == false && minShouldMatch == 0 && subs.get(Occur.SHOULD).size() > 0
&& subs.get(Occur.MUST).size() + subs.get(Occur.FILTER).size() > 0) {
throw new IllegalArgumentException("Cannot pass purely optional clauses if scores are not needed");
}
@ -53,7 +53,7 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
this.weight = weight;
this.subs = subs;
this.needsScores = needsScores;
this.scoreMode = scoreMode;
this.minShouldMatch = minShouldMatch;
}
@ -94,7 +94,7 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
// pure disjunction
if (subs.get(Occur.FILTER).isEmpty() && subs.get(Occur.MUST).isEmpty()) {
return excl(opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, leadCost), subs.get(Occur.MUST_NOT), leadCost);
return excl(opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost), subs.get(Occur.MUST_NOT), leadCost);
}
// conjunction-disjunction mix:
@ -104,13 +104,13 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
if (minShouldMatch > 0) {
Scorer req = excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost), subs.get(Occur.MUST_NOT), leadCost);
Scorer opt = opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, leadCost);
Scorer opt = opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost);
return new ConjunctionScorer(weight, Arrays.asList(req, opt), Arrays.asList(req, opt));
} else {
assert needsScores;
assert scoreMode.needsScores();
return new ReqOptSumScorer(
excl(req(subs.get(Occur.FILTER), subs.get(Occur.MUST), leadCost), subs.get(Occur.MUST_NOT), leadCost),
opt(subs.get(Occur.SHOULD), minShouldMatch, needsScores, leadCost));
opt(subs.get(Occur.SHOULD), minShouldMatch, scoreMode, leadCost));
}
}
@ -121,7 +121,7 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
if (requiredNoScoring.size() + requiredScoring.size() == 1) {
Scorer req = (requiredNoScoring.isEmpty() ? requiredScoring : requiredNoScoring).iterator().next().get(leadCost);
if (needsScores == false) {
if (scoreMode.needsScores() == false) {
return req;
}
@ -134,6 +134,10 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
public float score() throws IOException {
return 0f;
}
@Override
public float maxScore() {
return 0f;
}
};
}
@ -157,12 +161,12 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
if (prohibited.isEmpty()) {
return main;
} else {
return new ReqExclScorer(main, opt(prohibited, 1, false, leadCost));
return new ReqExclScorer(main, opt(prohibited, 1, ScoreMode.COMPLETE_NO_SCORES, leadCost));
}
}
private Scorer opt(Collection<ScorerSupplier> optional, int minShouldMatch,
boolean needsScores, long leadCost) throws IOException {
ScoreMode scoreMode, long leadCost) throws IOException {
if (optional.size() == 1) {
return optional.iterator().next().get(leadCost);
} else {
@ -172,8 +176,10 @@ final class Boolean2ScorerSupplier extends ScorerSupplier {
}
if (minShouldMatch > 1) {
return new MinShouldMatchSumScorer(weight, optionalScorers, minShouldMatch);
} else if (scoreMode == ScoreMode.TOP_SCORES) {
return new WANDScorer(weight, optionalScorers);
} else {
return new DisjunctionSumScorer(weight, optionalScorers, needsScores);
return new DisjunctionSumScorer(weight, optionalScorers, scoreMode.needsScores());
}
}
}

View File

@ -196,12 +196,12 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
BooleanQuery query = this;
if (needsScores == false) {
if (scoreMode.needsScores() == false) {
query = rewriteNoScoring();
}
return new BooleanWeight(query, searcher, needsScores, boost);
return new BooleanWeight(query, searcher, scoreMode, boost);
}
@Override

View File

@ -42,16 +42,16 @@ final class BooleanWeight extends Weight {
final BooleanQuery query;
final ArrayList<Weight> weights;
final boolean needsScores;
final ScoreMode scoreMode;
BooleanWeight(BooleanQuery query, IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
BooleanWeight(BooleanQuery query, IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
super(query);
this.query = query;
this.needsScores = needsScores;
this.similarity = searcher.getSimilarity(needsScores);
this.scoreMode = scoreMode;
this.similarity = searcher.getSimilarity(scoreMode.needsScores());
weights = new ArrayList<>();
for (BooleanClause c : query) {
Weight w = searcher.createWeight(c.getQuery(), needsScores && c.isScoring(), boost);
Weight w = searcher.createWeight(c.getQuery(), c.isScoring() ? scoreMode : ScoreMode.COMPLETE_NO_SCORES, boost);
weights.add(w);
}
}
@ -60,7 +60,7 @@ final class BooleanWeight extends Weight {
public void extractTerms(Set<Term> terms) {
int i = 0;
for (BooleanClause clause : query) {
if (clause.isScoring() || (needsScores == false && clause.isProhibited() == false)) {
if (clause.isScoring() || (scoreMode.needsScores() == false && clause.isProhibited() == false)) {
weights.get(i).extractTerms(terms);
}
i++;
@ -178,7 +178,7 @@ final class BooleanWeight extends Weight {
return optional.get(0);
}
return new BooleanScorer(this, optional, Math.max(1, query.getMinimumNumberShouldMatch()), needsScores);
return new BooleanScorer(this, optional, Math.max(1, query.getMinimumNumberShouldMatch()), scoreMode.needsScores());
}
// Return a BulkScorer for the required clauses only,
@ -201,7 +201,7 @@ final class BooleanWeight extends Weight {
// no matches
return null;
}
if (c.isScoring() == false && needsScores) {
if (c.isScoring() == false && scoreMode.needsScores()) {
scorer = disableScoring(scorer);
}
}
@ -285,6 +285,11 @@ final class BooleanWeight extends Weight {
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
if (scoreMode == ScoreMode.TOP_SCORES) {
// If only the top docs are requested, use the default bulk scorer
// so that we can dynamically prune non-competitive hits.
return super.bulkScorer(context);
}
final BulkScorer bulkScorer = booleanScorer(context);
if (bulkScorer != null) {
// bulk scoring is applicable, use it
@ -361,11 +366,11 @@ final class BooleanWeight extends Weight {
}
// we don't need scores, so if we have required clauses, drop optional clauses completely
if (!needsScores && minShouldMatch == 0 && scorers.get(Occur.MUST).size() + scorers.get(Occur.FILTER).size() > 0) {
if (scoreMode.needsScores() == false && minShouldMatch == 0 && scorers.get(Occur.MUST).size() + scorers.get(Occur.FILTER).size() > 0) {
scorers.get(Occur.SHOULD).clear();
}
return new Boolean2ScorerSupplier(this, scorers, needsScores, minShouldMatch);
return new Boolean2ScorerSupplier(this, scorers, scoreMode, minShouldMatch);
}
}

View File

@ -113,8 +113,8 @@ public final class BoostQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return query.createWeight(searcher, needsScores, BoostQuery.this.boost * boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return query.createWeight(searcher, scoreMode, BoostQuery.this.boost * boost);
}
}

View File

@ -67,6 +67,11 @@ public abstract class CachingCollector extends FilterCollector {
@Override
public final float score() { return score; }
@Override
public float maxScore() {
return Float.POSITIVE_INFINITY;
}
@Override
public int docID() {
return doc;
@ -175,8 +180,8 @@ public abstract class CachingCollector extends FilterCollector {
/** Ensure the scores are collected so they can be replayed, even if the wrapped collector doesn't need them. */
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
@ -301,8 +306,8 @@ public abstract class CachingCollector extends FilterCollector {
public void collect(int doc) {}
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
};

View File

@ -74,9 +74,7 @@ public interface Collector {
LeafCollector getLeafCollector(LeafReaderContext context) throws IOException;
/**
* Indicates if document scores are needed by this collector.
*
* @return {@code true} if scores are needed.
* Indicates what features are required from the scorer.
*/
boolean needsScores();
ScoreMode scoreMode();
}

View File

@ -61,6 +61,26 @@ class ConjunctionScorer extends Scorer {
return (float) sum;
}
@Override
public float maxScore() {
// We iterate in the same order as #score() so no need to worry
// about floating-point errors: we would do the same errors in
// #score()
double sum = 0d;
for (Scorer scorer : scorers) {
sum += scorer.maxScore();
}
return (float) sum;
}
@Override
public void setMinCompetitiveScore(float score) {
if (scorers.length == 1) {
scorers[0].setMinCompetitiveScore(score);
}
// TODO: handle the case when there are multiple scoring clauses too
}
@Override
public Collection<ChildScorer> getChildren() {
ArrayList<ChildScorer> children = new ArrayList<>();

View File

@ -94,6 +94,10 @@ public final class ConstantScoreQuery extends Query {
public float score() throws IOException {
return theScore;
}
@Override
public float maxScore() {
return theScore;
}
});
}
};
@ -106,9 +110,9 @@ public final class ConstantScoreQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final Weight innerWeight = searcher.createWeight(query, false, 1f);
if (needsScores) {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final Weight innerWeight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f);
if (scoreMode.needsScores()) {
return new ConstantScoreWeight(this, boost) {
@Override
@ -137,6 +141,10 @@ public final class ConstantScoreQuery extends Query {
return score;
}
@Override
public float maxScore() {
return score;
}
@Override
public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(innerScorer, "constant"));
}

View File

@ -53,6 +53,11 @@ public final class ConstantScoreScorer extends Scorer {
this.disi = TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator);
}
@Override
public float maxScore() {
return score;
}
@Override
public DocIdSetIterator iterator() {
return disi;

View File

@ -38,6 +38,9 @@ public class DisiWrapper {
// two-phase iteration
public final TwoPhaseIterator twoPhaseView;
// For MaxScoreScorer
long maxScore;
// FOR SPANS
public final Spans spans;
public int lastApproxMatchDoc; // last doc of approximation that did match

View File

@ -97,15 +97,15 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
/** The Weights for our subqueries, in 1-1 correspondence with disjuncts */
protected final ArrayList<Weight> weights = new ArrayList<>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
private final boolean needsScores;
private final ScoreMode scoreMode;
/** Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
public DisjunctionMaxWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public DisjunctionMaxWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
super(DisjunctionMaxQuery.this);
for (Query disjunctQuery : disjuncts) {
weights.add(searcher.createWeight(disjunctQuery, needsScores, boost));
weights.add(searcher.createWeight(disjunctQuery, scoreMode, boost));
}
this.needsScores = needsScores;
this.scoreMode = scoreMode;
}
@Override
@ -133,7 +133,7 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
// only one sub-scorer in this segment
return scorers.get(0);
} else {
return new DisjunctionMaxScorer(this, tieBreakerMultiplier, scorers, needsScores);
return new DisjunctionMaxScorer(this, tieBreakerMultiplier, scorers, scoreMode.needsScores());
}
}
@ -181,8 +181,8 @@ public final class DisjunctionMaxQuery extends Query implements Iterable<Query>
/** Create the Weight used to score us */
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new DisjunctionMaxWeight(searcher, needsScores, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new DisjunctionMaxWeight(searcher, scoreMode, boost);
}
/** Optimize our representation and our subqueries representations

View File

@ -58,4 +58,10 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
}
return (float) (scoreMax + (scoreSum - scoreMax) * tieBreakerMultiplier);
}
@Override
public float maxScore() {
// TODO: implement but be careful about floating-point errors.
return Float.POSITIVE_INFINITY;
}
}

View File

@ -40,4 +40,11 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
}
return (float)score;
}
@Override
public float maxScore() {
// TODO: implement it but be careful with floating-point errors
return Float.POSITIVE_INFINITY;
}
}

View File

@ -62,7 +62,7 @@ public final class DocValuesFieldExistsQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

View File

@ -72,7 +72,7 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
public final String getField() { return query.getField(); }
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

View File

@ -81,7 +81,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
* IndexReader-independent implementations can just return {@code this}
*
* Queries that use DoubleValuesSource objects should call rewrite() during
* {@link Query#createWeight(IndexSearcher, boolean, float)} rather than during
* {@link Query#createWeight(IndexSearcher, ScoreMode, float)} rather than during
* {@link Query#rewrite(IndexReader)} to avoid IndexReader reference leakage
*/
public abstract DoubleValuesSource rewrite(IndexSearcher reader) throws IOException;
@ -553,7 +553,7 @@ public abstract class DoubleValuesSource implements SegmentCacheable {
@Override
public DoubleValuesSource rewrite(IndexSearcher searcher) throws IOException {
return new WeightDoubleValuesSource(searcher.rewrite(query).createWeight(searcher, true, 1f));
return new WeightDoubleValuesSource(searcher.rewrite(query).createWeight(searcher, ScoreMode.COMPLETE, 1f));
}
@Override

View File

@ -104,6 +104,11 @@ final class ExactPhraseScorer extends Scorer {
return docScorer.score(docID(), freq);
}
@Override
public float maxScore() {
return docScorer.maxScore(Integer.MAX_VALUE);
}
/** Advance the given pos enum to the first doc on or after {@code target}.
* Return {@code false} if the enum was exhausted before reaching
* {@code target} and {@code true} otherwise. */

View File

@ -39,6 +39,11 @@ final class FakeScorer extends Scorer {
return score;
}
@Override
public float maxScore() {
return Float.POSITIVE_INFINITY;
}
@Override
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();

View File

@ -46,7 +46,7 @@ public abstract class FilterCollector implements Collector {
}
@Override
public boolean needsScores() {
return in.needsScores();
public ScoreMode scoreMode() {
return in.scoreMode();
}
}

View File

@ -59,6 +59,9 @@ public abstract class FilterScorer extends Scorer {
return in.score();
}
// Leave maxScore abstract on purpose since the goal of this Filter class is
// to change the way the score is computed.
@Override
public final int docID() {
return in.docID();

View File

@ -47,7 +47,7 @@ public abstract class FilterWeight extends Weight {
/**
* Alternative constructor.
* Use this variant only if the <code>weight</code> was not obtained
* via the {@link Query#createWeight(IndexSearcher, boolean, float)}
* via the {@link Query#createWeight(IndexSearcher, ScoreMode, float)}
* method of the <code>query</code> object.
*/
protected FilterWeight(Query query, Weight weight) {

View File

@ -110,9 +110,9 @@ public final class IndexOrDocValuesQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
final Weight indexWeight = indexQuery.createWeight(searcher, needsScores, boost);
final Weight dvWeight = dvQuery.createWeight(searcher, needsScores, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final Weight indexWeight = indexQuery.createWeight(searcher, scoreMode, boost);
final Weight dvWeight = dvQuery.createWeight(searcher, scoreMode, boost);
return new Weight(this) {
@Override
public void extractTerms(Set<Term> terms) {

View File

@ -96,6 +96,10 @@ public class IndexSearcher {
public float score(int doc, float freq) {
return 0f;
}
@Override
public float maxScore(float maxFreq) {
return 0f;
}
};
}
@ -407,7 +411,7 @@ public class IndexSearcher {
@Override
public TopScoreDocCollector newCollector() throws IOException {
return TopScoreDocCollector.create(cappedNumHits, after);
return TopScoreDocCollector.create(cappedNumHits, after, true);
}
@Override
@ -445,7 +449,7 @@ public class IndexSearcher {
*/
public void search(Query query, Collector results)
throws IOException {
search(leafContexts, createNormalizedWeight(query, results.needsScores()), results);
search(leafContexts, createNormalizedWeight(query, results.scoreMode()), results);
}
/** Search implementation with arbitrary sorting, plus
@ -570,14 +574,22 @@ public class IndexSearcher {
return collectorManager.reduce(Collections.singletonList(collector));
} else {
final List<C> collectors = new ArrayList<>(leafSlices.length);
boolean needsScores = false;
ScoreMode scoreMode = null;
for (int i = 0; i < leafSlices.length; ++i) {
final C collector = collectorManager.newCollector();
collectors.add(collector);
needsScores |= collector.needsScores();
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
throw new IllegalStateException("CollectorManager does not always produce collectors with the same score mode");
}
}
if (scoreMode == null) {
// no segments
scoreMode = ScoreMode.COMPLETE;
}
final Weight weight = createNormalizedWeight(query, needsScores);
final Weight weight = createNormalizedWeight(query, scoreMode);
final List<Future<C>> topDocsFutures = new ArrayList<>(leafSlices.length);
for (int i = 0; i < leafSlices.length; ++i) {
final LeafReaderContext[] leaves = leafSlices[i].leaves;
@ -674,7 +686,7 @@ public class IndexSearcher {
* entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
return explain(createNormalizedWeight(query, true), doc);
return explain(createNormalizedWeight(query, ScoreMode.COMPLETE), doc);
}
/** Expert: low-level implementation method
@ -707,9 +719,9 @@ public class IndexSearcher {
* can then directly be used to get a {@link Scorer}.
* @lucene.internal
*/
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
public Weight createNormalizedWeight(Query query, ScoreMode scoreMode) throws IOException {
query = rewrite(query);
return createWeight(query, needsScores, 1f);
return createWeight(query, scoreMode, 1f);
}
/**
@ -717,10 +729,10 @@ public class IndexSearcher {
* if possible and configured.
* @lucene.experimental
*/
public Weight createWeight(Query query, boolean needsScores, float boost) throws IOException {
public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws IOException {
final QueryCache queryCache = this.queryCache;
Weight weight = query.createWeight(this, needsScores, boost);
if (needsScores == false && queryCache != null) {
Weight weight = query.createWeight(this, scoreMode, boost);
if (scoreMode.needsScores() == false && queryCache != null) {
weight = queryCache.doCache(weight, queryCachingPolicy);
}
return weight;

View File

@ -29,7 +29,7 @@ import org.apache.lucene.util.Bits;
public final class MatchAllDocsQuery extends Query {
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
return new ConstantScoreWeight(this, boost) {
@Override
public String toString() {

View File

@ -42,7 +42,7 @@ public class MatchNoDocsQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new Weight(this) {
@Override
public void extractTerms(Set<Term> terms) {

View File

@ -325,6 +325,12 @@ final class MinShouldMatchSumScorer extends Scorer {
return (float) score;
}
@Override
public float maxScore() {
// TODO: implement but be careful about floating-point errors.
return Float.POSITIVE_INFINITY;
}
@Override
public int docID() {
assert doc == lead.doc;

View File

@ -95,7 +95,7 @@ public class MultiCollector implements Collector {
this.collectors = collectors;
int numNeedsScores = 0;
for (Collector collector : collectors) {
if (collector.needsScores()) {
if (collector.scoreMode().needsScores()) {
numNeedsScores += 1;
}
}
@ -103,13 +103,16 @@ public class MultiCollector implements Collector {
}
@Override
public boolean needsScores() {
public ScoreMode scoreMode() {
ScoreMode scoreMode = null;
for (Collector collector : collectors) {
if (collector.needsScores()) {
return true;
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
return ScoreMode.COMPLETE;
}
}
return false;
return scoreMode;
}
@Override

View File

@ -34,6 +34,9 @@ public class MultiCollectorManager implements CollectorManager<MultiCollectorMan
@SafeVarargs
@SuppressWarnings({"varargs", "unchecked"})
public MultiCollectorManager(final CollectorManager<? extends Collector, ?>... collectorManagers) {
if (collectorManagers.length < 1) {
throw new IllegalArgumentException("There must be at least one collector");
}
this.collectorManagers = (CollectorManager[]) collectorManagers;
}
@ -71,11 +74,16 @@ public class MultiCollectorManager implements CollectorManager<MultiCollectorMan
}
@Override
final public boolean needsScores() {
for (Collector collector : collectors)
if (collector.needsScores())
return true;
return false;
final public ScoreMode scoreMode() {
ScoreMode scoreMode = null;
for (Collector collector : collectors) {
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
return ScoreMode.COMPLETE;
}
}
return scoreMode;
}
public class LeafCollectors implements LeafCollector {

View File

@ -334,8 +334,8 @@ public class MultiPhraseQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new MultiPhraseWeight(searcher, needsScores, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new MultiPhraseWeight(searcher, scoreMode.needsScores(), boost);
}
/** Prints a user-readable version of this query. */

View File

@ -108,7 +108,7 @@ final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends
public final String getField() { return query.getField(); }
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
/** Try to collect terms from the given terms enum and return true iff all
@ -153,7 +153,7 @@ final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends
bq.add(new TermQuery(new Term(query.field, t.term), termContext), Occur.SHOULD);
}
Query q = new ConstantScoreQuery(bq.build());
final Weight weight = searcher.rewrite(q).createWeight(searcher, needsScores, score());
final Weight weight = searcher.rewrite(q).createWeight(searcher, scoreMode, score());
return new WeightOrDocIdSet(weight);
}

View File

@ -62,7 +62,7 @@ public final class NormsFieldExistsQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

View File

@ -509,8 +509,8 @@ public class PhraseQuery extends Query {
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new PhraseWeight(searcher, needsScores, boost);
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new PhraseWeight(searcher, scoreMode.needsScores(), boost);
}
/** Prints a user-readable version of this query. */

View File

@ -106,7 +106,7 @@ public abstract class PointInSetQuery extends Query {
}
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
// We don't use RandomAccessWeight here: it's no good to approximate with "match all docs".
// This is an inverted structure and should be used in the first pass:

View File

@ -99,7 +99,7 @@ public abstract class PointRangeQuery extends Query {
}
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
// We don't use RandomAccessWeight here: it's no good to approximate with "match all docs".
// This is an inverted structure and should be used in the first pass:

View File

@ -59,10 +59,10 @@ public abstract class Query {
* <p>
* Only implemented by primitive queries, which re-write to themselves.
*
* @param needsScores True if document scores ({@link Scorer#score}) are needed.
* @param scoreMode How the produced scorers will be consumed.
* @param boost The boost that is propagated by the parent queries.
*/
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
throw new UnsupportedOperationException("Query " + this + " does not implement createWeight");
}

View File

@ -29,7 +29,7 @@ public interface QueryCache {
* Return a wrapper around the provided <code>weight</code> that will cache
* matching docs per-segment accordingly to the given <code>policy</code>.
* NOTE: The returned weight will only be equivalent if scores are not needed.
* @see Collector#needsScores()
* @see Collector#scoreMode()
*/
Weight doCache(Weight weight, QueryCachingPolicy policy);

View File

@ -60,7 +60,7 @@ public abstract class QueryRescorer extends Rescorer {
List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
Weight weight = searcher.createNormalizedWeight(query, true);
Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
// Now merge sort docIDs from hits, with reader's leaves:
int hitUpto = 0;

View File

@ -76,6 +76,17 @@ class ReqExclScorer extends Scorer {
return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
}
@Override
public float maxScore() {
return reqScorer.maxScore();
}
@Override
public void setMinCompetitiveScore(float score) {
// The score of this scorer is the same as the score of 'reqScorer'.
reqScorer.setMinCompetitiveScore(score);
}
@Override
public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(reqScorer, "MUST"));

View File

@ -85,6 +85,11 @@ class ReqOptSumScorer extends Scorer {
return score;
}
@Override
public float maxScore() {
return reqScorer.maxScore() + optScorer.maxScore();
}
@Override
public Collection<ChildScorer> getChildren() {
ArrayList<ChildScorer> children = new ArrayList<>(2);

View File

@ -32,7 +32,7 @@ import java.util.Collections;
* several places, however all they have in hand is a {@link Scorer} object, and
* might end up computing the score of a document more than once.
*/
public class ScoreCachingWrappingScorer extends FilterScorer {
public final class ScoreCachingWrappingScorer extends FilterScorer {
private int curDoc = -1;
private float curScore;
@ -53,6 +53,16 @@ public class ScoreCachingWrappingScorer extends FilterScorer {
return curScore;
}
@Override
public float maxScore() {
return in.maxScore();
}
@Override
public void setMinCompetitiveScore(float minScore) {
in.setMinCompetitiveScore(minScore);
}
@Override
public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(in, "CACHED"));

View File

@ -0,0 +1,60 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
/**
 * Different modes of search.
 */
public enum ScoreMode {

  /**
   * Produced scorers will allow visiting all matches and get their score.
   */
  COMPLETE(true),

  /**
   * Produced scorers will allow visiting all matches but scores won't be
   * available.
   */
  COMPLETE_NO_SCORES(false),

  /**
   * Produced scorers will optionally allow skipping over non-competitive
   * hits using the {@link Scorer#setMinCompetitiveScore(float)} API.
   */
  TOP_SCORES(true);

  // Whether scorers produced under this mode must be able to compute scores.
  private final boolean needsScores;

  ScoreMode(boolean needsScores) {
    this.needsScores = needsScores;
  }

  /**
   * Whether this {@link ScoreMode} needs to compute scores.
   */
  public boolean needsScores() {
    return needsScores;
  }
}

View File

@ -143,4 +143,21 @@ public abstract class Scorer {
public TwoPhaseIterator twoPhaseIterator() {
return null;
}
/**
* Optional method: Tell the scorer that its iterator may safely ignore all
* documents whose score is less than the given {@code minScore}. This is a
* no-op by default.
*
* This method may only be called from collectors that use
* {@link ScoreMode#TOP_SCORES}, and successive calls may only set increasing
* values of {@code minScore}.
*/
public void setMinCompetitiveScore(float minScore) {
// no-op by default
}
/** Return the maximum score that this scorer may produce. If scores are not
* bounded, {@link Float#POSITIVE_INFINITY} must be returned. */
public abstract float maxScore();
}

View File

@ -556,6 +556,11 @@ final class SloppyPhraseScorer extends Scorer {
return docScorer.score(docID(), sloppyFreq);
}
@Override
public float maxScore() {
return docScorer.maxScore(Float.POSITIVE_INFINITY);
}
@Override
public String toString() { return "scorer(" + weight + ")"; }

View File

@ -25,6 +25,8 @@ import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
@ -111,8 +113,8 @@ public final class SynonymQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
if (needsScores) {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
if (scoreMode.needsScores()) {
return new SynonymWeight(this, searcher, boost);
} else {
// if scores are not needed, let BooleanWeight deal with optimizing that case.
@ -120,7 +122,7 @@ public final class SynonymQuery extends Query {
for (Term term : terms) {
bq.add(new TermQuery(term), BooleanClause.Occur.SHOULD);
}
return searcher.rewrite(bq.build()).createWeight(searcher, needsScores, boost);
return searcher.rewrite(bq.build()).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
}
}
@ -189,19 +191,32 @@ public final class SynonymQuery extends Query {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
Similarity.SimScorer simScorer = null;
IndexOptions indexOptions = IndexOptions.NONE;
if (terms.length > 0) {
FieldInfo info = context.reader()
.getFieldInfos()
.fieldInfo(terms[0].field());
if (info != null) {
indexOptions = info.getIndexOptions();
}
}
// we use termscorers + disjunction as an impl detail
List<Scorer> subScorers = new ArrayList<>();
long maxFreq = 0;
for (int i = 0; i < terms.length; i++) {
TermState state = termContexts[i].get(context.ord);
if (state != null) {
TermsEnum termsEnum = context.reader().terms(terms[i].field()).iterator();
termsEnum.seekExact(terms[i].bytes(), state);
maxFreq += getMaxFreq(indexOptions, termsEnum.totalTermFreq(), termsEnum.docFreq());
PostingsEnum postings = termsEnum.postings(null, PostingsEnum.FREQS);
// lazy init sim, in case no terms exist
if (simScorer == null) {
simScorer = similarity.simScorer(simWeight, context);
}
subScorers.add(new TermScorer(this, postings, simScorer));
subScorers.add(new TermScorer(this, postings, simScorer, Float.POSITIVE_INFINITY));
}
}
if (subScorers.isEmpty()) {
@ -210,7 +225,7 @@ public final class SynonymQuery extends Query {
// we must optimize this case (term not in segment), disjunctionscorer requires >= 2 subs
return subScorers.get(0);
} else {
return new SynonymScorer(simScorer, this, subScorers);
return new SynonymScorer(simScorer, this, subScorers, maxFreq);
}
}
@ -221,12 +236,25 @@ public final class SynonymQuery extends Query {
}
/**
 * Upper bound of the term frequency of any of this query's terms in a single
 * document of this segment.
 */
private long getMaxFreq(IndexOptions indexOptions, long ttf, long df) {
  // TODO: store the max term freq?
  if (indexOptions.compareTo(IndexOptions.DOCS) <= 0) {
    // Frequencies are not indexed for this field, so every tf is implicitly 1.
    return 1;
  }
  assert ttf >= 0;
  // One document can hold at most ttf - df + 1 occurrences; clamp to the int range.
  return Math.min(Integer.MAX_VALUE, ttf - df + 1);
}
static class SynonymScorer extends DisjunctionScorer {
private final Similarity.SimScorer similarity;
private final float maxFreq;
SynonymScorer(Similarity.SimScorer similarity, Weight weight, List<Scorer> subScorers) {
SynonymScorer(Similarity.SimScorer similarity, Weight weight, List<Scorer> subScorers, float maxFreq) {
super(weight, subScorers, true);
this.similarity = similarity;
this.maxFreq = maxFreq;
}
@Override
@ -234,6 +262,11 @@ public final class SynonymQuery extends Query {
return similarity.score(topList.doc, tf(topList));
}
@Override
public float maxScore() {
return similarity.maxScore(maxFreq);
}
/** combines TF of all subs. */
final int tf(DisiWrapper topList) throws IOException {
int tf = 0;

View File

@ -205,7 +205,7 @@ public class TermInSetQuery extends Query implements Accountable {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
@ -269,7 +269,7 @@ public class TermInSetQuery extends Query implements Accountable {
bq.add(new TermQuery(new Term(t.field, t.term), termContext), Occur.SHOULD);
}
Query q = new ConstantScoreQuery(bq.build());
final Weight weight = searcher.rewrite(q).createWeight(searcher, needsScores, score());
final Weight weight = searcher.rewrite(q).createWeight(searcher, scoreMode, score());
return new WeightOrDocIdSet(weight);
} else {
assert builder != null;

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@ -94,9 +95,25 @@ public class TermQuery extends Query {
if (termsEnum == null) {
return null;
}
IndexOptions indexOptions = context.reader()
.getFieldInfos()
.fieldInfo(getTerm().field())
.getIndexOptions();
PostingsEnum docs = termsEnum.postings(null, needsScores ? PostingsEnum.FREQS : PostingsEnum.NONE);
assert docs != null;
return new TermScorer(this, docs, similarity.simScorer(stats, context));
return new TermScorer(this, docs, similarity.simScorer(stats, context),
getMaxFreq(indexOptions, termsEnum.totalTermFreq(), termsEnum.docFreq()));
}
/**
 * Upper bound of the frequency of the searched term in any document of this
 * segment.
 */
private long getMaxFreq(IndexOptions indexOptions, long ttf, long df) {
  // TODO: store the max term freq?
  final boolean freqsOmitted = indexOptions.compareTo(IndexOptions.DOCS) <= 0;
  if (freqsOmitted) {
    // The field does not record frequencies, so tf is implicitly 1 everywhere.
    return 1;
  }
  assert ttf >= 0;
  // A single document can contain at most ttf - df + 1 occurrences of the term.
  return Math.min(Integer.MAX_VALUE, ttf - df + 1);
}
@Override
@ -185,12 +202,12 @@ public class TermQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final IndexReaderContext context = searcher.getTopReaderContext();
final TermContext termState;
if (perReaderTermState == null
|| perReaderTermState.wasBuiltFor(context) == false) {
if (needsScores) {
if (scoreMode.needsScores()) {
// make TermQuery single-pass if we don't have a PRTS or if the context
// differs!
termState = TermContext.build(context, term);
@ -204,7 +221,7 @@ public class TermQuery extends Query {
termState = this.perReaderTermState;
}
return new TermWeight(searcher, needsScores, boost, termState);
return new TermWeight(searcher, scoreMode.needsScores(), boost, termState);
}
/** Prints a user-readable version of this query. */

View File

@ -27,6 +27,7 @@ import org.apache.lucene.search.similarities.Similarity;
final class TermScorer extends Scorer {
private final PostingsEnum postingsEnum;
private final Similarity.SimScorer docScorer;
private final float maxFreq;
/**
* Construct a <code>TermScorer</code>.
@ -38,11 +39,14 @@ final class TermScorer extends Scorer {
* @param docScorer
* The <code>Similarity.SimScorer</code> implementation
* to be used for score computations.
* @param maxFreq
* An upper bound of the term frequency of the searched term in any document.
*/
TermScorer(Weight weight, PostingsEnum td, Similarity.SimScorer docScorer) {
TermScorer(Weight weight, PostingsEnum td, Similarity.SimScorer docScorer, float maxFreq) {
super(weight);
this.docScorer = docScorer;
this.postingsEnum = td;
this.maxFreq = maxFreq;
}
@Override
@ -65,6 +69,11 @@ final class TermScorer extends Scorer {
return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
}
@Override
public float maxScore() {
return docScorer.maxScore(maxFreq);
}
/** Returns a string representation of this <code>TermScorer</code>. */
@Override
public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }

View File

@ -162,8 +162,8 @@ public class TimeLimitingCollector implements Collector {
}
@Override
public boolean needsScores() {
return collector.needsScores();
public ScoreMode scoreMode() {
return collector.scoreMode();
}
/**

View File

@ -43,7 +43,7 @@ public abstract class TopDocsCollector<T extends ScoreDoc> implements Collector
* HitQueue for example aggregates the top scoring documents, while other PQ
* implementations may hold documents sorted by other criteria.
*/
protected PriorityQueue<T> pq;
protected final PriorityQueue<T> pq;
/** The total number of documents that the collector encountered. */
protected int totalHits;

View File

@ -121,9 +121,11 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
final LeafFieldComparator[] comparators = queue.getComparators(context);
final int[] reverseMul = queue.getReverseMul();
final Sort indexSort = context.reader().getMetaData().getSort();
final boolean canEarlyTerminate = trackTotalHits == false &&
trackMaxScore == false &&
canEarlyTerminate(sort, context.reader().getMetaData().getSort());
indexSort != null &&
canEarlyTerminate(sort, indexSort);
final int initialTotalHits = totalHits;
return new MultiComparatorLeafCollector(comparators, reverseMul, mayNeedScoresTwice) {
@ -212,7 +214,9 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
this.trackTotalHits = trackTotalHits;
// Must set maxScore to NEG_INF, or otherwise Math.max always returns NaN.
if (trackMaxScore) {
maxScore = Float.NEGATIVE_INFINITY;
}
FieldComparator<?>[] comparators = queue.comparators;
// Tell all comparators their top value:
@ -227,9 +231,11 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
docBase = context.docBase;
final int afterDoc = after.doc - docBase;
final Sort indexSort = context.reader().getMetaData().getSort();
final boolean canEarlyTerminate = trackTotalHits == false &&
trackMaxScore == false &&
canEarlyTerminate(sort, context.reader().getMetaData().getSort());
indexSort != null &&
canEarlyTerminate(sort, indexSort);
final int initialTotalHits = totalHits;
return new MultiComparatorLeafCollector(queue.getComparators(context), queue.getReverseMul(), mayNeedScoresTwice) {
@ -338,8 +344,8 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
}
@Override
public boolean needsScores() {
return needsScores;
public ScoreMode scoreMode() {
return needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
}
/**

View File

@ -49,16 +49,36 @@ public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
private static class SimpleTopScoreDocCollector extends TopScoreDocCollector {
SimpleTopScoreDocCollector(int numHits) {
private final int numHits;
private final boolean trackTotalHits;
private int sumMaxDoc;
private int maxCollectedExactly = -1;
SimpleTopScoreDocCollector(int numHits, boolean trackTotalHits) {
super(numHits);
this.numHits = numHits;
this.trackTotalHits = trackTotalHits;
}
@Override
public LeafCollector getLeafCollector(LeafReaderContext context)
throws IOException {
final int docBase = context.docBase;
sumMaxDoc += context.reader().maxDoc();
return new ScorerLeafCollector() {
@Override
public void setScorer(Scorer scorer) throws IOException {
super.setScorer(scorer);
if (trackTotalHits == false
&& pqTop != null
&& pqTop.score != Float.NEGATIVE_INFINITY) {
// since we tie-break on doc id and collect in doc id order, we can require
// the next float
scorer.setMinCompetitiveScore(Math.nextUp(pqTop.score));
}
}
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
@ -76,11 +96,38 @@ public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
pqTop.doc = doc + docBase;
pqTop.score = score;
pqTop = pq.updateTop();
if (trackTotalHits == false && pqTop.score != Float.NEGATIVE_INFINITY) { // -Infinity is the score of sentinels
// since we tie-break on doc id and collect in doc id order, we can require
// the next float
scorer.setMinCompetitiveScore(Math.nextUp(pqTop.score));
if (maxCollectedExactly < 0) {
assert totalHits == numHits;
maxCollectedExactly = doc + docBase;
}
}
}
};
}
@Override
public TopDocs topDocs() {
TopDocs topDocs = super.topDocs();
if (trackTotalHits == false && maxCollectedExactly >= 0) {
// assume matches are evenly spread in the doc id space
// this may be completely off
long totalHitsEstimate = (long) numHits * sumMaxDoc / (maxCollectedExactly + 1);
// we take the max since the current topDocs.totalHits is a lower bound
// of the total hit count
topDocs.totalHits = Math.max(topDocs.totalHits, totalHitsEstimate);
}
return topDocs;
}
@Override
public ScoreMode scoreMode() {
return trackTotalHits ? ScoreMode.COMPLETE : ScoreMode.TOP_SCORES;
}
}
private static class PagingTopScoreDocCollector extends TopScoreDocCollector {
@ -140,8 +187,7 @@ public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
/**
* Creates a new {@link TopScoreDocCollector} given the number of hits to
* collect and whether documents are scored in order by the input
* {@link Scorer} to {@link LeafCollector#setScorer(Scorer)}.
* collect.
*
* <p><b>NOTE</b>: The instances returned by this method
* pre-allocate a full array of length
@ -149,27 +195,30 @@ public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
* objects.
*/
public static TopScoreDocCollector create(int numHits) {
return create(numHits, null);
return create(numHits, null, true);
}
/**
* Creates a new {@link TopScoreDocCollector} given the number of hits to
* collect, the bottom of the previous page, and whether documents are scored in order by the input
* {@link Scorer} to {@link LeafCollector#setScorer(Scorer)}.
* collect, the bottom of the previous page, and whether the total hit count
* is needed.
*
* <p><b>NOTE</b>: If {@code trackTotalHits} is {@code false} then the
* {@link TopDocs#totalHits} of the returned {@link TopDocs} will be an
* approximation and may be completely off.
* <p><b>NOTE</b>: The instances returned by this method
* pre-allocate a full array of length
* <code>numHits</code>, and fill the array with sentinel
* objects.
*/
public static TopScoreDocCollector create(int numHits, ScoreDoc after) {
public static TopScoreDocCollector create(int numHits, ScoreDoc after, boolean trackTotalHits) {
if (numHits <= 0) {
throw new IllegalArgumentException("numHits must be > 0; please use TotalHitCountCollector if you just need the total hit count");
}
if (after == null) {
return new SimpleTopScoreDocCollector(numHits);
return new SimpleTopScoreDocCollector(numHits, trackTotalHits);
} else {
return new PagingTopScoreDocCollector(numHits, after);
}
@ -207,7 +256,7 @@ public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {
}
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
}

View File

@ -35,7 +35,7 @@ public class TotalHitCountCollector extends SimpleCollector {
}
@Override
public boolean needsScores() {
return false;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
}

View File

@ -0,0 +1,478 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import static org.apache.lucene.search.DisiPriorityQueue.leftNode;
import static org.apache.lucene.search.DisiPriorityQueue.parentNode;
import static org.apache.lucene.search.DisiPriorityQueue.rightNode;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.OptionalInt;
/**
* This implements the WAND (Weak AND) algorithm for dynamic pruning
* described in "Efficient Query Evaluation using a Two-Level Retrieval
* Process" by Broder, Carmel, Herscovici, Soffer and Zien.
* This scorer maintains a feedback loop with the collector in order to
* know at any time the minimum score that is required in order for a hit
* to be competitive. Then it leverages the {@link Scorer#maxScore() max score}
* from each scorer in order to know when it may call
* {@link DocIdSetIterator#advance} rather than {@link DocIdSetIterator#nextDoc}
* to move to the next competitive hit.
* Implementation is similar to {@link MinShouldMatchSumScorer} except that
* instead of enforcing that {@code freq >= minShouldMatch}, we enforce that
* {@code max_score >= minCompetitiveScore}.
*/
final class WANDScorer extends Scorer {
/** Return a scaling factor for the given float so that
 * f x 2^scalingFactor would be in ]2^15, 2^16]. Special
 * cases:
 *   scalingFactor(0) = scalingFactor(MIN_VALUE) - 1
 *   scalingFactor(+Infty) = scalingFactor(MAX_VALUE) + 1
 * @throws IllegalArgumentException if {@code f} is negative — scores may never be negative
 */
static int scalingFactor(float f) {
  if (f < 0) {
    // Previously threw with an empty message, which made failures impossible to diagnose.
    throw new IllegalArgumentException("Scores must be positive or null, got: " + f);
  } else if (f == 0) {
    // 0 sorts below every positive value: one less than the smallest positive float's factor
    return scalingFactor(Float.MIN_VALUE) - 1;
  } else if (Float.isInfinite(f)) {
    // +Infinity sorts above every finite value: one more than the largest float's factor
    return scalingFactor(Float.MAX_VALUE) + 1;
  } else {
    double d = f;
    // Since doubles have more amplitude than floats for the
    // exponent, the cast produces a normal value.
    assert d == 0 || Math.getExponent(d) >= Double.MIN_EXPONENT; // normal double
    // nextDown maps exact powers of two into the half-open interval below them,
    // so the result lands f x 2^factor in ]2^15, 2^16] rather than [2^15, 2^16[.
    return 15 - Math.getExponent(Math.nextDown(d));
  }
}
/**
 * Scale max scores in an unsigned integer to avoid overflows
 * (only the lower 32 bits of the long are used) as well as
 * floating-point arithmetic errors. Those are rounded up in order
 * to make sure we do not miss any matches.
 */
private static long scaleMaxScore(float maxScore, int scalingFactor) {
  assert Float.isNaN(maxScore) == false;
  assert maxScore >= 0;

  if (Float.isInfinite(maxScore)) {
    return (1L << 32) - 1; // means +Infinity in practice for this scorer
  }

  // NOTE: because doubles have more amplitude than floats for the
  // exponent, the scalb call produces an accurate value.
  double scaled = Math.scalb((double) maxScore, scalingFactor);
  assert scaled <= 1 << 16 : scaled + " " + maxScore; // regular values of max_score go into 0..2^16
  return (long) Math.ceil(scaled); // round up, cast is accurate since value is <= 2^16
}
/**
 * Scale min competitive scores the same way as max scores but this time
 * by rounding down in order to make sure that we do not miss any matches.
 */
private static long scaleMinScore(float minScore, int scalingFactor) {
  assert Float.isNaN(minScore) == false;
  assert minScore >= 0;

  // like for scaleMaxScore, this scalb call is accurate
  double scaled = Math.scalb((double) minScore, scalingFactor);
  // round down, cast might lower the value again if scaled > Long.MAX_VALUE, which is fine
  return (long) Math.floor(scaled);
}
// shared scale applied to all max/min scores so they can be compared as longs
// (see scalingFactor(float)); 0 when every max score is 0 or +Infinity
private final int scalingFactor;
// scaled min competitive score
private long minCompetitiveScore = 0;

// list of scorers which 'lead' the iteration and are currently
// positioned on 'doc'. This is sometimes called the 'pivot' in
// some descriptions of WAND (Weak AND).
DisiWrapper lead;
int doc;  // current doc ID of the leads
long leadMaxScore; // sum of the max scores of scorers in 'lead'

// priority queue of scorers that are too advanced compared to the current
// doc. Ordered by doc ID.
final DisiPriorityQueue head;

// priority queue of scorers which are behind the current doc.
// Ordered by maxScore.
final DisiWrapper[] tail;
long tailMaxScore; // sum of the max scores of scorers in 'tail'
int tailSize; // number of live entries in 'tail'

final long cost; // sum of the costs of all sub-scorers, computed in the constructor
/**
 * Sole constructor. All sub-scorers initially join the 'lead' list,
 * positioned on doc -1; the first advance() redistributes them across
 * lead/head/tail.
 *
 * @param weight  the parent weight
 * @param scorers the disjuncts; at least two are expected (a single-clause
 *                disjunction has cheaper alternatives — assumed, not enforced here)
 */
WANDScorer(Weight weight, Collection<Scorer> scorers) {
  super(weight);
  this.minCompetitiveScore = 0;
  this.doc = -1;
  head = new DisiPriorityQueue(scorers.size());
  // there can be at most num_scorers - 1 scorers beyond the current position
  tail = new DisiWrapper[scorers.size() - 1];

  // Pick the largest common scaling factor such that every finite, non-zero
  // max score still fits in the scaled long domain without overflowing.
  OptionalInt scalingFactor = OptionalInt.empty();
  for (Scorer scorer : scorers) {
    float maxScore = scorer.maxScore();
    if (maxScore != 0 && Float.isFinite(maxScore)) {
      // 0 and +Infty should not impact the scale
      scalingFactor = OptionalInt.of(Math.min(scalingFactor.orElse(Integer.MAX_VALUE), scalingFactor(maxScore)));
    }
  }
  // Use a scaling factor of 0 if all max scores are either 0 or +Infty
  this.scalingFactor = scalingFactor.orElse(0);

  // Wrap each scorer, pre-compute its scaled max score, and chain it into 'lead'.
  for (Scorer scorer : scorers) {
    DisiWrapper w = new DisiWrapper(scorer);
    float maxScore = scorer.maxScore();
    w.maxScore = scaleMaxScore(maxScore, this.scalingFactor);
    addLead(w);
  }

  // The cost of a disjunction is the summed cost of its sub-iterators.
  long cost = 0;
  for (DisiWrapper w = lead; w != null; w = w.next) {
    cost += w.cost;
  }
  this.cost = cost;
}
// returns a boolean so that it can be called from assert
// the return value is useless: it always returns true
//
// Invariants checked: every 'tail' entry is behind 'doc' and their max scores
// sum to tailMaxScore; every 'lead' entry is on 'doc' and their max scores sum
// to leadMaxScore; every 'head' entry is beyond 'doc'; and a non-empty tail
// implies its summed max score is below the min competitive score (otherwise
// those scorers should have been advanced). Violations surface as
// AssertionErrors, never as a false return.
private boolean ensureConsistent() {
  long maxScoreSum = 0;
  for (int i = 0; i < tailSize; ++i) {
    assert tail[i].doc < doc;
    // addExact: a silent overflow here would mask a bookkeeping bug
    maxScoreSum = Math.addExact(maxScoreSum, tail[i].maxScore);
  }
  assert maxScoreSum == tailMaxScore : maxScoreSum + " " + tailMaxScore;

  maxScoreSum = 0;
  for (DisiWrapper w = lead; w != null; w = w.next) {
    assert w.doc == doc;
    maxScoreSum = Math.addExact(maxScoreSum, w.maxScore);
  }
  assert maxScoreSum == leadMaxScore : maxScoreSum + " " + leadMaxScore;

  for (DisiWrapper w : head) {
    assert w.doc > doc;
  }

  assert tailSize == 0 || tailMaxScore < minCompetitiveScore;

  return true;
}
/**
 * Record the new minimum competitive score, converted into the scaled long
 * domain used internally. Rounding down (see scaleMinScore) guarantees that
 * no hit whose score equals the threshold is ever skipped.
 */
@Override
public void setMinCompetitiveScore(float minScore) {
  assert minScore >= 0;
  long scaledMinScore = scaleMinScore(minScore, scalingFactor);
  // the Scorer contract only allows successive calls to raise the bar
  assert scaledMinScore >= minCompetitiveScore;
  minCompetitiveScore = scaledMinScore;
}
/**
 * Return the sub-scorers positioned on the current document as SHOULD
 * children. NOTE(review): updateFreq() is defined further down this class
 * (outside this view); it presumably pulls any matching 'tail' scorers onto
 * the 'lead' list first so that all matching children are reported — confirm
 * against the full class.
 */
@Override
public final Collection<ChildScorer> getChildren() throws IOException {
  List<ChildScorer> matchingChildren = new ArrayList<>();
  updateFreq();
  for (DisiWrapper s = lead; s != null; s = s.next) {
    matchingChildren.add(new ChildScorer(s.scorer, "SHOULD"));
  }
  return matchingChildren;
}
/** Expose the two-phase view as a plain doc ID iterator. */
@Override
public DocIdSetIterator iterator() {
  return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
  // The approximation only guarantees that 'doc' is a *candidate*: a doc on
  // which the sum of the max scores of matching clauses may reach the
  // minimum competitive score. matches() below confirms or rejects it.
  DocIdSetIterator approximation = new DocIdSetIterator() {
    @Override
    public int docID() {
      return doc;
    }

    @Override
    public int nextDoc() throws IOException {
      return advance(doc + 1);
    }

    @Override
    public int advance(int target) throws IOException {
      assert ensureConsistent();

      // Move 'lead' iterators back to the tail
      pushBackLeads(target);

      // Advance 'head' as well
      advanceHead(target);

      // Pop the new 'lead' from the 'head'
      setDocAndFreq();

      assert ensureConsistent();

      // Advance to the next possible match
      return doNextCandidate();
    }

    @Override
    public long cost() {
      return cost;
    }
  };
  return new TwoPhaseIterator(approximation) {
    @Override
    public boolean matches() throws IOException {
      // Confirm the candidate: advance tail scorers one at a time until
      // either the lead alone is competitive, or no combination of the
      // remaining clauses can possibly reach the threshold.
      while (leadMaxScore < minCompetitiveScore) {
        if (leadMaxScore + tailMaxScore >= minCompetitiveScore) {
          // a match on doc is still possible, try to
          // advance scorers from the tail
          advanceTail();
        } else {
          return false;
        }
      }
      return true;
    }

    @Override
    public float matchCost() {
      // maximum number of scorers that matches() might advance
      return tail.length;
    }
  };
}
// Prepend a scorer positioned on the current doc to the singly-linked
// 'lead' list and keep leadMaxScore in sync.
private void addLead(DisiWrapper wrapper) {
  wrapper.next = lead;
  lead = wrapper;
  leadMaxScore += wrapper.maxScore;
}
// Drain the 'lead' list: each entry either fits into the tail, or is
// advanced to 'target' and re-inserted into the head.
private void pushBackLeads(int target) throws IOException {
  DisiWrapper s = lead;
  while (s != null) {
    final DisiWrapper next = s.next;
    final DisiWrapper evicted = insertTailWithOverFlow(s);
    if (evicted != null) {
      evicted.doc = evicted.iterator.advance(target);
      head.add(evicted);
    }
    s = next;
  }
}
// Advance every entry of 'head' that is positioned before 'target': each
// one either fits into the tail, or is advanced to 'target' and kept in
// the head.
private void advanceHead(int target) throws IOException {
  DisiWrapper headTop = head.top();
  // NOTE: head.top() returns null once the queue is empty (head.pop()
  // below can drain it), so the loop must be null-guarded — the previous
  // unguarded 'headTop.doc' dereference could throw NullPointerException.
  while (headTop != null && headTop.doc < target) {
    final DisiWrapper evicted = insertTailWithOverFlow(headTop);
    if (evicted != null) {
      evicted.doc = evicted.iterator.advance(target);
      headTop = head.updateTop(evicted);
    } else {
      head.pop();
      headTop = head.top();
    }
  }
}
// Advance one tail scorer to the current candidate doc and file it where
// it now belongs: 'lead' on an exact hit, 'head' otherwise.
private void advanceTail(DisiWrapper disi) throws IOException {
  disi.doc = disi.iterator.advance(doc);
  if (disi.doc != doc) {
    // Overshot: it cannot match the current doc, park it in the head.
    head.add(disi);
  } else {
    // Landed exactly on the current doc: it now contributes to the lead.
    addLead(disi);
  }
}
// Pop the tail entry with the greatest max score and advance it to the
// current candidate doc.
private void advanceTail() throws IOException {
  advanceTail(popTail());
}
/** Reinitializes head, freq and doc from 'head' */
private void setDocAndFreq() {
  assert head.size() > 0;
  // The least-advanced entry of 'head' defines the next candidate match.
  lead = head.pop();
  lead.next = null;
  doc = lead.doc;
  leadMaxScore = lead.maxScore;
  // Gather every other scorer that sits on the same document.
  while (head.size() > 0 && head.top().doc == doc) {
    addLead(head.pop());
  }
}
/** Move iterators to the tail until there is a potential match. */
private int doNextCandidate() throws IOException {
  // Keep skipping ahead while the combined upper bound of the lead and tail
  // clauses cannot reach the minimum competitive score on the current doc.
  while (leadMaxScore + tailMaxScore < minCompetitiveScore) {
    // no match on doc is possible, move to the next potential match
    if (head.size() == 0) {
      // special case: the total max score is less than the min competitive score, there are no more matches
      return doc = DocIdSetIterator.NO_MORE_DOCS;
    }
    pushBackLeads(doc + 1);
    setDocAndFreq();
    assert ensureConsistent();
  }
  return doc;
}
/** Advance all entries from the tail to know about all matches on the
 * current doc. */
private void updateFreq() throws IOException {
  // we return the next doc when the sum of the scores of the potential
  // matching clauses is high enough but some of the clauses in 'tail' might
  // match as well
  // in general we want to advance least-costly clauses first in order to
  // skip over non-matching documents as fast as possible. However here,
  // we are advancing everything anyway so iterating over clauses in
  // (roughly) cost-descending order might help avoid some permutations in
  // the head heap
  for (int i = tailSize - 1; i >= 0; --i) {
    advanceTail(tail[i]);
  }
  // every former tail entry now lives in either 'lead' or 'head'
  tailSize = 0;
  tailMaxScore = 0;
  assert ensureConsistent();
}
@Override
public float score() throws IOException {
  // Bring every clause that matches the current doc into the 'lead' list
  // first, so that no matching clause is missed when summing.
  updateFreq();
  double sum = 0;
  for (DisiWrapper w = lead; w != null; w = w.next) {
    sum += w.scorer.score();
  }
  return (float) sum;
}
@Override
public float maxScore() {
  // TODO: implement but be careful about floating-point errors.
  // +Infinity is always a safe (if useless) upper bound.
  return Float.POSITIVE_INFINITY;
}
@Override
public int docID() {
  // Current candidate document, shared with the approximation iterator.
  return doc;
}
/** Insert an entry in 'tail' and evict the least-costly scorer if full. */
private DisiWrapper insertTailWithOverFlow(DisiWrapper s) {
  // Only insert when the tail stays non-competitive afterwards: its summed
  // max score must remain below the minimum competitive score.
  if (tailSize < tail.length && tailMaxScore + s.maxScore < minCompetitiveScore) {
    // we have free room for this new entry
    addTail(s);
    tailMaxScore += s.maxScore;
    return null;
  } else if (tailSize == 0) {
    // nothing to evict: the caller keeps responsibility for 's'
    return s;
  } else {
    final DisiWrapper top = tail[0];
    if (greaterMaxScore(top, s) == false) {
      // 's' orders at or above the current tail top, so it is the one that
      // stays out
      return s;
    }
    // Swap top and s
    tail[0] = s;
    downHeapMaxScore(tail, tailSize);
    tailMaxScore = tailMaxScore - top.maxScore + s.maxScore;
    return top;
  }
}
/** Add an entry to 'tail'. Fails if over capacity. */
private void addTail(DisiWrapper s) {
  final int slot = tailSize;
  tail[slot] = s;
  // Restore the max-score heap ordering for the freshly appended leaf.
  upHeapMaxScore(tail, slot);
  tailSize = slot + 1;
}
/** Pop the least-costly scorer from 'tail'. */
private DisiWrapper popTail() {
  assert tailSize > 0;
  final DisiWrapper top = tail[0];
  // Move the last leaf to the root and sift it down to restore the heap.
  tailSize -= 1;
  tail[0] = tail[tailSize];
  downHeapMaxScore(tail, tailSize);
  tailMaxScore -= top.maxScore;
  return top;
}
/** Heap helpers */

// Sift the node at index i up until its parent orders at or above it
// (see greaterMaxScore for the ordering).
private static void upHeapMaxScore(DisiWrapper[] heap, int i) {
  final DisiWrapper node = heap[i];
  int j = parentNode(i);
  while (j >= 0 && greaterMaxScore(node, heap[j])) {
    heap[i] = heap[j];
    i = j;
    j = parentNode(j);
  }
  heap[i] = node;
}
// Sift the root down: repeatedly move up the greater child until the heap
// property (a parent orders at or above its children) is restored.
private static void downHeapMaxScore(DisiWrapper[] heap, int size) {
  int i = 0;
  final DisiWrapper node = heap[0];
  int j = leftNode(i);
  if (j < size) {
    int k = rightNode(j);
    if (k < size && greaterMaxScore(heap[k], heap[j])) {
      // pick the greater of the two children
      j = k;
    }
    if (greaterMaxScore(heap[j], node)) {
      do {
        heap[i] = heap[j];
        i = j;
        j = leftNode(i);
        k = rightNode(j);
        if (k < size && greaterMaxScore(heap[k], heap[j])) {
          j = k;
        }
      } while (j < size && greaterMaxScore(heap[j], node));
      heap[i] = node;
    }
  }
}
/**
 * In the tail, we want to get first entries that produce the maximum scores
 * and in case of ties (eg. constant-score queries), those that have the least
 * cost so that they are likely to advance further.
 */
private static boolean greaterMaxScore(DisiWrapper w1, DisiWrapper w2) {
  if (w1.maxScore != w2.maxScore) {
    return w1.maxScore > w2.maxScore;
  }
  // Tie on max score: prefer the cheaper clause.
  return w1.cost < w2.cost;
}
}

View File

@ -43,7 +43,7 @@ import org.apache.lucene.util.Bits;
* A <code>Weight</code> is used in the following way:
* <ol>
* <li>A <code>Weight</code> is constructed by a top-level query, given a
* <code>IndexSearcher</code> ({@link Query#createWeight(IndexSearcher, boolean, float)}).
* <code>IndexSearcher</code> ({@link Query#createWeight(IndexSearcher, ScoreMode, float)}).
* <li>A <code>Scorer</code> is constructed by
* {@link #scorer(org.apache.lucene.index.LeafReaderContext)}.
* </ol>

View File

@ -338,7 +338,7 @@
* {@link org.apache.lucene.search.Query Query} class has several methods that are important for
* derived classes:
* <ol>
* <li>{@link org.apache.lucene.search.Query#createWeight(IndexSearcher,boolean,float) createWeight(IndexSearcher searcher, boolean needsScores, float boost)} &mdash; A
 * <li>{@link org.apache.lucene.search.Query#createWeight(IndexSearcher,ScoreMode,float) createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)} &mdash; A
* {@link org.apache.lucene.search.Weight Weight} is the internal representation of the
* Query, so each Query implementation must
* provide an implementation of Weight. See the subsection on <a
@ -347,7 +347,7 @@
* <li>{@link org.apache.lucene.search.Query#rewrite(org.apache.lucene.index.IndexReader) rewrite(IndexReader reader)} &mdash; Rewrites queries into primitive queries. Primitive queries are:
* {@link org.apache.lucene.search.TermQuery TermQuery},
* {@link org.apache.lucene.search.BooleanQuery BooleanQuery}, <span
* >and other queries that implement {@link org.apache.lucene.search.Query#createWeight(IndexSearcher,boolean,float) createWeight(IndexSearcher searcher,boolean needsScores, float boost)}</span></li>
 * >and other queries that implement {@link org.apache.lucene.search.Query#createWeight(IndexSearcher,ScoreMode,float) createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost)}</span></li>
* </ol>
* <a name="weightClass"></a>
* <h3>The Weight Interface</h3>
@ -453,7 +453,7 @@
* <p>Assuming we are not sorting (since sorting doesn't affect the raw Lucene score),
* we call one of the search methods of the IndexSearcher, passing in the
* {@link org.apache.lucene.search.Weight Weight} object created by
* {@link org.apache.lucene.search.IndexSearcher#createNormalizedWeight(org.apache.lucene.search.Query,boolean)
* {@link org.apache.lucene.search.IndexSearcher#createNormalizedWeight(org.apache.lucene.search.Query,ScoreMode)
 * IndexSearcher.createNormalizedWeight(Query,ScoreMode)} and the number of results we want.
* This method returns a {@link org.apache.lucene.search.TopDocs TopDocs} object,
* which is an internal collection of search results. The IndexSearcher creates

View File

@ -108,6 +108,12 @@ public abstract class Axiomatic extends SimilarityBase {
- gamma(stats, freq, docLen);
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
@Override
protected void explain(List<Explanation> subs, BasicStats stats, int doc,
double freq, double docLen) {

View File

@ -224,6 +224,12 @@ public class BM25Similarity extends Similarity {
return weightValue * (float) (freq / (freq + norm));
}
@Override
public float maxScore(float maxFreq) {
// TODO: leverage maxFreq and the min norm from the cache
return weightValue;
}
@Override
public Explanation explain(int doc, Explanation freq) throws IOException {
List<Explanation> subs = new ArrayList<>();

View File

@ -70,6 +70,11 @@ public class BooleanSimilarity extends Similarity {
return boost;
}
@Override
public float maxScore(float maxFreq) {
return boost;
}
@Override
public Explanation explain(int doc, Explanation freq) throws IOException {
Explanation queryBoostExpl = Explanation.match(boost, "boost");

View File

@ -62,6 +62,12 @@ public class DFISimilarity extends SimilarityBase {
return stats.getBoost() * log2(measure + 1);
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
/**
* Returns the measure of independence
*/

View File

@ -112,6 +112,12 @@ public class DFRSimilarity extends SimilarityBase {
return stats.getBoost() * basicModel.score(stats, tfn, aeTimes1pTfn);
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
@Override
protected void explain(List<Explanation> subs,
BasicStats stats, int doc, double freq, double docLen) {

View File

@ -103,6 +103,12 @@ public class IBSimilarity extends SimilarityBase {
lambda.lambda(stats));
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
@Override
protected void explain(
List<Explanation> subs, BasicStats stats, int doc, double freq, double docLen) {

View File

@ -76,6 +76,12 @@ public class LMDirichletSimilarity extends LMSimilarity {
return score > 0.0d ? score : 0.0d;
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
@Override
protected void explain(List<Explanation> subs, BasicStats stats, int doc,
double freq, double docLen) {

View File

@ -66,6 +66,12 @@ public class LMJelinekMercerSimilarity extends LMSimilarity {
(lambda * ((LMStats)stats).getCollectionProbability()));
}
@Override
protected double maxScore(BasicStats stats, double maxFreq) {
// TODO: can we compute a better upper bound on the produced scores
return Double.POSITIVE_INFINITY;
}
@Override
protected void explain(List<Explanation> subs, BasicStats stats, int doc,
double freq, double docLen) {

View File

@ -82,6 +82,15 @@ public class MultiSimilarity extends Similarity {
return sum;
}
@Override
public float maxScore(float freq) {
float sumMaxScore = 0;
for (SimScorer subScorer : subScorers) {
sumMaxScore += subScorer.maxScore(freq);
}
return sumMaxScore;
}
@Override
public Explanation explain(int doc, Explanation freq) throws IOException {
List<Explanation> subs = new ArrayList<>();

View File

@ -158,6 +158,13 @@ public abstract class Similarity {
*/
public abstract float score(int doc, float freq) throws IOException;
/**
* Return the maximum score that this scorer may produce for freqs in {@code ]0, maxFreq]}.
* {@code Float.POSITIVE_INFINITY} is a fine return value if scores are not bounded.
* @param maxFreq the maximum frequency
*/
public abstract float maxScore(float maxFreq);
/**
* Explain the score for a single document
* @param doc document id within the inverted index segment

View File

@ -121,6 +121,13 @@ public abstract class SimilarityBase extends Similarity {
*/
protected abstract double score(BasicStats stats, double freq, double docLen);
/**
* Return the maximum value that may be returned by {@link #score(BasicStats, double, double)}
* for the given stats.
* @see org.apache.lucene.search.similarities.Similarity.SimScorer#maxScore(float)
*/
protected abstract double maxScore(BasicStats stats, double maxFreq);
/**
* Subclasses should implement this method to explain the score. {@code expl}
* already contains the score, the name of the class and the doc id, as well
@ -249,6 +256,11 @@ public abstract class SimilarityBase extends Similarity {
return (float) SimilarityBase.this.score(stats, freq, getLengthValue(doc));
}
@Override
public float maxScore(float maxFreq) {
return (float) SimilarityBase.this.maxScore(stats, maxFreq);
}
@Override
public Explanation explain(int doc, Explanation freq) throws IOException {
return SimilarityBase.this.explain(stats, doc, freq, getLengthValue(doc));

View File

@ -560,6 +560,20 @@ public abstract class TFIDFSimilarity extends Similarity {
}
}
@Override
public float maxScore(float maxFreq) {
final float raw = tf(maxFreq) * weightValue;
if (norms == null) {
return raw;
} else {
float maxNormValue = Float.NEGATIVE_INFINITY;
for (float norm : normTable) {
maxNormValue = Math.max(maxNormValue, norm);
}
return raw * maxNormValue;
}
}
@Override
public Explanation explain(int doc, Explanation freq) throws IOException {
return explainScore(doc, freq, stats, norms, normTable);

View File

@ -20,6 +20,7 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import java.io.IOException;
import java.util.Objects;
@ -89,8 +90,8 @@ public final class FieldMaskingSpanQuery extends SpanQuery {
// ...this is done to be more consistent with things like SpanFirstQuery
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return maskedQuery.createWeight(searcher, needsScores, boost);
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return maskedQuery.createWeight(searcher, scoreMode, boost);
}
@Override

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
/**
* Counterpart of {@link BoostQuery} for spans.
@ -108,8 +109,8 @@ public final class SpanBoostQuery extends SpanQuery {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return query.createWeight(searcher, needsScores, SpanBoostQuery.this.boost * boost);
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return query.createWeight(searcher, scoreMode, SpanBoostQuery.this.boost * boost);
}
}

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreMode;
/** Keep matches that contain another SpanScorer. */
public final class SpanContainingQuery extends SpanContainQuery {
@ -43,10 +44,10 @@ public final class SpanContainingQuery extends SpanContainQuery {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
SpanWeight bigWeight = big.createWeight(searcher, false, boost);
SpanWeight littleWeight = little.createWeight(searcher, false, boost);
return new SpanContainingWeight(searcher, needsScores ? getTermContexts(bigWeight, littleWeight) : null,
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
SpanWeight bigWeight = big.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
SpanWeight littleWeight = little.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new SpanContainingWeight(searcher, scoreMode.needsScores() ? getTermContexts(bigWeight, littleWeight) : null,
bigWeight, littleWeight, boost);
}

View File

@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.ScoringRewrite;
import org.apache.lucene.search.TopTermsRewrite;
@ -95,7 +96,7 @@ public class SpanMultiTermQueryWrapper<Q extends MultiTermQuery> extends SpanQue
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
throw new IllegalArgumentException("Rewrite first!");
}

View File

@ -33,6 +33,7 @@ import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Weight;
/** Matches spans which are near one another. One can specify <i>slop</i>, the
@ -177,12 +178,12 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
List<SpanWeight> subWeights = new ArrayList<>();
for (SpanQuery q : clauses) {
subWeights.add(q.createWeight(searcher, false, boost));
subWeights.add(q.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost));
}
return new SpanNearWeight(subWeights, searcher, needsScores ? getTermContexts(subWeights) : null, boost);
return new SpanNearWeight(subWeights, searcher, scoreMode.needsScores() ? getTermContexts(subWeights) : null, boost);
}
public class SpanNearWeight extends SpanWeight {
@ -306,7 +307,7 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new SpanGapWeight(searcher, boost);
}

View File

@ -29,6 +29,7 @@ import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TwoPhaseIterator;
/** Removes matches which overlap with another SpanQuery or which are
@ -97,10 +98,10 @@ public final class SpanNotQuery extends SpanQuery {
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
SpanWeight includeWeight = include.createWeight(searcher, false, boost);
SpanWeight excludeWeight = exclude.createWeight(searcher, false, boost);
return new SpanNotWeight(searcher, needsScores ? getTermContexts(includeWeight, excludeWeight) : null,
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
SpanWeight includeWeight = include.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
SpanWeight excludeWeight = exclude.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new SpanNotWeight(searcher, scoreMode.needsScores() ? getTermContexts(includeWeight, excludeWeight) : null,
includeWeight, excludeWeight, boost);
}

View File

@ -33,6 +33,7 @@ import org.apache.lucene.search.DisiWrapper;
import org.apache.lucene.search.DisjunctionDISIApproximation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
@ -115,12 +116,12 @@ public final class SpanOrQuery extends SpanQuery {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
List<SpanWeight> subWeights = new ArrayList<>(clauses.size());
for (SpanQuery q : clauses) {
subWeights.add(q.createWeight(searcher, false, boost));
subWeights.add(q.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost));
}
return new SpanOrWeight(searcher, needsScores ? getTermContexts(subWeights) : null, subWeights, boost);
return new SpanOrWeight(searcher, scoreMode.needsScores() ? getTermContexts(subWeights) : null, subWeights, boost);
}
public class SpanOrWeight extends SpanWeight {

View File

@ -28,6 +28,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.spans.FilterSpans.AcceptStatus;
@ -67,9 +68,9 @@ public abstract class SpanPositionCheckQuery extends SpanQuery implements Clonea
protected abstract AcceptStatus acceptPosition(Spans spans) throws IOException;
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
SpanWeight matchWeight = match.createWeight(searcher, false, boost);
return new SpanPositionCheckWeight(matchWeight, searcher, needsScores ? getTermContexts(matchWeight) : null, boost);
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
SpanWeight matchWeight = match.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new SpanPositionCheckWeight(matchWeight, searcher, scoreMode.needsScores() ? getTermContexts(matchWeight) : null, boost);
}
public class SpanPositionCheckWeight extends SpanWeight {

View File

@ -26,6 +26,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
/** Base class for span-based queries. */
public abstract class SpanQuery extends Query {
@ -36,7 +37,7 @@ public abstract class SpanQuery extends Query {
public abstract String getField();
@Override
public abstract SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException;
public abstract SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException;
/**
* Build a map of terms to termcontexts, for use in constructing SpanWeights

View File

@ -134,6 +134,11 @@ public class SpanScorer extends Scorer {
return scoreCurrentDoc();
}
@Override
public float maxScore() {
return Float.POSITIVE_INFINITY;
}
/** Returns the intermediate "sloppy freq" adjusted for edit distance
* @lucene.internal */
final float sloppyFreq() throws IOException {

View File

@ -33,6 +33,7 @@ import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreMode;
/** Matches spans containing a term.
* This should not be used for terms that are indexed at position Integer.MAX_VALUE.
@ -64,7 +65,7 @@ public class SpanTermQuery extends SpanQuery {
public String getField() { return term.field(); }
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final TermContext context;
final IndexReaderContext topContext = searcher.getTopReaderContext();
if (termContext == null || termContext.wasBuiltFor(topContext) == false) {
@ -73,7 +74,7 @@ public class SpanTermQuery extends SpanQuery {
else {
context = termContext;
}
return new SpanTermWeight(context, searcher, needsScores ? Collections.singletonMap(term, context) : null, boost);
return new SpanTermWeight(context, searcher, scoreMode.needsScores() ? Collections.singletonMap(term, context) : null, boost);
}
public class SpanTermWeight extends SpanWeight {

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreMode;
/** Keep matches that are contained within another Spans. */
public final class SpanWithinQuery extends SpanContainQuery {
@ -44,10 +45,10 @@ public final class SpanWithinQuery extends SpanContainQuery {
}
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
SpanWeight bigWeight = big.createWeight(searcher, false, boost);
SpanWeight littleWeight = little.createWeight(searcher, false, boost);
return new SpanWithinWeight(searcher, needsScores ? getTermContexts(bigWeight, littleWeight) : null,
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
SpanWeight bigWeight = big.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
SpanWeight littleWeight = little.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
return new SpanWithinWeight(searcher, scoreMode.needsScores() ? getTermContexts(bigWeight, littleWeight) : null,
bigWeight, littleWeight, boost);
}

View File

@ -29,6 +29,7 @@ import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@ -37,6 +38,7 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CannedTokenStream;
@ -2877,4 +2879,89 @@ public class TestIndexWriter extends LuceneTestCase {
dir.close();
}
public void testCheckPendingFlushPostUpdate() throws IOException, InterruptedException {
MockDirectoryWrapper dir = newMockDirectory();
Set<String> flushingThreads = Collections.synchronizedSet(new HashSet<>());
dir.failOn(new MockDirectoryWrapper.Failure() {
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
StackTraceElement[] trace = new Exception().getStackTrace();
for (int i = 0; i < trace.length; i++) {
if ("flush".equals(trace[i].getMethodName())
&& "org.apache.lucene.index.DocumentsWriterPerThread".equals(trace[i].getClassName())) {
flushingThreads.add(Thread.currentThread().getName());
break;
}
}
}
});
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig()
.setCheckPendingFlushUpdate(false)
.setMaxBufferedDocs(Integer.MAX_VALUE)
.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
AtomicBoolean done = new AtomicBoolean(false);
int numThreads = 1 + random().nextInt(3);
CountDownLatch latch = new CountDownLatch(numThreads);
Set<String> indexingThreads = new HashSet<>();
Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
threads[i] = new Thread(() -> {
latch.countDown();
int numDocs = 0;
while (done.get() == false) {
Document doc = new Document();
doc.add(new StringField("id", "foo", Field.Store.YES));
try {
w.addDocument(doc);
} catch (Exception e) {
throw new AssertionError(e);
}
if (numDocs++ % 10 == 0) {
Thread.yield();
}
}
});
indexingThreads.add(threads[i].getName());
threads[i].start();
}
latch.await();
try {
int numIters = rarely() ? 1 + random().nextInt(5) : 1;
for (int i = 0; i < numIters; i++) {
waitForDocs(w);
w.commit();
assertTrue(flushingThreads.toString(), flushingThreads.contains(Thread.currentThread().getName()));
flushingThreads.retainAll(indexingThreads);
assertTrue(flushingThreads.toString(), flushingThreads.isEmpty());
}
w.getConfig().setCheckPendingFlushUpdate(true);
numIters = 0;
while (true) {
assertFalse("should finish in less than 100 iterations", numIters++ >= 100);
waitForDocs(w);
w.flush();
flushingThreads.retainAll(indexingThreads);
if (flushingThreads.isEmpty() == false) {
break;
}
}
} finally {
done.set(true);
for (int i = 0; i < numThreads; i++) {
threads[i].join();
}
IOUtils.close(w, dir);
}
}
private static void waitForDocs(IndexWriter w) {
int numDocsInRam = w.numRamDocs();
while(true) {
if (numDocsInRam != w.numRamDocs()) {
return;
}
}
}
}

View File

@ -74,6 +74,7 @@ public class TestIndexWriterConfig extends LuceneTestCase {
assertEquals(Codec.getDefault(), conf.getCodec());
assertEquals(InfoStream.getDefault(), conf.getInfoStream());
assertEquals(IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM, conf.getUseCompoundFile());
assertTrue(conf.isCheckPendingFlushOnUpdate());
// Sanity check - validate that all getters are covered.
Set<String> getters = new HashSet<>();
getters.add("getAnalyzer");
@ -98,6 +99,7 @@ public class TestIndexWriterConfig extends LuceneTestCase {
getters.add("getCodec");
getters.add("getInfoStream");
getters.add("getUseCompoundFile");
getters.add("isCheckPendingFlushOnUpdate");
for (Method m : IndexWriterConfig.class.getDeclaredMethods()) {
if (m.getDeclaringClass() == IndexWriterConfig.class && m.getName().startsWith("get")) {

View File

@ -123,6 +123,10 @@ public class TestMaxTermFrequency extends LuceneTestCase {
return 0;
}
@Override
public float maxScore(float maxFreq) {
return 0;
}
};
}

View File

@ -31,6 +31,7 @@ import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TermQuery;
@ -310,8 +311,8 @@ public class TestOmitTf extends LuceneTestCase {
new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
public final void setScorer(Scorer scorer) {
@ -332,8 +333,8 @@ public class TestOmitTf extends LuceneTestCase {
new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
public final void setScorer(Scorer scorer) {
@ -357,8 +358,8 @@ public class TestOmitTf extends LuceneTestCase {
new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
public final void setScorer(Scorer scorer) {
@ -380,8 +381,8 @@ public class TestOmitTf extends LuceneTestCase {
new CountingHitCollector() {
private Scorer scorer;
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
public final void setScorer(Scorer scorer) {
@ -438,8 +439,8 @@ public class TestOmitTf extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return false;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
}

View File

@ -55,7 +55,7 @@ final class JustCompileSearch {
}
@Override
public boolean needsScores() {
public ScoreMode scoreMode() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
}
@ -175,6 +175,11 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public float maxScore() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public int docID() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
@ -231,7 +236,7 @@ final class JustCompileSearch {
}
@Override
public boolean needsScores() {
public ScoreMode scoreMode() {
throw new UnsupportedOperationException( UNSUPPORTED_MSG );
}
}

View File

@ -51,8 +51,8 @@ public class MultiCollectorTest extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
}
@ -102,7 +102,7 @@ public class MultiCollectorTest extends LuceneTestCase {
}
private static Collector collector(boolean needsScores, Class<?> expectedScorer) {
private static Collector collector(ScoreMode scoreMode, Class<?> expectedScorer) {
return new Collector() {
@Override
@ -121,8 +121,8 @@ public class MultiCollectorTest extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return needsScores;
public ScoreMode scoreMode() {
return scoreMode;
}
};
@ -139,22 +139,22 @@ public class MultiCollectorTest extends LuceneTestCase {
final LeafReaderContext ctx = reader.leaves().get(0);
expectThrows(AssertionError.class, () -> {
collector(false, ScoreCachingWrappingScorer.class).getLeafCollector(ctx).setScorer(new FakeScorer());
collector(ScoreMode.COMPLETE_NO_SCORES, ScoreCachingWrappingScorer.class).getLeafCollector(ctx).setScorer(new FakeScorer());
});
// no collector needs scores => no caching
Collector c1 = collector(false, FakeScorer.class);
Collector c2 = collector(false, FakeScorer.class);
Collector c1 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
Collector c2 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
// only one collector needs scores => no caching
c1 = collector(true, FakeScorer.class);
c2 = collector(false, FakeScorer.class);
c1 = collector(ScoreMode.COMPLETE, FakeScorer.class);
c2 = collector(ScoreMode.COMPLETE_NO_SCORES, FakeScorer.class);
MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
// several collectors need scores => caching
c1 = collector(true, ScoreCachingWrappingScorer.class);
c2 = collector(true, ScoreCachingWrappingScorer.class);
c1 = collector(ScoreMode.COMPLETE, ScoreCachingWrappingScorer.class);
c2 = collector(ScoreMode.COMPLETE, ScoreCachingWrappingScorer.class);
MultiCollector.wrap(c1, c2).getLeafCollector(ctx).setScorer(new FakeScorer());
reader.close();

View File

@ -51,6 +51,10 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
}
@Override
public float maxScore() {
return 1;
}
public DocIdSetIterator iterator() {
return it;
}
@ -104,13 +108,13 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
}
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42));
assertEquals(42, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
assertEquals(42, new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).cost());
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12));
assertEquals(12, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
assertEquals(12, new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).cost());
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(20));
assertEquals(12, new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).cost());
assertEquals(12, new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).cost());
}
public void testDisjunctionCost() throws IOException {
@ -120,17 +124,17 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
}
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
assertEquals(42, s.cost());
assertEquals(42, s.get(random().nextInt(100)).iterator().cost());
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
assertEquals(42 + 12, s.cost());
assertEquals(42 + 12, s.get(random().nextInt(100)).iterator().cost());
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0);
assertEquals(42 + 12 + 20, s.cost());
assertEquals(42 + 12 + 20, s.get(random().nextInt(100)).iterator().cost());
}
@ -143,26 +147,26 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12));
ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
ScorerSupplier s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
assertEquals(42 + 12, s.cost());
assertEquals(42 + 12, s.get(random().nextInt(100)).iterator().cost());
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20));
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
assertEquals(42 + 12 + 20, s.cost());
assertEquals(42 + 12 + 20, s.get(random().nextInt(100)).iterator().cost());
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
assertEquals(12 + 20, s.cost());
assertEquals(12 + 20, s.get(random().nextInt(100)).iterator().cost());
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30));
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 1);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 1);
assertEquals(42 + 12 + 20 + 30, s.cost());
assertEquals(42 + 12 + 20 + 30, s.get(random().nextInt(100)).iterator().cost());
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2);
assertEquals(12 + 20 + 30, s.cost());
assertEquals(12 + 20 + 30, s.get(random().nextInt(100)).iterator().cost());
s = new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 3);
s = new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3);
assertEquals(12 + 20, s.cost());
assertEquals(12 + 20, s.get(random().nextInt(100)).iterator().cost());
}
@ -186,8 +190,8 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
numRequired++;
}
}
boolean needsScores = random().nextBoolean();
if (needsScores == false && numRequired > 0) {
ScoreMode scoreMode = RandomPicks.randomFrom(random(), ScoreMode.values());
if (scoreMode.needsScores() == false && numRequired > 0) {
numClauses -= numShoulds;
numShoulds = 0;
subs.get(Occur.SHOULD).clear();
@ -198,7 +202,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
}
int minShouldMatch = numShoulds == 0 ? 0 : TestUtil.nextInt(random(), 0, numShoulds - 1);
Boolean2ScorerSupplier supplier = new Boolean2ScorerSupplier(null,
subs, needsScores, minShouldMatch);
subs, scoreMode, minShouldMatch);
long cost1 = supplier.cost();
long cost2 = supplier.get(Long.MAX_VALUE).iterator().cost();
assertEquals("clauses=" + subs + ", minShouldMatch=" + minShouldMatch, cost1, cost2);
@ -222,7 +226,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
// If the clauses are less costly than the lead cost, the min cost is the new lead cost
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, 12));
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, 12));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(Long.MAX_VALUE); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(Long.MAX_VALUE); // triggers assertions as a side-effect
subs = new EnumMap<>(Occur.class);
for (Occur occur : Occur.values()) {
@ -232,7 +236,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
// If the lead cost is less that the clauses' cost, then we don't modify it
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(42, 7));
subs.get(RandomPicks.randomFrom(random(), Arrays.asList(Occur.FILTER, Occur.MUST))).add(new FakeScorerSupplier(12, 7));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(7); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(7); // triggers assertions as a side-effect
}
public void testDisjunctionLeadCost() throws IOException {
@ -242,12 +246,12 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
}
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 54));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 54));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
subs.get(Occur.SHOULD).clear();
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 20));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 20));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(20); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
}
public void testDisjunctionWithMinShouldMatchLeadCost() throws IOException {
@ -261,7 +265,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(50, 42));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 42));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 42));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
subs = new EnumMap<>(Occur.class);
for (Occur occur : Occur.values()) {
@ -272,7 +276,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(42, 20));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 20));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 20));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(20); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(20); // triggers assertions as a side-effect
subs = new EnumMap<>(Occur.class);
for (Occur occur : Occur.values()) {
@ -283,7 +287,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 62));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 62));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, 62));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 2).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 2).get(100); // triggers assertions as a side-effect
subs = new EnumMap<>(Occur.class);
for (Occur occur : Occur.values()) {
@ -294,7 +298,7 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(12, 32));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 32));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(20, 32));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 3).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 3).get(100); // triggers assertions as a side-effect
}
public void testProhibitedLeadCost() throws IOException {
@ -306,19 +310,19 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
// The MUST_NOT clause is called with the same lead cost as the MUST clause
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(30, 42));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
subs.get(Occur.MUST).clear();
subs.get(Occur.MUST_NOT).clear();
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(80, 42));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(100); // triggers assertions as a side-effect
subs.get(Occur.MUST).clear();
subs.get(Occur.MUST_NOT).clear();
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 20));
subs.get(Occur.MUST_NOT).add(new FakeScorerSupplier(30, 20));
new Boolean2ScorerSupplier(null, subs, random().nextBoolean(), 0).get(20); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, RandomPicks.randomFrom(random(), ScoreMode.values()), 0).get(20); // triggers assertions as a side-effect
}
public void testMixedLeadCost() throws IOException {
@ -330,19 +334,19 @@ public class TestBoolean2ScorerSupplier extends LuceneTestCase {
// The SHOULD clause is always called with the same lead cost as the MUST clause
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(30, 42));
new Boolean2ScorerSupplier(null, subs, true, 0).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
subs.get(Occur.MUST).clear();
subs.get(Occur.SHOULD).clear();
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 42));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(80, 42));
new Boolean2ScorerSupplier(null, subs, true, 0).get(100); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(100); // triggers assertions as a side-effect
subs.get(Occur.MUST).clear();
subs.get(Occur.SHOULD).clear();
subs.get(Occur.MUST).add(new FakeScorerSupplier(42, 20));
subs.get(Occur.SHOULD).add(new FakeScorerSupplier(80, 20));
new Boolean2ScorerSupplier(null, subs, true, 0).get(20); // triggers assertions as a side-effect
new Boolean2ScorerSupplier(null, subs, ScoreMode.COMPLETE, 0).get(20); // triggers assertions as a side-effect
}
}

View File

@ -186,7 +186,7 @@ public class TestBooleanOr extends LuceneTestCase {
bq.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
bq.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
Weight w = s.createNormalizedWeight(bq.build(), true);
Weight w = s.createNormalizedWeight(bq.build(), ScoreMode.COMPLETE);
assertEquals(1, s.getIndexReader().leaves().size());
BulkScorer scorer = w.bulkScorer(s.getIndexReader().leaves().get(0));
@ -202,8 +202,8 @@ public class TestBooleanOr extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return false;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
};

View File

@ -313,7 +313,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(new BooleanClause(new TermQuery(new Term("field", term)), BooleanClause.Occur.SHOULD));
}
Weight weight = s.createNormalizedWeight(q.build(), true);
Weight weight = s.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
Scorer scorer = weight.scorer(s.leafContexts.get(0));
@ -331,7 +331,7 @@ public class TestBooleanQuery extends LuceneTestCase {
// verify exact match:
for(int iter2=0;iter2<10;iter2++) {
weight = s.createNormalizedWeight(q.build(), true);
weight = s.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
scorer = weight.scorer(s.leafContexts.get(0));
if (VERBOSE) {
@ -431,8 +431,8 @@ public class TestBooleanQuery extends LuceneTestCase {
searcher.search(query, new SimpleCollector() {
int docBase = 0;
@Override
public boolean needsScores() {
return random().nextBoolean();
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
@Override
protected void doSetNextReader(LeafReaderContext context)
@ -511,8 +511,8 @@ public class TestBooleanQuery extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
@ -617,7 +617,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(pq, Occur.MUST);
q.add(new TermQuery(new Term("field", "c")), Occur.FILTER);
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Weight weight = searcher.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertTrue(scorer instanceof ConjunctionScorer);
assertNotNull(scorer.twoPhaseIterator());
@ -646,7 +646,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(pq, Occur.SHOULD);
q.add(new TermQuery(new Term("field", "c")), Occur.SHOULD);
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Weight weight = searcher.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof DisjunctionScorer);
assertNotNull(scorer.twoPhaseIterator());
@ -677,7 +677,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(pq, Occur.SHOULD);
q.add(new TermQuery(new Term("field", "d")), Occur.SHOULD);
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Weight weight = searcher.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertTrue(scorer instanceof ExactPhraseScorer);
assertNotNull(scorer.twoPhaseIterator());
@ -706,7 +706,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(pq, Occur.SHOULD);
q.add(new TermQuery(new Term("field", "c")), Occur.MUST_NOT);
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Weight weight = searcher.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof ReqExclScorer);
assertNotNull(scorer.twoPhaseIterator());
@ -735,7 +735,7 @@ public class TestBooleanQuery extends LuceneTestCase {
q.add(pq, Occur.MUST);
q.add(new TermQuery(new Term("field", "c")), Occur.SHOULD);
final Weight weight = searcher.createNormalizedWeight(q.build(), true);
final Weight weight = searcher.createNormalizedWeight(q.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof ReqOptSumScorer);
assertNotNull(scorer.twoPhaseIterator());
@ -768,11 +768,11 @@ public class TestBooleanQuery extends LuceneTestCase {
BooleanQuery bq = bqBuilder.build();
Set<Term> scoringTerms = new HashSet<>();
searcher.createNormalizedWeight(bq, true).extractTerms(scoringTerms);
searcher.createNormalizedWeight(bq, ScoreMode.COMPLETE).extractTerms(scoringTerms);
assertEquals(new HashSet<>(Arrays.asList(a, b)), scoringTerms);
Set<Term> matchingTerms = new HashSet<>();
searcher.createNormalizedWeight(bq, false).extractTerms(matchingTerms);
searcher.createNormalizedWeight(bq, ScoreMode.COMPLETE_NO_SCORES).extractTerms(matchingTerms);
assertEquals(new HashSet<>(Arrays.asList(a, b, c)), matchingTerms);
}
}

View File

@ -197,7 +197,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq1.add(new TermQuery(new Term(F1, "lucene")), Occur.SHOULD);
bq1.add(new PhraseQuery(F2, "search", "engine"), Occur.SHOULD);
Weight w1 = scorerSearcher.createNormalizedWeight(bq1.build(), true);
Weight w1 = scorerSearcher.createNormalizedWeight(bq1.build(), ScoreMode.COMPLETE);
Scorer s1 = w1.scorer(reader.leaves().get(0));
assertEquals(0, s1.iterator().nextDoc());
assertEquals(2, s1.getChildren().size());
@ -206,7 +206,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq2.add(new TermQuery(new Term(F1, "lucene")), Occur.SHOULD);
bq2.add(new PhraseQuery(F2, "search", "library"), Occur.SHOULD);
Weight w2 = scorerSearcher.createNormalizedWeight(bq2.build(), true);
Weight w2 = scorerSearcher.createNormalizedWeight(bq2.build(), ScoreMode.COMPLETE);
Scorer s2 = w2.scorer(reader.leaves().get(0));
assertEquals(0, s2.iterator().nextDoc());
assertEquals(1, s2.getChildren().size());
@ -219,7 +219,7 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
bq.add(new PhraseQuery(F2, "search", "library"), Occur.SHOULD);
bq.setMinimumNumberShouldMatch(2);
Weight w = scorerSearcher.createNormalizedWeight(bq.build(), true);
Weight w = scorerSearcher.createNormalizedWeight(bq.build(), ScoreMode.COMPLETE);
Scorer s = w.scorer(reader.leaves().get(0));
assertEquals(0, s.iterator().nextDoc());
assertEquals(2, s.getChildren().size());
@ -275,8 +275,8 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
}
@Override
public boolean needsScores() {
return true;
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
@Override
@ -340,6 +340,10 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
public float score(int doc, float freq) throws IOException {
return freq;
}
@Override
public float maxScore(float maxFreq) {
return maxFreq;
}
};
}
}

View File

@ -93,7 +93,7 @@ public class TestBooleanRewrites extends LuceneTestCase {
BooleanQuery.Builder query2 = new BooleanQuery.Builder();
query2.add(new TermQuery(new Term("field", "a")), Occur.FILTER);
query2.add(new TermQuery(new Term("field", "b")), Occur.SHOULD);
final Weight weight = searcher.createNormalizedWeight(query2.build(), true);
final Weight weight = searcher.createNormalizedWeight(query2.build(), ScoreMode.COMPLETE);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertEquals(0, scorer.iterator().nextDoc());
assertTrue(scorer.getClass().getName(), scorer instanceof FilterScorer);

View File

@ -77,7 +77,7 @@ public class TestBooleanScorer extends LuceneTestCase {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new Weight(CrazyMustUseBulkScorerQuery.this) {
@Override
public void extractTerms(Set<Term> terms) {
@ -172,7 +172,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.build();
// no scores -> term scorer
Weight weight = searcher.createNormalizedWeight(query, false);
Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE_NO_SCORES);
BulkScorer scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof DefaultBulkScorer); // term scorer
@ -181,7 +181,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "bar")), Occur.SHOULD) // existing term
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD) // missing term
.build();
weight = searcher.createNormalizedWeight(query, true);
weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof DefaultBulkScorer); // term scorer
@ -210,7 +210,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
Weight weight = searcher.createNormalizedWeight(query, true);
Weight weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
BulkScorer scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@ -219,7 +219,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new MatchAllDocsQuery(), Occur.SHOULD)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
weight = searcher.createNormalizedWeight(query, true);
weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@ -227,7 +227,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.MUST)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
weight = searcher.createNormalizedWeight(query, true);
weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);
@ -235,7 +235,7 @@ public class TestBooleanScorer extends LuceneTestCase {
.add(new TermQuery(new Term("foo", "baz")), Occur.FILTER)
.add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT)
.build();
weight = searcher.createNormalizedWeight(query, true);
weight = searcher.createNormalizedWeight(query, ScoreMode.COMPLETE);
scorer = ((BooleanWeight) weight).booleanScorer(ctx);
assertTrue(scorer instanceof ReqExclBulkScorer);

Some files were not shown because too many files have changed in this diff Show More