LUCENE-6919: Make Scorer expose an iterator instead of extending DocIdSetIterator.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1719081 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Adrien Grand 2015-12-10 13:43:50 +00:00
parent d33ca97167
commit ddb3a97678
121 changed files with 1548 additions and 1637 deletions

View File

@ -123,6 +123,10 @@ API Changes
* LUCENE-6900: Grouping sortWithinGroup variables used to allow null to mean
Sort.RELEVANCE. Null is no longer permitted. (David Smiley)
* LUCENE-6919: The Scorer class has been refactored to expose an iterator
instead of extending DocIdSetIterator. asTwoPhaseIterator() has been renamed
to twoPhaseIterator() for consistency. (Adrien Grand)
Optimizations
* LUCENE-6889: BooleanQuery.rewrite now performs some query optimization, in

View File

@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.Accountable;
@ -690,8 +691,9 @@ class BufferedUpdatesStream implements Accountable {
final IndexSearcher searcher = new IndexSearcher(readerContext.reader());
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final DocIdSetIterator it = weight.scorer(readerContext);
if (it != null) {
final Scorer scorer = weight.scorer(readerContext);
if (scorer != null) {
final DocIdSetIterator it = scorer.iterator();
final Bits liveDocs = readerContext.reader().getLiveDocs();
while (true) {
int doc = it.nextDoc();

View File

@ -123,22 +123,25 @@ class BooleanTopLevelScorers {
this.coordReq = coordReq;
this.coordBoth = coordBoth;
}
@Override
public float score() throws IOException {
// TODO: sum into a double and cast to float if we ever send required clauses to BS1
int curDoc = reqScorer.docID();
float reqScore = reqScorer.score();
if (optScorer == null) {
return reqScore * coordReq;
float score = reqScorer.score();
int optScorerDoc = optIterator.docID();
if (optScorerDoc < curDoc) {
optScorerDoc = optIterator.advance(curDoc);
}
int optScorerDoc = optScorer.docID();
if (optScorerDoc < curDoc && (optScorerDoc = optScorer.advance(curDoc)) == NO_MORE_DOCS) {
optScorer = null;
return reqScore * coordReq;
if (optScorerDoc == curDoc) {
score = (score + optScorer.score()) * coordBoth;
} else {
score = score * coordReq;
}
return optScorerDoc == curDoc ? (reqScore + optScorer.score()) * coordBoth : reqScore * coordReq;
return score;
}
}
@ -155,22 +158,25 @@ class BooleanTopLevelScorers {
this.requiredCount = requiredCount;
this.coords = coords;
}
@Override
public float score() throws IOException {
// TODO: sum into a double and cast to float if we ever send required clauses to BS1
int curDoc = reqScorer.docID();
float reqScore = reqScorer.score();
if (optScorer == null) {
return reqScore * coords[requiredCount];
float score = reqScorer.score();
int optScorerDoc = optIterator.docID();
if (optScorerDoc < curDoc) {
optScorerDoc = optIterator.advance(curDoc);
}
int optScorerDoc = optScorer.docID();
if (optScorerDoc < curDoc && (optScorerDoc = optScorer.advance(curDoc)) == NO_MORE_DOCS) {
optScorer = null;
return reqScore * coords[requiredCount];
if (optScorerDoc == curDoc) {
score = (score + optScorer.score()) * coords[requiredCount + optScorer.freq()];
} else {
score = score * coords[requiredCount];
}
return optScorerDoc == curDoc ? (reqScore + optScorer.score()) * coords[requiredCount + optScorer.freq()] : reqScore * coords[requiredCount];
return score;
}
}
}

View File

@ -84,7 +84,7 @@ public abstract class BulkScorer {
public abstract int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException;
/**
* Same as {@link Scorer#cost()} for bulk scorers.
* Same as {@link DocIdSetIterator#cost()} for bulk scorers.
*/
public abstract long cost();
}

View File

@ -61,24 +61,22 @@ public abstract class CachingCollector extends FilterCollector {
private CachedScorer() { super(null); }
@Override
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}
@Override
public final float score() { return score; }
@Override
public final int advance(int target) { throw new UnsupportedOperationException(); }
@Override
public final int docID() { return doc; }
public int docID() {
return doc;
}
@Override
public final int freq() { throw new UnsupportedOperationException(); }
@Override
public final int nextDoc() { throw new UnsupportedOperationException(); }
@Override
public long cost() { return 1; }
}
private static class NoScoreCachingCollector extends CachingCollector {

View File

@ -23,6 +23,7 @@ import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.CollectionUtil;
/** A conjunction of DocIdSetIterators.
@ -32,16 +33,16 @@ import org.apache.lucene.util.CollectionUtil;
*/
public class ConjunctionDISI extends DocIdSetIterator {
/** Create a conjunction over the provided iterators, taking advantage of
* {@link TwoPhaseIterator}. */
public static ConjunctionDISI intersect(List<? extends DocIdSetIterator> iterators) {
if (iterators.size() < 2) {
/** Create a conjunction over the provided {@link Scorer}s, taking advantage
* of {@link TwoPhaseIterator}. */
public static ConjunctionDISI intersectScorers(List<Scorer> scorers) {
if (scorers.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
}
final List<DocIdSetIterator> allIterators = new ArrayList<>();
final List<TwoPhaseIterator> twoPhaseIterators = new ArrayList<>();
for (DocIdSetIterator iter : iterators) {
addIterator(iter, allIterators, twoPhaseIterators);
for (Scorer scorer : scorers) {
addScorer(scorer, allIterators, twoPhaseIterators);
}
if (twoPhaseIterators.isEmpty()) {
@ -51,12 +52,66 @@ public class ConjunctionDISI extends DocIdSetIterator {
}
}
/** Adds the iterator, possibly splitting up into two phases or collapsing if it is another conjunction */
/** Create a conjunction over the provided DocIdSetIterators. */
public static ConjunctionDISI intersectIterators(List<DocIdSetIterator> iterators) {
if (iterators.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
}
final List<DocIdSetIterator> allIterators = new ArrayList<>();
final List<TwoPhaseIterator> twoPhaseIterators = new ArrayList<>();
for (DocIdSetIterator iterator : iterators) {
addIterator(iterator, allIterators, twoPhaseIterators);
}
if (twoPhaseIterators.isEmpty()) {
return new ConjunctionDISI(allIterators);
} else {
return new TwoPhase(allIterators, twoPhaseIterators);
}
}
/** Create a conjunction over the provided {@link Spans}, taking advantage
   * of {@link TwoPhaseIterator} where available. */
public static ConjunctionDISI intersectSpans(List<Spans> spanList) {
if (spanList.size() < 2) {
throw new IllegalArgumentException("Cannot make a ConjunctionDISI of less than 2 iterators");
}
final List<DocIdSetIterator> allIterators = new ArrayList<>();
final List<TwoPhaseIterator> twoPhaseIterators = new ArrayList<>();
for (Spans spans : spanList) {
addSpans(spans, allIterators, twoPhaseIterators);
}
if (twoPhaseIterators.isEmpty()) {
return new ConjunctionDISI(allIterators);
} else {
return new TwoPhase(allIterators, twoPhaseIterators);
}
}
/** Adds the scorer, possibly splitting up into two phases or collapsing if it is another conjunction */
private static void addScorer(Scorer scorer, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
TwoPhaseIterator twoPhaseIter = scorer.twoPhaseIterator();
if (twoPhaseIter != null) {
addTwoPhaseIterator(twoPhaseIter, allIterators, twoPhaseIterators);
} else { // no approximation support, use the iterator as-is
addIterator(scorer.iterator(), allIterators, twoPhaseIterators);
}
}
/** Adds the Spans. */
private static void addSpans(Spans spans, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
TwoPhaseIterator twoPhaseIter = spans.asTwoPhaseIterator();
if (twoPhaseIter != null) {
addTwoPhaseIterator(twoPhaseIter, allIterators, twoPhaseIterators);
} else { // no approximation support, use the iterator as-is
addIterator(spans, allIterators, twoPhaseIterators);
}
}
private static void addIterator(DocIdSetIterator disi, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
// Check for exactly this class for collapsing. Subclasses can do their own optimizations.
if (disi.getClass() == ConjunctionScorer.class) {
addIterator(((ConjunctionScorer) disi).disi, allIterators, twoPhaseIterators);
} else if (disi.getClass() == ConjunctionDISI.class || disi.getClass() == TwoPhase.class) {
if (disi.getClass() == ConjunctionDISI.class || disi.getClass() == TwoPhase.class) {
ConjunctionDISI conjunction = (ConjunctionDISI) disi;
// subconjunctions have already split themselves into two-phase iterators and others, so we can take those
// iterators as they are and move them up to this conjunction
@ -67,16 +122,15 @@ public class ConjunctionDISI extends DocIdSetIterator {
Collections.addAll(twoPhaseIterators, twoPhase.twoPhaseView.twoPhaseIterators);
}
} else {
TwoPhaseIterator twoPhaseIter = TwoPhaseIterator.asTwoPhaseIterator(disi);
if (twoPhaseIter != null) {
allIterators.add(twoPhaseIter.approximation());
twoPhaseIterators.add(twoPhaseIter);
} else { // no approximation support, use the iterator as-is
allIterators.add(disi);
}
allIterators.add(disi);
}
}
private static void addTwoPhaseIterator(TwoPhaseIterator twoPhaseIter, List<DocIdSetIterator> allIterators, List<TwoPhaseIterator> twoPhaseIterators) {
addIterator(twoPhaseIter.approximation(), allIterators, twoPhaseIterators);
twoPhaseIterators.add(twoPhaseIter);
}
final DocIdSetIterator lead;
final DocIdSetIterator[] others;

View File

@ -29,27 +29,27 @@ class ConjunctionScorer extends Scorer {
final Scorer[] scorers;
final float coord;
ConjunctionScorer(Weight weight, List<? extends DocIdSetIterator> required, List<Scorer> scorers) {
ConjunctionScorer(Weight weight, List<Scorer> required, List<Scorer> scorers) {
this(weight, required, scorers, 1f);
}
/** Create a new {@link ConjunctionScorer}, note that {@code scorers} must be a subset of {@code required}. */
ConjunctionScorer(Weight weight, List<? extends DocIdSetIterator> required, List<Scorer> scorers, float coord) {
ConjunctionScorer(Weight weight, List<Scorer> required, List<Scorer> scorers, float coord) {
super(weight);
assert required.containsAll(scorers);
this.coord = coord;
this.disi = ConjunctionDISI.intersect(required);
this.disi = ConjunctionDISI.intersectScorers(required);
this.scorers = scorers.toArray(new Scorer[scorers.size()]);
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public TwoPhaseIterator twoPhaseIterator() {
return disi.asTwoPhaseIterator();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
public DocIdSetIterator iterator() {
return disi;
}
@Override
@ -57,11 +57,6 @@ class ConjunctionScorer extends Scorer {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public float score() throws IOException {
double sum = 0.0d;
@ -76,11 +71,6 @@ class ConjunctionScorer extends Scorer {
return scorers.length;
}
@Override
public long cost() {
return disi.cost();
}
@Override
public Collection<ChildScorer> getChildren() {
ArrayList<ChildScorer> children = new ArrayList<>();

View File

@ -54,10 +54,20 @@ public final class ConstantScoreScorer extends Scorer {
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public DocIdSetIterator iterator() {
return disi;
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return twoPhaseIterator;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public float score() throws IOException {
return score;
@ -68,24 +78,5 @@ public final class ConstantScoreScorer extends Scorer {
return 1;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
}

View File

@ -82,9 +82,9 @@ public abstract class ConstantScoreWeight extends Weight {
if (s == null) {
exists = false;
} else {
final TwoPhaseIterator twoPhase = s.asTwoPhaseIterator();
final TwoPhaseIterator twoPhase = s.twoPhaseIterator();
if (twoPhase == null) {
exists = s.advance(doc) == doc;
exists = s.iterator().advance(doc) == doc;
} else {
exists = twoPhase.approximation().advance(doc) == doc && twoPhase.matches();
}

View File

@ -28,8 +28,7 @@ import org.apache.lucene.util.PriorityQueue;
* pluggable comparison function makes the rebalancing quite slow.
* @lucene.internal
*/
public final class DisiPriorityQueue<Iter extends DocIdSetIterator>
implements Iterable<DisiWrapper<Iter>> {
public final class DisiPriorityQueue implements Iterable<DisiWrapper> {
static int leftNode(int node) {
return ((node + 1) << 1) - 1;
@ -43,10 +42,9 @@ implements Iterable<DisiWrapper<Iter>> {
return ((node + 1) >>> 1) - 1;
}
private final DisiWrapper<Iter>[] heap;
private final DisiWrapper[] heap;
private int size;
@SuppressWarnings({"unchecked","rawtypes"})
public DisiPriorityQueue(int maxSize) {
heap = new DisiWrapper[maxSize];
size = 0;
@ -56,15 +54,15 @@ implements Iterable<DisiWrapper<Iter>> {
return size;
}
public DisiWrapper<Iter> top() {
public DisiWrapper top() {
return heap[0];
}
/** Get the list of scorers which are on the current doc. */
public DisiWrapper<Iter> topList() {
final DisiWrapper<Iter>[] heap = this.heap;
public DisiWrapper topList() {
final DisiWrapper[] heap = this.heap;
final int size = this.size;
DisiWrapper<Iter> list = heap[0];
DisiWrapper list = heap[0];
list.next = null;
if (size >= 3) {
list = topList(list, heap, size, 1);
@ -76,14 +74,14 @@ implements Iterable<DisiWrapper<Iter>> {
}
// prepend w1 (iterator) to w2 (list)
private DisiWrapper<Iter> prepend(DisiWrapper<Iter> w1, DisiWrapper<Iter> w2) {
private DisiWrapper prepend(DisiWrapper w1, DisiWrapper w2) {
w1.next = w2;
return w1;
}
private DisiWrapper<Iter> topList(DisiWrapper<Iter> list, DisiWrapper<Iter>[] heap,
private DisiWrapper topList(DisiWrapper list, DisiWrapper[] heap,
int size, int i) {
final DisiWrapper<Iter> w = heap[i];
final DisiWrapper w = heap[i];
if (w.doc == list.doc) {
list = prepend(w, list);
final int left = leftNode(i);
@ -98,8 +96,8 @@ implements Iterable<DisiWrapper<Iter>> {
return list;
}
public DisiWrapper<Iter> add(DisiWrapper<Iter> entry) {
final DisiWrapper<Iter>[] heap = this.heap;
public DisiWrapper add(DisiWrapper entry) {
final DisiWrapper[] heap = this.heap;
final int size = this.size;
heap[size] = entry;
upHeap(size);
@ -107,9 +105,9 @@ implements Iterable<DisiWrapper<Iter>> {
return heap[0];
}
public DisiWrapper<Iter> pop() {
final DisiWrapper<Iter>[] heap = this.heap;
final DisiWrapper<Iter> result = heap[0];
public DisiWrapper pop() {
final DisiWrapper[] heap = this.heap;
final DisiWrapper result = heap[0];
final int i = --size;
heap[0] = heap[i];
heap[i] = null;
@ -117,18 +115,18 @@ implements Iterable<DisiWrapper<Iter>> {
return result;
}
public DisiWrapper<Iter> updateTop() {
public DisiWrapper updateTop() {
downHeap(size);
return heap[0];
}
DisiWrapper<Iter> updateTop(DisiWrapper<Iter> topReplacement) {
DisiWrapper updateTop(DisiWrapper topReplacement) {
heap[0] = topReplacement;
return updateTop();
}
void upHeap(int i) {
final DisiWrapper<Iter> node = heap[i];
final DisiWrapper node = heap[i];
final int nodeDoc = node.doc;
int j = parentNode(i);
while (j >= 0 && nodeDoc < heap[j].doc) {
@ -141,7 +139,7 @@ implements Iterable<DisiWrapper<Iter>> {
void downHeap(int size) {
int i = 0;
final DisiWrapper<Iter> node = heap[0];
final DisiWrapper node = heap[0];
int j = leftNode(i);
if (j < size) {
int k = rightNode(j);
@ -164,7 +162,7 @@ implements Iterable<DisiWrapper<Iter>> {
}
@Override
public Iterator<DisiWrapper<Iter>> iterator() {
public Iterator<DisiWrapper> iterator() {
return Arrays.asList(heap).subList(0, size).iterator();
}

View File

@ -17,15 +17,18 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.search.spans.Spans;
/**
* Wrapper used in {@link DisiPriorityQueue}.
* @lucene.internal
*/
public class DisiWrapper<Iter extends DocIdSetIterator> {
public final Iter iterator;
public class DisiWrapper {
public final DocIdSetIterator iterator;
public final Scorer scorer;
public final long cost;
public int doc; // the current doc, used for comparison
public DisiWrapper<Iter> next; // reference to a next element, see #topList
public DisiWrapper next; // reference to a next element, see #topList
// An approximation of the iterator, or the iterator itself if it does not
// support two-phase iteration
@ -33,15 +36,34 @@ public class DisiWrapper<Iter extends DocIdSetIterator> {
// A two-phase view of the iterator, or null if the iterator does not support
// two-phase iteration
public final TwoPhaseIterator twoPhaseView;
// FOR SPANS
public final Spans spans;
public int lastApproxMatchDoc; // last doc of approximation that did match
public int lastApproxNonMatchDoc; // last doc of approximation that did not match
public DisiWrapper(Iter iterator) {
this.iterator = iterator;
public DisiWrapper(Scorer scorer) {
this.scorer = scorer;
this.spans = null;
this.iterator = scorer.iterator();
this.cost = iterator.cost();
this.doc = -1;
this.twoPhaseView = TwoPhaseIterator.asTwoPhaseIterator(iterator);
this.twoPhaseView = scorer.twoPhaseIterator();
if (twoPhaseView != null) {
approximation = twoPhaseView.approximation();
} else {
approximation = iterator;
}
}
public DisiWrapper(Spans spans) {
this.scorer = null;
this.spans = spans;
this.iterator = spans;
this.cost = iterator.cost();
this.doc = -1;
this.twoPhaseView = spans.asTwoPhaseIterator();
if (twoPhaseView != null) {
approximation = twoPhaseView.approximation();

View File

@ -23,16 +23,15 @@ import java.io.IOException;
* the provided iterators.
* @lucene.internal
*/
public class DisjunctionDISIApproximation<Iter extends DocIdSetIterator>
extends DocIdSetIterator {
public class DisjunctionDISIApproximation extends DocIdSetIterator {
final DisiPriorityQueue<Iter> subIterators;
final DisiPriorityQueue subIterators;
final long cost;
public DisjunctionDISIApproximation(DisiPriorityQueue<Iter> subIterators) {
public DisjunctionDISIApproximation(DisiPriorityQueue subIterators) {
this.subIterators = subIterators;
long cost = 0;
for (DisiWrapper<Iter> w : subIterators) {
for (DisiWrapper w : subIterators) {
cost += w.cost;
}
this.cost = cost;
@ -50,7 +49,7 @@ extends DocIdSetIterator {
@Override
public int nextDoc() throws IOException {
DisiWrapper<Iter> top = subIterators.top();
DisiWrapper top = subIterators.top();
final int doc = top.doc;
do {
top.doc = top.approximation.nextDoc();
@ -62,7 +61,7 @@ extends DocIdSetIterator {
@Override
public int advance(int target) throws IOException {
DisiWrapper<Iter> top = subIterators.top();
DisiWrapper top = subIterators.top();
do {
top.doc = top.approximation.advance(target);
top = subIterators.updateTop();

View File

@ -46,11 +46,11 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
}
@Override
protected float score(DisiWrapper<Scorer> topList) throws IOException {
protected float score(DisiWrapper topList) throws IOException {
float scoreSum = 0;
float scoreMax = 0;
for (DisiWrapper<Scorer> w = topList; w != null; w = w.next) {
final float subScore = w.iterator.score();
for (DisiWrapper w = topList; w != null; w = w.next) {
final float subScore = w.scorer.score();
scoreSum += subScore;
if (subScore > scoreMax) {
scoreMax = subScore;

View File

@ -28,21 +28,21 @@ import java.util.List;
abstract class DisjunctionScorer extends Scorer {
private final boolean needsScores;
final DisiPriorityQueue<Scorer> subScorers;
final DisiPriorityQueue subScorers;
private final long cost;
/** Linked list of scorers which are on the current doc */
private DisiWrapper<Scorer> topScorers;
private DisiWrapper topScorers;
protected DisjunctionScorer(Weight weight, List<Scorer> subScorers, boolean needsScores) {
super(weight);
if (subScorers.size() <= 1) {
throw new IllegalArgumentException("There must be at least 2 subScorers");
}
this.subScorers = new DisiPriorityQueue<Scorer>(subScorers.size());
this.subScorers = new DisiPriorityQueue(subScorers.size());
long cost = 0;
for (Scorer scorer : subScorers) {
final DisiWrapper<Scorer> w = new DisiWrapper<>(scorer);
final DisiWrapper w = new DisiWrapper(scorer);
cost += w.cost;
this.subScorers.add(w);
}
@ -51,13 +51,55 @@ abstract class DisjunctionScorer extends Scorer {
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int docID() {
return subScorers.top().doc;
}
@Override
public final int nextDoc() throws IOException {
topScorers = null;
DisiWrapper top = subScorers.top();
final int doc = top.doc;
do {
top.doc = top.iterator.nextDoc();
top = subScorers.updateTop();
} while (top.doc == doc);
return top.doc;
}
@Override
public final int advance(int target) throws IOException {
topScorers = null;
DisiWrapper top = subScorers.top();
do {
top.doc = top.iterator.advance(target);
top = subScorers.updateTop();
} while (top.doc < target);
return top.doc;
}
@Override
public final long cost() {
return cost;
}
};
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
float sumMatchCost = 0;
long sumApproxCost = 0;
// Compute matchCost as the average over the matchCost of the subScorers.
// This is weighted by the cost, which is an expected number of matching documents.
for (DisiWrapper<Scorer> w : subScorers) {
for (DisiWrapper w : subScorers) {
if (w.twoPhaseView != null) {
long costWeight = (w.cost <= 1) ? 1 : w.cost;
sumMatchCost += w.twoPhaseView.matchCost() * costWeight;
@ -74,11 +116,11 @@ abstract class DisjunctionScorer extends Scorer {
// note it is important to share the same pq as this scorer so that
// rebalancing the pq through the approximation will also rebalance
// the pq in this scorer.
return new TwoPhaseIterator(new DisjunctionDISIApproximation<Scorer>(subScorers)) {
return new TwoPhaseIterator(new DisjunctionDISIApproximation(subScorers)) {
@Override
public boolean matches() throws IOException {
DisiWrapper<Scorer> topScorers = subScorers.topList();
DisiWrapper topScorers = subScorers.topList();
// remove the head of the list as long as it does not match
while (topScorers.twoPhaseView != null && ! topScorers.twoPhaseView.matches()) {
topScorers = topScorers.next;
@ -90,8 +132,8 @@ abstract class DisjunctionScorer extends Scorer {
if (needsScores) {
// if scores or freqs are needed, we also need to remove scorers
// from the top list that do not actually match
DisiWrapper<Scorer> previous = topScorers;
for (DisiWrapper<Scorer> w = topScorers.next; w != null; w = w.next) {
DisiWrapper previous = topScorers;
for (DisiWrapper w = topScorers.next; w != null; w = w.next) {
if (w.twoPhaseView != null && ! w.twoPhaseView.matches()) {
// w does not match, remove it
previous.next = w.next;
@ -119,48 +161,18 @@ abstract class DisjunctionScorer extends Scorer {
};
}
@Override
public final long cost() {
return cost;
}
@Override
public final int docID() {
return subScorers.top().doc;
}
@Override
public final int nextDoc() throws IOException {
topScorers = null;
DisiWrapper<Scorer> top = subScorers.top();
final int doc = top.doc;
do {
top.doc = top.iterator.nextDoc();
top = subScorers.updateTop();
} while (top.doc == doc);
return top.doc;
}
@Override
public final int advance(int target) throws IOException {
topScorers = null;
DisiWrapper<Scorer> top = subScorers.top();
do {
top.doc = top.iterator.advance(target);
top = subScorers.updateTop();
} while (top.doc < target);
return top.doc;
}
@Override
public final int freq() throws IOException {
if (topScorers == null) {
topScorers = subScorers.topList();
}
int freq = 1;
for (DisiWrapper<Scorer> w = topScorers.next; w != null; w = w.next) {
for (DisiWrapper w = topScorers.next; w != null; w = w.next) {
freq += 1;
}
return freq;
@ -175,13 +187,13 @@ abstract class DisjunctionScorer extends Scorer {
}
/** Compute the score for the given linked list of scorers. */
protected abstract float score(DisiWrapper<Scorer> topList) throws IOException;
protected abstract float score(DisiWrapper topList) throws IOException;
@Override
public final Collection<ChildScorer> getChildren() {
ArrayList<ChildScorer> children = new ArrayList<>();
for (DisiWrapper<Scorer> scorer : subScorers) {
children.add(new ChildScorer(scorer.iterator, "SHOULD"));
for (DisiWrapper scorer : subScorers) {
children.add(new ChildScorer(scorer.scorer, "SHOULD"));
}
return children;
}

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.List;
/** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
* This Scorer implements {@link Scorer#advance(int)} and uses advance() on the given Scorers.
*/
final class DisjunctionSumScorer extends DisjunctionScorer {
private final float[] coord;
@ -37,11 +36,11 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
}
@Override
protected float score(DisiWrapper<Scorer> topList) throws IOException {
protected float score(DisiWrapper topList) throws IOException {
double score = 0;
int freq = 0;
for (DisiWrapper<Scorer> w = topList; w != null; w = w.next) {
score += w.iterator.score();
for (DisiWrapper w = topList; w != null; w = w.next) {
score += w.scorer.score();
freq += 1;
}
return (float)score * coord[freq];

View File

@ -59,13 +59,13 @@ final class ExactPhraseScorer extends Scorer {
iterators.add(posting.postings);
postingsAndPositions.add(new PostingsAndPosition(posting.postings, posting.position));
}
conjunction = ConjunctionDISI.intersect(iterators);
conjunction = ConjunctionDISI.intersectIterators(iterators);
this.postings = postingsAndPositions.toArray(new PostingsAndPosition[postingsAndPositions.size()]);
this.matchCost = matchCost;
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public TwoPhaseIterator twoPhaseIterator() {
return new TwoPhaseIterator(conjunction) {
@Override
public boolean matches() throws IOException {
@ -79,22 +79,9 @@ final class ExactPhraseScorer extends Scorer {
};
}
private int doNext(int doc) throws IOException {
for (;; doc = conjunction.nextDoc()) {
if (doc == NO_MORE_DOCS || phraseFreq() > 0) {
return doc;
}
}
}
@Override
public int nextDoc() throws IOException {
return doNext(conjunction.nextDoc());
}
@Override
public int advance(int target) throws IOException {
return doNext(conjunction.advance(target));
public DocIdSetIterator iterator() {
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
@Override
@ -180,8 +167,4 @@ final class ExactPhraseScorer extends Scorer {
return this.freq = freq;
}
@Override
public long cost() {
return conjunction.cost();
}
}

View File

@ -29,11 +29,6 @@ final class FakeScorer extends Scorer {
public FakeScorer() {
super(null);
}
@Override
public int advance(int target) {
throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
}
@Override
public int docID() {
@ -44,11 +39,6 @@ final class FakeScorer extends Scorer {
public int freq() {
return freq;
}
@Override
public int nextDoc() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
}
@Override
public float score() {
@ -56,8 +46,8 @@ final class FakeScorer extends Scorer {
}
@Override
public long cost() {
return 1;
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}
@Override

View File

@ -70,22 +70,12 @@ public abstract class FilterScorer extends Scorer {
}
@Override
public final int nextDoc() throws IOException {
return in.nextDoc();
}
@Override
public final int advance(int target) throws IOException {
return in.advance(target);
}
@Override
public long cost() {
return in.cost();
public final DocIdSetIterator iterator() {
return in.iterator();
}
@Override
public final TwoPhaseIterator asTwoPhaseIterator() {
return in.asTwoPhaseIterator();
public final TwoPhaseIterator twoPhaseIterator() {
return in.twoPhaseIterator();
}
}

View File

@ -64,7 +64,7 @@ final class MinShouldMatchSumScorer extends Scorer {
final PriorityQueue<Scorer> pq = new PriorityQueue<Scorer>(scorers.size() - minShouldMatch + 1) {
@Override
protected boolean lessThan(Scorer a, Scorer b) {
return a.cost() > b.cost();
return a.iterator().cost() > b.iterator().cost();
}
};
for (Scorer scorer : scorers) {
@ -72,7 +72,7 @@ final class MinShouldMatchSumScorer extends Scorer {
}
long cost = 0;
for (Scorer scorer = pq.pop(); scorer != null; scorer = pq.pop()) {
cost += scorer.cost();
cost += scorer.iterator().cost();
}
return cost;
}
@ -82,23 +82,22 @@ final class MinShouldMatchSumScorer extends Scorer {
// list of scorers which 'lead' the iteration and are currently
// positioned on 'doc'
DisiWrapper<Scorer> lead;
DisiWrapper lead;
int doc; // current doc ID of the leads
int freq; // number of scorers on the desired doc ID
// priority queue of scorers that are too advanced compared to the current
// doc. Ordered by doc ID.
final DisiPriorityQueue<Scorer> head;
final DisiPriorityQueue head;
// priority queue of scorers which are behind the current doc.
// Ordered by cost.
final DisiWrapper<Scorer>[] tail;
final DisiWrapper[] tail;
int tailSize;
final Collection<ChildScorer> childScorers;
final long cost;
@SuppressWarnings({"unchecked","rawtypes"})
MinShouldMatchSumScorer(Weight weight, Collection<Scorer> scorers, int minShouldMatch, float[] coord) {
super(weight);
@ -113,13 +112,13 @@ final class MinShouldMatchSumScorer extends Scorer {
this.coord = coord;
this.doc = -1;
head = new DisiPriorityQueue<Scorer>(scorers.size() - minShouldMatch + 1);
head = new DisiPriorityQueue(scorers.size() - minShouldMatch + 1);
// there can be at most minShouldMatch - 1 scorers beyond the current position
// otherwise we might be skipping over matching documents
tail = new DisiWrapper[minShouldMatch - 1];
for (Scorer scorer : scorers) {
addLead(new DisiWrapper<Scorer>(scorer));
addLead(new DisiWrapper(scorer));
}
List<ChildScorer> children = new ArrayList<>();
@ -130,77 +129,89 @@ final class MinShouldMatchSumScorer extends Scorer {
this.cost = cost(scorers, minShouldMatch);
}
@Override
public long cost() {
return cost;
}
@Override
public final Collection<ChildScorer> getChildren() {
return childScorers;
}
@Override
public int nextDoc() throws IOException {
// We are moving to the next doc ID, so scorers in 'lead' need to go in
// 'tail'. If there is not enough space in 'tail', then we take the least
// costly scorers and advance them.
for (DisiWrapper<Scorer> s = lead; s != null; s = s.next) {
final DisiWrapper<Scorer> evicted = insertTailWithOverFlow(s);
if (evicted != null) {
if (evicted.doc == doc) {
evicted.doc = evicted.iterator.nextDoc();
} else {
evicted.doc = evicted.iterator.advance(doc + 1);
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int docID() {
assert doc == lead.doc;
return doc;
}
@Override
public int nextDoc() throws IOException {
// We are moving to the next doc ID, so scorers in 'lead' need to go in
// 'tail'. If there is not enough space in 'tail', then we take the least
// costly scorers and advance them.
for (DisiWrapper s = lead; s != null; s = s.next) {
final DisiWrapper evicted = insertTailWithOverFlow(s);
if (evicted != null) {
if (evicted.doc == doc) {
evicted.doc = evicted.iterator.nextDoc();
} else {
evicted.doc = evicted.iterator.advance(doc + 1);
}
head.add(evicted);
}
}
head.add(evicted);
}
}
setDocAndFreq();
return doNext();
setDocAndFreq();
return doNext();
}
@Override
public int advance(int target) throws IOException {
// Same logic as in nextDoc
for (DisiWrapper s = lead; s != null; s = s.next) {
final DisiWrapper evicted = insertTailWithOverFlow(s);
if (evicted != null) {
evicted.doc = evicted.iterator.advance(target);
head.add(evicted);
}
}
// But this time there might also be scorers in 'head' behind the desired
// target so we need to do the same thing that we did on 'lead' on 'head'
DisiWrapper headTop = head.top();
while (headTop.doc < target) {
final DisiWrapper evicted = insertTailWithOverFlow(headTop);
// We know that the tail is full since it contains at most
// minShouldMatch - 1 entries and we just moved at least minShouldMatch
// entries to it, so evicted is not null
evicted.doc = evicted.iterator.advance(target);
headTop = head.updateTop(evicted);
}
setDocAndFreq();
return doNext();
}
@Override
public long cost() {
return cost;
}
};
}
@Override
public int advance(int target) throws IOException {
// Same logic as in nextDoc
for (DisiWrapper<Scorer> s = lead; s != null; s = s.next) {
final DisiWrapper<Scorer> evicted = insertTailWithOverFlow(s);
if (evicted != null) {
evicted.doc = evicted.iterator.advance(target);
head.add(evicted);
}
}
// But this time there might also be scorers in 'head' behind the desired
// target so we need to do the same thing that we did on 'lead' on 'head'
DisiWrapper<Scorer> headTop = head.top();
while (headTop.doc < target) {
final DisiWrapper<Scorer> evicted = insertTailWithOverFlow(headTop);
// We know that the tail is full since it contains at most
// minShouldMatch - 1 entries and we just moved at least minShouldMatch
// entries to it, so evicted is not null
evicted.doc = evicted.iterator.advance(target);
headTop = head.updateTop(evicted);
}
setDocAndFreq();
return doNext();
}
private void addLead(DisiWrapper<Scorer> lead) {
private void addLead(DisiWrapper lead) {
lead.next = this.lead;
this.lead = lead;
freq += 1;
}
private void pushBackLeads() throws IOException {
for (DisiWrapper<Scorer> s = lead; s != null; s = s.next) {
for (DisiWrapper s = lead; s != null; s = s.next) {
addTail(s);
}
}
private void advanceTail(DisiWrapper<Scorer> top) throws IOException {
private void advanceTail(DisiWrapper top) throws IOException {
top.doc = top.iterator.advance(doc);
if (top.doc == doc) {
addLead(top);
@ -210,7 +221,7 @@ final class MinShouldMatchSumScorer extends Scorer {
}
private void advanceTail() throws IOException {
final DisiWrapper<Scorer> top = popTail();
final DisiWrapper top = popTail();
advanceTail(top);
}
@ -276,8 +287,8 @@ final class MinShouldMatchSumScorer extends Scorer {
// we need to know about all matches
updateFreq();
double score = 0;
for (DisiWrapper<Scorer> s = lead; s != null; s = s.next) {
score += s.iterator.score();
for (DisiWrapper s = lead; s != null; s = s.next) {
score += s.scorer.score();
}
return coord[freq] * (float) score;
}
@ -289,12 +300,12 @@ final class MinShouldMatchSumScorer extends Scorer {
}
/** Insert an entry in 'tail' and evict the least-costly scorer if full. */
private DisiWrapper<Scorer> insertTailWithOverFlow(DisiWrapper<Scorer> s) {
private DisiWrapper insertTailWithOverFlow(DisiWrapper s) {
if (tailSize < tail.length) {
addTail(s);
return null;
} else if (tail.length >= 1) {
final DisiWrapper<Scorer> top = tail[0];
final DisiWrapper top = tail[0];
if (top.cost < s.cost) {
tail[0] = s;
downHeapCost(tail, tailSize);
@ -305,16 +316,16 @@ final class MinShouldMatchSumScorer extends Scorer {
}
/** Add an entry to 'tail'. Fails if over capacity. */
private void addTail(DisiWrapper<Scorer> s) {
private void addTail(DisiWrapper s) {
tail[tailSize] = s;
upHeapCost(tail, tailSize);
tailSize += 1;
}
/** Pop the least-costly scorer from 'tail'. */
private DisiWrapper<Scorer> popTail() {
private DisiWrapper popTail() {
assert tailSize > 0;
final DisiWrapper<Scorer> result = tail[0];
final DisiWrapper result = tail[0];
tail[0] = tail[--tailSize];
downHeapCost(tail, tailSize);
return result;
@ -322,8 +333,8 @@ final class MinShouldMatchSumScorer extends Scorer {
/** Heap helpers */
private static void upHeapCost(DisiWrapper<Scorer>[] heap, int i) {
final DisiWrapper<Scorer> node = heap[i];
private static void upHeapCost(DisiWrapper[] heap, int i) {
final DisiWrapper node = heap[i];
final long nodeCost = node.cost;
int j = parentNode(i);
while (j >= 0 && nodeCost < heap[j].cost) {
@ -334,9 +345,9 @@ final class MinShouldMatchSumScorer extends Scorer {
heap[i] = node;
}
private static void downHeapCost(DisiWrapper<Scorer>[] heap, int size) {
private static void downHeapCost(DisiWrapper[] heap, int size) {
int i = 0;
final DisiWrapper<Scorer> node = heap[0];
final DisiWrapper node = heap[0];
int j = leftNode(i);
if (j < size) {
int k = rightNode(j);

View File

@ -241,7 +241,7 @@ public class MultiPhraseQuery extends Query {
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context);
if (scorer != null) {
int newDoc = scorer.advance(doc);
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer)scorer).sloppyFreq();
SimScorer docScorer = similarity.simScorer(stats, context);

View File

@ -453,7 +453,7 @@ public class PhraseQuery extends Query {
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context);
if (scorer != null) {
int newDoc = scorer.advance(doc);
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
float freq = slop == 0 ? scorer.freq() : ((SloppyPhraseScorer)scorer).sloppyFreq();
SimScorer docScorer = similarity.simScorer(stats, context);

View File

@ -89,7 +89,7 @@ public abstract class QueryRescorer extends Rescorer {
int targetDoc = docID - docBase;
int actualDoc = scorer.docID();
if (actualDoc < targetDoc) {
actualDoc = scorer.advance(targetDoc);
actualDoc = scorer.iterator().advance(targetDoc);
}
if (actualDoc == targetDoc) {

View File

@ -23,9 +23,6 @@ import java.util.Collections;
/** A Scorer for queries with a required subscorer
* and an excluding (prohibited) sub {@link Scorer}.
* <br>
* This <code>Scorer</code> implements {@link Scorer#advance(int)},
* and it uses the advance() on the given scorers.
*/
class ReqExclScorer extends Scorer {
@ -44,25 +41,20 @@ class ReqExclScorer extends Scorer {
public ReqExclScorer(Scorer reqScorer, Scorer exclScorer) {
super(reqScorer.weight);
this.reqScorer = reqScorer;
reqTwoPhaseIterator = reqScorer.asTwoPhaseIterator();
reqTwoPhaseIterator = reqScorer.twoPhaseIterator();
if (reqTwoPhaseIterator == null) {
reqApproximation = reqScorer;
reqApproximation = reqScorer.iterator();
} else {
reqApproximation = reqTwoPhaseIterator.approximation();
}
exclTwoPhaseIterator = exclScorer.asTwoPhaseIterator();
exclTwoPhaseIterator = exclScorer.twoPhaseIterator();
if (exclTwoPhaseIterator == null) {
exclApproximation = exclScorer;
exclApproximation = exclScorer.iterator();
} else {
exclApproximation = exclTwoPhaseIterator.approximation();
}
}
@Override
public int nextDoc() throws IOException {
return toNonExcluded(reqApproximation.nextDoc());
}
/** Confirms whether or not the given {@link TwoPhaseIterator}
* matches on the current document. */
private static boolean matches(TwoPhaseIterator it) throws IOException {
@ -85,25 +77,52 @@ class ReqExclScorer extends Scorer {
return matches(reqTwoPhaseIterator);
}
/** Advance to the next non-excluded doc. */
private int toNonExcluded(int doc) throws IOException {
int exclDoc = exclApproximation.docID();
for (;; doc = reqApproximation.nextDoc()) {
if (doc == NO_MORE_DOCS) {
return NO_MORE_DOCS;
@Override
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
/** Advance to the next non-excluded doc. */
private int toNonExcluded(int doc) throws IOException {
int exclDoc = exclApproximation.docID();
for (;; doc = reqApproximation.nextDoc()) {
if (doc == NO_MORE_DOCS) {
return NO_MORE_DOCS;
}
if (exclDoc < doc) {
exclDoc = exclApproximation.advance(doc);
}
if (matches(doc, exclDoc, reqTwoPhaseIterator, exclTwoPhaseIterator)) {
return doc;
}
}
}
if (exclDoc < doc) {
exclDoc = exclApproximation.advance(doc);
@Override
public int nextDoc() throws IOException {
return toNonExcluded(reqApproximation.nextDoc());
}
if (matches(doc, exclDoc, reqTwoPhaseIterator, exclTwoPhaseIterator)) {
return doc;
@Override
public int advance(int target) throws IOException {
return toNonExcluded(reqApproximation.advance(target));
}
}
@Override
public int docID() {
return reqApproximation.docID();
}
@Override
public long cost() {
return reqApproximation.cost();
}
};
}
@Override
public int docID() {
return reqScorer.docID();
return reqApproximation.docID();
}
@Override
@ -111,11 +130,6 @@ class ReqExclScorer extends Scorer {
return reqScorer.freq();
}
@Override
public long cost() {
return reqScorer.cost();
}
@Override
public float score() throws IOException {
return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
@ -127,12 +141,7 @@ class ReqExclScorer extends Scorer {
}
@Override
public int advance(int target) throws IOException {
return toNonExcluded(reqApproximation.advance(target));
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public TwoPhaseIterator twoPhaseIterator() {
if (reqTwoPhaseIterator == null) {
return null;
}

View File

@ -22,15 +22,14 @@ import java.util.Collection;
/** A Scorer for queries with a required part and an optional part.
* Delays skipTo() on the optional part until a score() is needed.
* <br>
* This <code>Scorer</code> implements {@link Scorer#advance(int)}.
*/
class ReqOptSumScorer extends Scorer {
/** The scorers passed from the constructor.
* These are set to null as soon as their next() or skipTo() returns false.
*/
protected final Scorer reqScorer;
protected Scorer optScorer;
protected final Scorer optScorer;
protected final DocIdSetIterator optIterator;
/** Construct a <code>ReqOptScorer</code>.
* @param reqScorer The required scorer. This must match.
@ -45,30 +44,26 @@ class ReqOptSumScorer extends Scorer {
assert optScorer != null;
this.reqScorer = reqScorer;
this.optScorer = optScorer;
this.optIterator = optScorer.iterator();
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
return reqScorer.asTwoPhaseIterator();
public TwoPhaseIterator twoPhaseIterator() {
return reqScorer.twoPhaseIterator();
}
@Override
public int nextDoc() throws IOException {
return reqScorer.nextDoc();
public DocIdSetIterator iterator() {
return reqScorer.iterator();
}
@Override
public int advance(int target) throws IOException {
return reqScorer.advance(target);
}
@Override
public int docID() {
return reqScorer.docID();
}
/** Returns the score of the current document matching the query.
* Initially invalid, until {@link #nextDoc()} is called the first time.
* Initially invalid, until the {@link #iterator()} is advanced the first time.
* @return The score of the required scorer, eventually increased by the score
* of the optional scorer when it also matches the current document.
*/
@ -76,25 +71,25 @@ class ReqOptSumScorer extends Scorer {
public float score() throws IOException {
// TODO: sum into a double and cast to float if we ever send required clauses to BS1
int curDoc = reqScorer.docID();
float reqScore = reqScorer.score();
if (optScorer == null) {
return reqScore;
float score = reqScorer.score();
int optScorerDoc = optIterator.docID();
if (optScorerDoc < curDoc) {
optScorerDoc = optIterator.advance(curDoc);
}
int optScorerDoc = optScorer.docID();
if (optScorerDoc < curDoc && (optScorerDoc = optScorer.advance(curDoc)) == NO_MORE_DOCS) {
optScorer = null;
return reqScore;
if (optScorerDoc == curDoc) {
score += optScorer.score();
}
return optScorerDoc == curDoc ? reqScore + optScorer.score() : reqScore;
return score;
}
@Override
public int freq() throws IOException {
// we might have deferred advance()
score();
return (optScorer != null && optScorer.docID() == reqScorer.docID()) ? 2 : 1;
return optIterator.docID() == reqScorer.docID() ? 2 : 1;
}
@Override
@ -105,9 +100,5 @@ class ReqOptSumScorer extends Scorer {
return children;
}
@Override
public long cost() {
return reqScorer.cost();
}
}

View File

@ -25,8 +25,8 @@ import java.util.Collections;
* Expert: Common scoring functionality for different types of queries.
*
* <p>
* A <code>Scorer</code> iterates over documents matching a
* query in increasing order of doc Id.
* A <code>Scorer</code> exposes an {@link #iterator()} over documents
* matching a query in increasing order of doc Id.
* </p>
* <p>
* Document scores are computed using a given <code>Similarity</code>
@ -39,7 +39,7 @@ import java.util.Collections;
* TopScoreDocCollector}) will not properly collect hits
* with these scores.
*/
public abstract class Scorer extends DocIdSetIterator {
public abstract class Scorer {
/** the Scorer's parent Weight. in some cases this may be null */
// TODO can we clean this up?
protected final Weight weight;
@ -52,10 +52,18 @@ public abstract class Scorer extends DocIdSetIterator {
this.weight = weight;
}
/**
* Returns the doc ID that is currently being scored.
* This will return {@code -1} if the {@link #iterator()} is not positioned
* or {@link DocIdSetIterator#NO_MORE_DOCS} if it has been entirely consumed.
* @see DocIdSetIterator#docID()
*/
public abstract int docID();
/** Returns the score of the current document matching the query.
* Initially invalid, until {@link #nextDoc()} or {@link #advance(int)}
* is called the first time, or when called from within
* {@link LeafCollector#collect}.
* Initially invalid, until {@link DocIdSetIterator#nextDoc()} or
* {@link DocIdSetIterator#advance(int)} is called on the {@link #iterator()}
* the first time, or when called from within {@link LeafCollector#collect}.
*/
public abstract float score() throws IOException;
@ -101,6 +109,19 @@ public abstract class Scorer extends DocIdSetIterator {
}
}
/**
* Return a {@link DocIdSetIterator} over matching documents.
*
* The returned iterator will either be positioned on {@code -1} if no
* documents have been scored yet, {@link DocIdSetIterator#NO_MORE_DOCS}
* if all documents have been scored already, or the last document id that
* has been scored otherwise.
*
* The returned iterator is a view: calling this method several times will
* return iterators that have the same state.
*/
public abstract DocIdSetIterator iterator();
/**
* Optional method: Return a {@link TwoPhaseIterator} view of this
* {@link Scorer}. A return value of {@code null} indicates that
@ -108,15 +129,15 @@ public abstract class Scorer extends DocIdSetIterator {
*
* Note that the returned {@link TwoPhaseIterator}'s
* {@link TwoPhaseIterator#approximation() approximation} must
* advance synchronously with this iterator: advancing the approximation must
* advance this iterator and vice-versa.
* advance synchronously with the {@link #iterator()}: advancing the
* approximation must advance the iterator and vice-versa.
*
* Implementing this method is typically useful on {@link Scorer}s
* that have a high per-document overhead in order to confirm matches.
*
* The default implementation returns {@code null}.
*/
public TwoPhaseIterator asTwoPhaseIterator() {
public TwoPhaseIterator twoPhaseIterator() {
return null;
}
}

View File

@ -69,7 +69,7 @@ final class SloppyPhraseScorer extends Scorer {
iterators[i] = postings[i].postings;
phrasePositions[i] = new PhrasePositions(postings[i].postings, postings[i].position, i, postings[i].terms);
}
conjunction = ConjunctionDISI.intersect(Arrays.asList(iterators));
conjunction = ConjunctionDISI.intersectIterators(Arrays.asList(iterators));
this.matchCost = matchCost;
}
@ -550,49 +550,17 @@ final class SloppyPhraseScorer extends Scorer {
public int docID() {
return conjunction.docID();
}
@Override
public int nextDoc() throws IOException {
int doc;
for (doc = conjunction.nextDoc(); doc != NO_MORE_DOCS; doc = conjunction.nextDoc()) {
sloppyFreq = phraseFreq(); // check for phrase
if (sloppyFreq != 0f) {
break;
}
}
return doc;
}
@Override
public float score() {
return docScorer.score(docID(), sloppyFreq);
}
@Override
public int advance(int target) throws IOException {
assert target > docID();
int doc;
for (doc = conjunction.advance(target); doc != NO_MORE_DOCS; doc = conjunction.nextDoc()) {
sloppyFreq = phraseFreq(); // check for phrase
if (sloppyFreq != 0f) {
break;
}
}
return doc;
}
@Override
public long cost() {
return conjunction.cost();
}
@Override
public String toString() { return "scorer(" + weight + ")"; }
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public TwoPhaseIterator twoPhaseIterator() {
return new TwoPhaseIterator(conjunction) {
@Override
public boolean matches() throws IOException {
@ -611,4 +579,9 @@ final class SloppyPhraseScorer extends Scorer {
}
};
}
@Override
public DocIdSetIterator iterator() {
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
}

View File

@ -162,7 +162,7 @@ public final class SynonymQuery extends Query {
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context);
if (scorer != null) {
int newDoc = scorer.advance(doc);
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
final float freq;
if (scorer instanceof SynonymScorer) {
@ -229,15 +229,15 @@ public final class SynonymQuery extends Query {
}
@Override
protected float score(DisiWrapper<Scorer> topList) throws IOException {
protected float score(DisiWrapper topList) throws IOException {
return similarity.score(topList.doc, tf(topList));
}
/** combines TF of all subs. */
final int tf(DisiWrapper<Scorer> topList) throws IOException {
final int tf(DisiWrapper topList) throws IOException {
int tf = 0;
for (DisiWrapper<Scorer> w = topList; w != null; w = w.next) {
tf += w.iterator.freq();
for (DisiWrapper w = topList; w != null; w = w.next) {
tf += w.scorer.freq();
}
return tf;
}

View File

@ -136,7 +136,7 @@ public class TermQuery extends Query {
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context);
if (scorer != null) {
int newDoc = scorer.advance(doc);
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
float freq = scorer.freq();
SimScorer docScorer = similarity.simScorer(stats, context);

View File

@ -55,41 +55,17 @@ final class TermScorer extends Scorer {
return postingsEnum.freq();
}
/**
* Advances to the next document matching the query. <br>
*
* @return the document matching the query or NO_MORE_DOCS if there are no more documents.
*/
@Override
public int nextDoc() throws IOException {
return postingsEnum.nextDoc();
public DocIdSetIterator iterator() {
return postingsEnum;
}
@Override
public float score() throws IOException {
assert docID() != NO_MORE_DOCS;
assert docID() != DocIdSetIterator.NO_MORE_DOCS;
return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
}
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target. <br>
* The implementation uses {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
*
* @param target
* The target document number.
* @return the matching document or NO_MORE_DOCS if none exist.
*/
@Override
public int advance(int target) throws IOException {
return postingsEnum.advance(target);
}
@Override
public long cost() {
return postingsEnum.cost();
}
/** Returns a string representation of this <code>TermScorer</code>. */
@Override
public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }

View File

@ -21,7 +21,7 @@ import java.io.IOException;
import java.util.Objects;
/**
* Returned by {@link Scorer#asTwoPhaseIterator()}
* Returned by {@link Scorer#twoPhaseIterator()}
* to expose an approximation of a {@link DocIdSetIterator}.
* When the {@link #approximation()}'s
* {@link DocIdSetIterator#nextDoc()} or {@link DocIdSetIterator#advance(int)}
@ -98,14 +98,4 @@ public abstract class TwoPhaseIterator {
*/
public abstract float matchCost();
/**
* Returns a {@link TwoPhaseIterator} for this {@link DocIdSetIterator}
* when available, otherwise returns null.
*/
public static TwoPhaseIterator asTwoPhaseIterator(DocIdSetIterator iter) {
return (iter instanceof Scorer)
? ((Scorer) iter).asTwoPhaseIterator()
: null;
}
}

View File

@ -147,6 +147,8 @@ public abstract class Weight {
* @lucene.internal */
protected static class DefaultBulkScorer extends BulkScorer {
private final Scorer scorer;
private final DocIdSetIterator iterator;
private final TwoPhaseIterator twoPhase;
/** Sole constructor. */
public DefaultBulkScorer(Scorer scorer) {
@ -154,30 +156,31 @@ public abstract class Weight {
throw new NullPointerException();
}
this.scorer = scorer;
this.iterator = scorer.iterator();
this.twoPhase = scorer.twoPhaseIterator();
}
@Override
public long cost() {
return scorer.cost();
return iterator.cost();
}
@Override
public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
collector.setScorer(scorer);
final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator();
if (scorer.docID() == -1 && min == 0 && max == DocIdSetIterator.NO_MORE_DOCS) {
scoreAll(collector, scorer, twoPhase, acceptDocs);
scoreAll(collector, iterator, twoPhase, acceptDocs);
return DocIdSetIterator.NO_MORE_DOCS;
} else {
int doc = scorer.docID();
if (doc < min) {
if (twoPhase == null) {
doc = scorer.advance(min);
doc = iterator.advance(min);
} else {
doc = twoPhase.approximation().advance(min);
}
}
return scoreRange(collector, scorer, twoPhase, acceptDocs, doc, max);
return scoreRange(collector, iterator, twoPhase, acceptDocs, doc, max);
}
}
@ -185,14 +188,14 @@ public abstract class Weight {
* separate this from {@link #scoreAll} to help out
* hotspot.
* See <a href="https://issues.apache.org/jira/browse/LUCENE-5487">LUCENE-5487</a> */
static int scoreRange(LeafCollector collector, Scorer scorer, TwoPhaseIterator twoPhase,
static int scoreRange(LeafCollector collector, DocIdSetIterator iterator, TwoPhaseIterator twoPhase,
Bits acceptDocs, int currentDoc, int end) throws IOException {
if (twoPhase == null) {
while (currentDoc < end) {
if (acceptDocs == null || acceptDocs.get(currentDoc)) {
collector.collect(currentDoc);
}
currentDoc = scorer.nextDoc();
currentDoc = iterator.nextDoc();
}
return currentDoc;
} else {
@ -211,9 +214,9 @@ public abstract class Weight {
* separate this from {@link #scoreRange} to help out
* hotspot.
* See <a href="https://issues.apache.org/jira/browse/LUCENE-5487">LUCENE-5487</a> */
static void scoreAll(LeafCollector collector, Scorer scorer, TwoPhaseIterator twoPhase, Bits acceptDocs) throws IOException {
static void scoreAll(LeafCollector collector, DocIdSetIterator iterator, TwoPhaseIterator twoPhase, Bits acceptDocs) throws IOException {
if (twoPhase == null) {
for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
if (acceptDocs == null || acceptDocs.get(doc)) {
collector.collect(doc);
}

View File

@ -427,13 +427,14 @@
* <p>The
* {@link org.apache.lucene.search.Scorer Scorer}
* abstract class provides common scoring functionality for all Scorer implementations and
* is the heart of the Lucene scoring process. The Scorer defines the following abstract (some of them are not
* yet abstract, but will be in future versions and should be considered as such now) methods which
* must be implemented (some of them inherited from {@link org.apache.lucene.search.DocIdSetIterator DocIdSetIterator}):
* is the heart of the Lucene scoring process. The Scorer defines the following methods which
* must be implemented:
* <ol>
* <li>
* {@link org.apache.lucene.search.Scorer#nextDoc nextDoc()} &mdash; Advances to the next
* document that matches this Query, returning true if and only if there is another document that matches.</li>
* {@link org.apache.lucene.search.Scorer#iterator iterator()} &mdash; Return a
* {@link org.apache.lucene.search.DocIdSetIterator DocIdSetIterator} that can iterate over all
* document that matches this Query.
* </li>
* <li>
* {@link org.apache.lucene.search.Scorer#docID docID()} &mdash; Returns the id of the
* {@link org.apache.lucene.document.Document Document} that contains the match.
@ -451,13 +452,6 @@
* {@link org.apache.lucene.index.PostingsEnum#freq PostingsEnum.freq()}.
* </li>
* <li>
* {@link org.apache.lucene.search.Scorer#advance advance()} &mdash; Skip ahead in
* the document matches to the document whose id is greater than
* or equal to the passed in value. In many instances, advance can be
* implemented more efficiently than simply looping through all the matching documents until
* the target document is identified.
* </li>
* <li>
* {@link org.apache.lucene.search.Scorer#getChildren getChildren()} &mdash; Returns any child subscorers
* underneath this scorer. This allows for users to navigate the scorer hierarchy and receive more fine-grained
* details on the scoring process.
@ -531,7 +525,7 @@
* <p>Assuming a BooleanScorer2, we first initialize the Coordinator, which is used to apply the coord()
* factor. We then get a internal Scorer based on the required, optional and prohibited parts of the query.
* Using this internal Scorer, the BooleanScorer2 then proceeds into a while loop based on the
* {@link org.apache.lucene.search.Scorer#nextDoc Scorer.nextDoc()} method. The nextDoc() method advances
* {@link org.apache.lucene.search.DocIdSetIterator#nextDoc DocIdSetIterator.nextDoc()} method. The nextDoc() method advances
* to the next document matching the query. This is an abstract method in the Scorer class and is thus
* overridden by all derived implementations. If you have a simple OR query your internal Scorer is most
* likely a DisjunctionSumScorer, which essentially combines the scorers from the sub scorers of the OR'd terms.

View File

@ -23,7 +23,6 @@ import java.util.List;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.ConjunctionDISI;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.similarities.Similarity;
/**
* Common super class for multiple sub spans required in a document.
@ -34,13 +33,12 @@ abstract class ConjunctionSpans extends Spans {
boolean atFirstInCurrentDoc; // a first start position is available in current doc for nextStartPosition
boolean oneExhaustedInCurrentDoc; // one subspans exhausted in current doc
ConjunctionSpans(List<Spans> subSpans, SpanWeight weight, Similarity.SimScorer docScorer) {
super(weight, docScorer);
ConjunctionSpans(List<Spans> subSpans) {
if (subSpans.size() < 2) {
throw new IllegalArgumentException("Less than 2 subSpans.size():" + subSpans.size());
}
this.subSpans = subSpans.toArray(new Spans[subSpans.size()]);
this.conjunction = ConjunctionDISI.intersect(subSpans);
this.conjunction = ConjunctionDISI.intersectSpans(subSpans);
this.atFirstInCurrentDoc = true; // ensure for doc -1 that start/end positions are -1
}

View File

@ -21,15 +21,13 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import org.apache.lucene.search.similarities.Similarity;
abstract class ContainSpans extends ConjunctionSpans {
Spans sourceSpans;
Spans bigSpans;
Spans littleSpans;
ContainSpans(SpanWeight weight, Similarity.SimScorer simScorer, Spans bigSpans, Spans littleSpans, Spans sourceSpans) {
super(Arrays.asList(bigSpans, littleSpans), weight, simScorer);
ContainSpans(Spans bigSpans, Spans littleSpans, Spans sourceSpans) {
super(Arrays.asList(bigSpans, littleSpans));
this.bigSpans = Objects.requireNonNull(bigSpans);
this.littleSpans = Objects.requireNonNull(littleSpans);
this.sourceSpans = Objects.requireNonNull(sourceSpans);

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.Objects;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.similarities.Similarity;
/**
* A {@link Spans} implementation wrapping another spans instance,
@ -36,8 +35,7 @@ public abstract class FilterSpans extends Spans {
private int startPos = -1;
/** Wrap the given {@link Spans}. */
protected FilterSpans(Spans in, Similarity.SimScorer docScorer) {
super((SpanWeight)in.getWeight(), docScorer);
protected FilterSpans(Spans in) {
this.in = Objects.requireNonNull(in);
}

View File

@ -20,8 +20,6 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
import java.util.List;
import org.apache.lucene.search.similarities.Similarity;
/**
* A Spans that is formed from the ordered subspans of a SpanNearQuery
* where the subspans do not overlap and have a maximum slop between them.
@ -52,8 +50,8 @@ public class NearSpansOrdered extends ConjunctionSpans {
private final int allowedSlop;
public NearSpansOrdered(SpanWeight weight, int allowedSlop, List<Spans> subSpans, Similarity.SimScorer simScorer) throws IOException {
super(subSpans, weight, simScorer);
public NearSpansOrdered(int allowedSlop, List<Spans> subSpans) throws IOException {
super(subSpans);
this.atFirstInCurrentDoc = true; // -1 startPosition/endPosition also at doc -1
this.allowedSlop = allowedSlop;
}
@ -152,10 +150,5 @@ public class NearSpansOrdered extends ConjunctionSpans {
}
}
@Override
public String toString() {
return "NearSpansOrdered("+weight.getQuery().toString()+")@"+docID()+": "+startPosition()+" - "+endPosition();
}
}

View File

@ -22,7 +22,6 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.PriorityQueue;
/**
@ -38,9 +37,9 @@ public class NearSpansUnordered extends ConjunctionSpans {
private SpanPositionQueue spanPositionQueue;
public NearSpansUnordered(SpanWeight weight, int allowedSlop, List<Spans> subSpans, Similarity.SimScorer simScorer)
public NearSpansUnordered(int allowedSlop, List<Spans> subSpans)
throws IOException {
super(subSpans, weight, simScorer);
super(subSpans);
this.subSpanCells = new ArrayList<>(subSpans.size());
for (Spans subSpan : subSpans) { // sub spans in query order
@ -77,7 +76,6 @@ public class NearSpansUnordered extends ConjunctionSpans {
final Spans in;
public SpansCell(Spans spans) {
super((SpanWeight) NearSpansUnordered.this.weight, NearSpansUnordered.this.docScorer);
this.in = spans;
}
@ -267,13 +265,4 @@ public class NearSpansUnordered extends ConjunctionSpans {
}
}
@Override
public String toString() {
if (minPositionCell() != null) {
return getClass().getName() + "("+weight.getQuery().toString()+")@"+
(docID()+":"+startPosition()+"-"+endPosition());
} else {
return getClass().getName() + "("+weight.getQuery().toString()+")@ ?START?";
}
}
}

View File

@ -35,7 +35,6 @@ public class ScoringWrapperSpans extends Spans {
* @param simScorer the SimScorer to use for scoring
*/
public ScoringWrapperSpans(Spans spans, Similarity.SimScorer simScorer) {
super((SpanWeight) spans.getWeight(), simScorer);
this.in = spans;
}

View File

@ -151,7 +151,7 @@ public final class SpanBoostQuery extends SpanQuery {
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
public SpanScorer scorer(LeafReaderContext context) throws IOException {
return weight.scorer(context);
}

View File

@ -71,7 +71,7 @@ public final class SpanContainingQuery extends SpanContainQuery {
Spans big = containerContained.get(0);
Spans little = containerContained.get(1);
return new ContainSpans(this, getSimScorer(context), big, little, big) {
return new ContainSpans(big, little, big) {
@Override
boolean twoPhaseCurrentDocMatches() throws IOException {

View File

@ -219,8 +219,8 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
}
// all NearSpans require at least two subSpans
return (!inOrder) ? new NearSpansUnordered(this, slop, subSpans, getSimScorer(context))
: new NearSpansOrdered(this, slop, subSpans, getSimScorer(context));
return (!inOrder) ? new NearSpansUnordered(slop, subSpans)
: new NearSpansOrdered(slop, subSpans);
}
@Override
@ -330,7 +330,6 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
final int width;
GapSpans(int width) {
super(null, null);
this.width = width;
}

View File

@ -132,7 +132,7 @@ public final class SpanNotQuery extends SpanQuery {
TwoPhaseIterator excludeTwoPhase = excludeSpans.asTwoPhaseIterator();
DocIdSetIterator excludeApproximation = excludeTwoPhase == null ? null : excludeTwoPhase.approximation();
return new FilterSpans(includeSpans, getSimScorer(context)) {
return new FilterSpans(includeSpans) {
// last document we have checked matches() against for the exclusion, and failed
// when using approximations, so we don't call it again, and pass thru all inclusions.
int lastApproxDoc = -1;

View File

@ -169,20 +169,20 @@ public final class SpanOrQuery extends SpanQuery {
return new ScoringWrapperSpans(subSpans.get(0), getSimScorer(context));
}
DisiPriorityQueue<Spans> byDocQueue = new DisiPriorityQueue<>(subSpans.size());
DisiPriorityQueue byDocQueue = new DisiPriorityQueue(subSpans.size());
for (Spans spans : subSpans) {
byDocQueue.add(new DisiWrapper<>(spans));
byDocQueue.add(new DisiWrapper(spans));
}
SpanPositionQueue byPositionQueue = new SpanPositionQueue(subSpans.size()); // when empty use -1
return new Spans(this, getSimScorer(context)) {
return new Spans() {
Spans topPositionSpans = null;
@Override
public int nextDoc() throws IOException {
topPositionSpans = null;
DisiWrapper<Spans> topDocSpans = byDocQueue.top();
DisiWrapper topDocSpans = byDocQueue.top();
int currentDoc = topDocSpans.doc;
do {
topDocSpans.doc = topDocSpans.iterator.nextDoc();
@ -194,7 +194,7 @@ public final class SpanOrQuery extends SpanQuery {
@Override
public int advance(int target) throws IOException {
topPositionSpans = null;
DisiWrapper<Spans> topDocSpans = byDocQueue.top();
DisiWrapper topDocSpans = byDocQueue.top();
do {
topDocSpans.doc = topDocSpans.iterator.advance(target);
topDocSpans = byDocQueue.updateTop();
@ -204,7 +204,7 @@ public final class SpanOrQuery extends SpanQuery {
@Override
public int docID() {
DisiWrapper<Spans> topDocSpans = byDocQueue.top();
DisiWrapper topDocSpans = byDocQueue.top();
return topDocSpans.doc;
}
@ -213,7 +213,7 @@ public final class SpanOrQuery extends SpanQuery {
float sumMatchCost = 0; // See also DisjunctionScorer.asTwoPhaseIterator()
long sumApproxCost = 0;
for (DisiWrapper<Spans> w : byDocQueue) {
for (DisiWrapper w : byDocQueue) {
if (w.twoPhaseView != null) {
long costWeight = (w.cost <= 1) ? 1 : w.cost;
sumMatchCost += w.twoPhaseView.matchCost() * costWeight;
@ -228,7 +228,7 @@ public final class SpanOrQuery extends SpanQuery {
final float matchCost = sumMatchCost / sumApproxCost;
return new TwoPhaseIterator(new DisjunctionDISIApproximation<Spans>(byDocQueue)) {
return new TwoPhaseIterator(new DisjunctionDISIApproximation(byDocQueue)) {
@Override
public boolean matches() throws IOException {
return twoPhaseCurrentDocMatches();
@ -246,9 +246,9 @@ public final class SpanOrQuery extends SpanQuery {
void computePositionsCost() {
float sumPositionsCost = 0;
long sumCost = 0;
for (DisiWrapper<Spans> w : byDocQueue) {
for (DisiWrapper w : byDocQueue) {
long costWeight = (w.cost <= 1) ? 1 : w.cost;
sumPositionsCost += w.iterator.positionsCost() * costWeight;
sumPositionsCost += w.spans.positionsCost() * costWeight;
sumCost += costWeight;
}
positionsCost = sumPositionsCost / sumCost;
@ -265,7 +265,7 @@ public final class SpanOrQuery extends SpanQuery {
int lastDocTwoPhaseMatched = -1;
boolean twoPhaseCurrentDocMatches() throws IOException {
DisiWrapper<Spans> listAtCurrentDoc = byDocQueue.topList();
DisiWrapper listAtCurrentDoc = byDocQueue.topList();
// remove the head of the list as long as it does not match
final int currentDoc = listAtCurrentDoc.doc;
while (listAtCurrentDoc.twoPhaseView != null) {
@ -289,9 +289,9 @@ public final class SpanOrQuery extends SpanQuery {
void fillPositionQueue() throws IOException { // called at first nextStartPosition
assert byPositionQueue.size() == 0;
// add all matching Spans at current doc to byPositionQueue
DisiWrapper<Spans> listAtCurrentDoc = byDocQueue.topList();
DisiWrapper listAtCurrentDoc = byDocQueue.topList();
while (listAtCurrentDoc != null) {
Spans spansAtDoc = listAtCurrentDoc.iterator;
Spans spansAtDoc = listAtCurrentDoc.spans;
if (lastDocTwoPhaseMatched == listAtCurrentDoc.doc) { // matched by DisjunctionDisiApproximation
if (listAtCurrentDoc.twoPhaseView != null) { // matched by approximation
if (listAtCurrentDoc.lastApproxNonMatchDoc == listAtCurrentDoc.doc) { // matches() returned false

View File

@ -94,7 +94,7 @@ public abstract class SpanPositionCheckQuery extends SpanQuery implements Clonea
@Override
public Spans getSpans(final LeafReaderContext context, Postings requiredPostings) throws IOException {
Spans matchSpans = matchWeight.getSpans(context, requiredPostings);
return (matchSpans == null) ? null : new FilterSpans(matchSpans, getSimScorer(context)) {
return (matchSpans == null) ? null : new FilterSpans(matchSpans) {
@Override
protected AcceptStatus accept(Spans candidate) throws IOException {
return acceptPosition(candidate);

View File

@ -0,0 +1,145 @@
package org.apache.lucene.search.spans;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Objects;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.similarities.Similarity;
/**
* A basic {@link Scorer} over {@link Spans}.
* @lucene.experimental
*/
public class SpanScorer extends Scorer {

  /** The underlying positions enumeration being scored; never null. */
  protected final Spans spans;
  /** Scores each doc from its slop-adjusted frequency; may be null when
   *  scores are not required (see {@link #setFreqCurrentDoc()}). */
  protected final Similarity.SimScorer docScorer;

  /** accumulated sloppy freq (computed in setFreqCurrentDoc) */
  private float freq;
  /** number of matches (computed in setFreqCurrentDoc) */
  private int numMatches;
  private int lastScoredDoc = -1; // last doc we called setFreqCurrentDoc() for

  /** Sole constructor. */
  public SpanScorer(SpanWeight weight, Spans spans, Similarity.SimScorer docScorer) {
    super(weight);
    this.spans = Objects.requireNonNull(spans);
    this.docScorer = docScorer;
  }

  @Override
  public int docID() {
    return spans.docID();
  }

  @Override
  public DocIdSetIterator iterator() {
    // Spans is itself a DocIdSetIterator, so it is exposed directly.
    return spans;
  }

  @Override
  public TwoPhaseIterator twoPhaseIterator() {
    // May be null when the wrapped Spans does not support two-phase iteration.
    return spans.asTwoPhaseIterator();
  }

  /**
   * Score the current doc. The default implementation scores the doc
   * with the similarity using the slop-adjusted {@link #freq}.
   */
  protected float scoreCurrentDoc() throws IOException {
    assert docScorer != null : getClass() + " has a null docScorer!";
    return docScorer.score(docID(), freq);
  }

  /**
   * Sets {@link #freq} and {@link #numMatches} for the current document.
   * <p>
   * This will be called at most once per document.
   */
  protected final void setFreqCurrentDoc() throws IOException {
    freq = 0.0f;
    numMatches = 0;

    spans.doStartCurrentDoc();

    // Positions must not have been consumed yet for this doc.
    assert spans.startPosition() == -1 : "incorrect initial start position, " + this.toString();
    assert spans.endPosition() == -1 : "incorrect initial end position, " + this.toString();
    int prevStartPos = -1;
    int prevEndPos = -1;

    int startPos = spans.nextStartPosition();
    // There is always at least one start/end position per matching doc.
    assert startPos != Spans.NO_MORE_POSITIONS : "initial startPos NO_MORE_POSITIONS, " + this.toString();
    do {
      assert startPos >= prevStartPos;
      int endPos = spans.endPosition();
      assert endPos != Spans.NO_MORE_POSITIONS;
      // This assertion can fail for Or spans on the same term:
      // assert (startPos != prevStartPos) || (endPos > prevEndPos) : "non increased endPos="+endPos;
      assert (startPos != prevStartPos) || (endPos >= prevEndPos) : "decreased endPos="+endPos;
      numMatches++;
      if (docScorer == null) {  // scores not required, break out here
        freq = 1;
        return;
      }
      // Shorter (tighter) spans contribute a larger slop factor to freq.
      freq += docScorer.computeSlopFactor(spans.width());
      spans.doCurrentSpans();
      prevStartPos = startPos;
      prevEndPos = endPos;
      startPos = spans.nextStartPosition();
    } while (startPos != Spans.NO_MORE_POSITIONS);

    // All positions for this doc must have been fully consumed.
    assert spans.startPosition() == Spans.NO_MORE_POSITIONS : "incorrect final start position, " + this.toString();
    assert spans.endPosition() == Spans.NO_MORE_POSITIONS : "incorrect final end position, " + this.toString();
  }

  /**
   * Ensure setFreqCurrentDoc is called, if not already called for the current doc.
   */
  private void ensureFreq() throws IOException {
    int currentDoc = docID();
    if (lastScoredDoc != currentDoc) {
      setFreqCurrentDoc();
      lastScoredDoc = currentDoc;
    }
  }

  @Override
  public final float score() throws IOException {
    ensureFreq();
    return scoreCurrentDoc();
  }

  @Override
  public final int freq() throws IOException {
    ensureFreq();
    return numMatches;
  }

  /** Returns the intermediate "sloppy freq" adjusted for edit distance
   *  @lucene.internal */
  final float sloppyFreq() throws IOException {
    ensureFreq();
    return freq;
  }

}

View File

@ -118,7 +118,7 @@ public class SpanTermQuery extends SpanQuery {
final PostingsEnum postings = termsEnum.postings(null, requiredPostings.getRequiredPostings());
float positionsCost = termPositionsCost(termsEnum) * PHRASE_TO_SPAN_TERM_POSITIONS_COST;
return new TermSpans(this, getSimScorer(context), postings, term, positionsCost);
return new TermSpans(getSimScorer(context), postings, term, positionsCost);
}
}

View File

@ -27,7 +27,6 @@ import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
@ -130,8 +129,13 @@ public abstract class SpanWeight extends Weight {
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
return getSpans(context, Postings.POSITIONS);
public SpanScorer scorer(LeafReaderContext context) throws IOException {
final Spans spans = getSpans(context, Postings.POSITIONS);
if (spans == null) {
return null;
}
final Similarity.SimScorer docScorer = getSimScorer(context);
return new SpanScorer(this, spans, docScorer);
}
/**
@ -146,9 +150,9 @@ public abstract class SpanWeight extends Weight {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
Spans scorer = (Spans) scorer(context);
SpanScorer scorer = scorer(context);
if (scorer != null) {
int newDoc = scorer.advance(doc);
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
float freq = scorer.sloppyFreq();
SimScorer docScorer = similarity.simScorer(simWeight, context);

View File

@ -72,7 +72,7 @@ public final class SpanWithinQuery extends SpanContainQuery {
Spans big = containerContained.get(0);
Spans little = containerContained.get(1);
return new ContainSpans(this, getSimScorer(context), big, little, little) {
return new ContainSpans(big, little, little) {
@Override
boolean twoPhaseCurrentDocMatches() throws IOException {

View File

@ -19,8 +19,9 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
/** Iterates through combinations of start/end positions per-doc.
@ -28,24 +29,10 @@ import org.apache.lucene.search.similarities.Similarity.SimScorer;
* These are enumerated in order, by increasing document number, within that by
* increasing start position and finally by increasing end position.
*/
public abstract class Spans extends Scorer {
public abstract class Spans extends DocIdSetIterator {
public static final int NO_MORE_POSITIONS = Integer.MAX_VALUE;
protected final Similarity.SimScorer docScorer;
protected Spans(SpanWeight weight, SimScorer docScorer) {
super(weight);
this.docScorer = docScorer;
}
/** accumulated sloppy freq (computed in setFreqCurrentDoc) */
protected float freq;
/** number of matches (computed in setFreqCurrentDoc) */
protected int numMatches;
private int lastScoredDoc = -1; // last doc we called setFreqCurrentDoc() for
/**
* Returns the next start position for the current doc.
* There is always at least one start/end position per doc.
@ -97,6 +84,16 @@ public abstract class Spans extends Scorer {
*/
public abstract float positionsCost();
/**
* Optional method: Return a {@link TwoPhaseIterator} view of this
* {@link Scorer}. A return value of {@code null} indicates that
* two-phase iteration is not supported.
* @see Scorer#twoPhaseIterator()
*/
public TwoPhaseIterator asTwoPhaseIterator() {
return null;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@ -109,58 +106,6 @@ public abstract class Spans extends Scorer {
return sb.toString();
}
/**
* Ensure setFreqCurrentDoc is called, if not already called for the current doc.
*/
private void ensureFreq() throws IOException {
int currentDoc = docID();
if (lastScoredDoc != currentDoc) {
setFreqCurrentDoc();
lastScoredDoc = currentDoc;
}
}
/**
* Sets {@link #freq} and {@link #numMatches} for the current document.
* <p>
* This will be called at most once per document.
*/
protected final void setFreqCurrentDoc() throws IOException {
freq = 0.0f;
numMatches = 0;
doStartCurrentDoc();
assert startPosition() == -1 : "incorrect initial start position, " + this.toString();
assert endPosition() == -1 : "incorrect initial end position, " + this.toString();
int prevStartPos = -1;
int prevEndPos = -1;
int startPos = nextStartPosition();
assert startPos != Spans.NO_MORE_POSITIONS : "initial startPos NO_MORE_POSITIONS, " + this.toString();
do {
assert startPos >= prevStartPos;
int endPos = endPosition();
assert endPos != Spans.NO_MORE_POSITIONS;
// This assertion can fail for Or spans on the same term:
// assert (startPos != prevStartPos) || (endPos > prevEndPos) : "non increased endPos="+endPos;
assert (startPos != prevStartPos) || (endPos >= prevEndPos) : "decreased endPos="+endPos;
numMatches++;
if (docScorer == null) { // scores not required, break out here
freq = 1;
return;
}
freq += docScorer.computeSlopFactor(width());
doCurrentSpans();
prevStartPos = startPos;
prevEndPos = endPos;
startPos = nextStartPosition();
} while (startPos != Spans.NO_MORE_POSITIONS);
assert startPosition() == Spans.NO_MORE_POSITIONS : "incorrect final start position, " + this.toString();
assert endPosition() == Spans.NO_MORE_POSITIONS : "incorrect final end position, " + this.toString();
}
/**
* Called before the current doc's frequency is calculated
*/
@ -171,32 +116,4 @@ public abstract class Spans extends Scorer {
*/
protected void doCurrentSpans() throws IOException {}
/**
* Score the current doc. The default implementation scores the doc
* with the similarity using the slop-adjusted {@link #freq}.
*/
protected float scoreCurrentDoc() throws IOException {
assert docScorer != null : getClass() + " has a null docScorer!";
return docScorer.score(docID(), freq);
}
@Override
public final float score() throws IOException {
ensureFreq();
return scoreCurrentDoc();
}
@Override
public final int freq() throws IOException {
ensureFreq();
return numMatches;
}
/** Returns the intermediate "sloppy freq" adjusted for edit distance
* @lucene.internal */
final float sloppyFreq() throws IOException {
ensureFreq();
return freq;
}
}

View File

@ -39,9 +39,8 @@ public class TermSpans extends Spans {
protected boolean readPayload;
private final float positionsCost;
public TermSpans(SpanWeight weight, Similarity.SimScorer scorer,
public TermSpans(Similarity.SimScorer scorer,
PostingsEnum postings, Term term, float positionsCost) {
super(weight, scorer);
this.postings = Objects.requireNonNull(postings);
this.term = Objects.requireNonNull(term);
this.doc = -1;

View File

@ -177,17 +177,7 @@ final class JustCompileSearch {
}
@Override
public int nextDoc() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public int advance(int target) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
}

View File

@ -824,7 +824,7 @@ public class TestBooleanCoord extends LuceneTestCase {
Weight weight = searcher.createNormalizedWeight(query, true);
Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer.docID() == -1 || scorer.docID() == DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, scorer.nextDoc());
assertEquals(0, scorer.iterator().nextDoc());
assertEquals(expected, scorer.score(), 0.0001f);
// test bulk scorer

View File

@ -357,7 +357,7 @@ public class TestBooleanQuery extends LuceneTestCase {
// First pass: just use .nextDoc() to gather all hits
final List<ScoreDoc> hits = new ArrayList<>();
while(scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
while(scorer.iterator().nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
hits.add(new ScoreDoc(scorer.docID(), scorer.score()));
}
@ -384,12 +384,12 @@ public class TestBooleanQuery extends LuceneTestCase {
if (left == 1 || random().nextBoolean()) {
// next
nextUpto = 1+upto;
nextDoc = scorer.nextDoc();
nextDoc = scorer.iterator().nextDoc();
} else {
// advance
int inc = TestUtil.nextInt(random(), 1, left - 1);
nextUpto = inc + upto;
nextDoc = scorer.advance(hits.get(nextUpto).doc);
nextDoc = scorer.iterator().advance(hits.get(nextUpto).doc);
}
if (nextUpto == hits.size()) {
@ -658,7 +658,7 @@ public class TestBooleanQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertTrue(scorer instanceof ConjunctionScorer);
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();
@ -687,7 +687,7 @@ public class TestBooleanQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof DisjunctionScorer);
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();
@ -718,7 +718,7 @@ public class TestBooleanQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertTrue(scorer instanceof BoostedScorer || scorer instanceof ExactPhraseScorer);
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();
@ -747,7 +747,7 @@ public class TestBooleanQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q.build(), random().nextBoolean());
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof ReqExclScorer);
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();
@ -776,7 +776,7 @@ public class TestBooleanQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q.build(), true);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertTrue(scorer instanceof ReqOptSumScorer);
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();

View File

@ -91,7 +91,7 @@ public class TestBooleanRewrites extends LuceneTestCase {
query2.add(new TermQuery(new Term("field", "b")), Occur.SHOULD);
final Weight weight = searcher.createNormalizedWeight(query2.build(), true);
final Scorer scorer = weight.scorer(reader.leaves().get(0));
assertEquals(0, scorer.nextDoc());
assertEquals(0, scorer.iterator().nextDoc());
assertTrue(scorer.getClass().getName(), scorer instanceof FilterScorer);
assertEquals(0f, scorer.score(), 0f);

View File

@ -41,15 +41,9 @@ public class TestCachingCollector extends LuceneTestCase {
public int docID() { return 0; }
@Override
public int nextDoc() throws IOException { return 0; }
@Override
public int advance(int target) throws IOException { return 0; }
@Override
public long cost() {
return 1;
}
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}
}
private static class NoOpCollector extends SimpleCollector {

View File

@ -52,8 +52,8 @@ public class TestConjunctionDISI extends LuceneTestCase {
/**
* Create a {@link Scorer} that wraps the given {@link DocIdSetIterator}. It
* also accepts a {@link TwoPhaseIterator} view, which is exposed in
* {@link Scorer#asTwoPhaseIterator()}. When the two-phase view is not null,
* then {@link Scorer#nextDoc()} and {@link Scorer#advance(int)} will raise
* {@link Scorer#twoPhaseIterator()}. When the two-phase view is not null,
* then {@link DocIdSetIterator#nextDoc()} and {@link DocIdSetIterator#advance(int)} will raise
* an exception in order to make sure that {@link ConjunctionDISI} takes
* advantage of the {@link TwoPhaseIterator} view.
*/
@ -61,7 +61,42 @@ public class TestConjunctionDISI extends LuceneTestCase {
return new Scorer(null) {
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int docID() {
return it.docID();
}
@Override
public int nextDoc() throws IOException {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.nextDoc();
}
@Override
public int advance(int target) throws IOException {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.advance(target);
}
@Override
public long cost() {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.cost();
}
};
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return twoPhaseIterator;
}
@ -73,30 +108,6 @@ public class TestConjunctionDISI extends LuceneTestCase {
return it.docID();
}
@Override
public int nextDoc() throws IOException {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.nextDoc();
}
@Override
public int advance(int target) throws IOException {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.advance(target);
}
@Override
public long cost() {
if (twoPhaseIterator != null) {
throw new UnsupportedOperationException("ConjunctionDISI should call the two-phase iterator");
}
return it.cost();
}
@Override
public float score() throws IOException {
return 0;
@ -154,13 +165,13 @@ public class TestConjunctionDISI extends LuceneTestCase {
final int maxDoc = TestUtil.nextInt(random(), 100, 10000);
final int numIterators = TestUtil.nextInt(random(), 2, 5);
final FixedBitSet[] sets = new FixedBitSet[numIterators];
final DocIdSetIterator[] iterators = new DocIdSetIterator[numIterators];
final Scorer[] iterators = new Scorer[numIterators];
for (int i = 0; i < iterators.length; ++i) {
final FixedBitSet set = randomSet(maxDoc);
if (random().nextBoolean()) {
// simple iterator
sets[i] = set;
iterators[i] = new BitDocIdSet(set).iterator();
iterators[i] = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
} else {
// scorer with approximation
final FixedBitSet confirmed = clearRandomBits(set);
@ -170,7 +181,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
}
}
final ConjunctionDISI conjunction = ConjunctionDISI.intersect(Arrays.asList(iterators));
final ConjunctionDISI conjunction = ConjunctionDISI.intersectScorers(Arrays.asList(iterators));
assertEquals(intersect(sets), toBitSet(maxDoc, conjunction));
}
}
@ -182,14 +193,14 @@ public class TestConjunctionDISI extends LuceneTestCase {
final int maxDoc = TestUtil.nextInt(random(), 100, 10000);
final int numIterators = TestUtil.nextInt(random(), 2, 5);
final FixedBitSet[] sets = new FixedBitSet[numIterators];
final DocIdSetIterator[] iterators = new DocIdSetIterator[numIterators];
final Scorer[] iterators = new Scorer[numIterators];
boolean hasApproximation = false;
for (int i = 0; i < iterators.length; ++i) {
final FixedBitSet set = randomSet(maxDoc);
if (random().nextBoolean()) {
// simple iterator
sets[i] = set;
iterators[i] = new BitDocIdSet(set).iterator();
iterators[i] = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
} else {
// scorer with approximation
final FixedBitSet confirmed = clearRandomBits(set);
@ -200,7 +211,7 @@ public class TestConjunctionDISI extends LuceneTestCase {
}
}
final ConjunctionDISI conjunction = ConjunctionDISI.intersect(Arrays.asList(iterators));
final ConjunctionDISI conjunction = ConjunctionDISI.intersectScorers(Arrays.asList(iterators));
TwoPhaseIterator twoPhaseIterator = conjunction.asTwoPhaseIterator();
assertEquals(hasApproximation, twoPhaseIterator != null);
if (hasApproximation) {
@ -216,15 +227,15 @@ public class TestConjunctionDISI extends LuceneTestCase {
final int maxDoc = TestUtil.nextInt(random(), 100, 10000);
final int numIterators = TestUtil.nextInt(random(), 2, 5);
final FixedBitSet[] sets = new FixedBitSet[numIterators];
DocIdSetIterator conjunction = null;
Scorer conjunction = null;
boolean hasApproximation = false;
for (int i = 0; i < numIterators; ++i) {
final FixedBitSet set = randomSet(maxDoc);
final DocIdSetIterator newIterator;
final Scorer newIterator;
if (random().nextBoolean()) {
// simple iterator
sets[i] = set;
newIterator = new BitDocIdSet(set).iterator();
newIterator = new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator());
} else {
// scorer with approximation
final FixedBitSet confirmed = clearRandomBits(set);
@ -237,17 +248,17 @@ public class TestConjunctionDISI extends LuceneTestCase {
if (conjunction == null) {
conjunction = newIterator;
} else {
final ConjunctionDISI conj = ConjunctionDISI.intersect(Arrays.asList(conjunction, newIterator));
final ConjunctionDISI conj = ConjunctionDISI.intersectScorers(Arrays.asList(conjunction, newIterator));
conjunction = scorer(conj, conj.asTwoPhaseIterator());
}
}
TwoPhaseIterator twoPhaseIterator = ((Scorer) conjunction).asTwoPhaseIterator();
TwoPhaseIterator twoPhaseIterator = ((Scorer) conjunction).twoPhaseIterator();
assertEquals(hasApproximation, twoPhaseIterator != null);
if (hasApproximation) {
assertEquals(intersect(sets), toBitSet(maxDoc, TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator)));
} else {
assertEquals(intersect(sets), toBitSet(maxDoc, conjunction));
assertEquals(intersect(sets), toBitSet(maxDoc, conjunction.iterator()));
}
}
}
@ -258,47 +269,47 @@ public class TestConjunctionDISI extends LuceneTestCase {
final int maxDoc = TestUtil.nextInt(random(), 100, 10000);
final int numIterators = TestUtil.nextInt(random(), 5, 10);
final FixedBitSet[] sets = new FixedBitSet[numIterators];
final List<DocIdSetIterator> iterators = new LinkedList<>();
final List<Scorer> scorers = new LinkedList<>();
for (int i = 0; i < numIterators; ++i) {
final FixedBitSet set = randomSet(maxDoc);
if (random().nextBoolean()) {
// simple iterator
sets[i] = set;
iterators.add(new BitDocIdSet(set).iterator());
scorers.add(new ConstantScoreScorer(null, 0f, new BitDocIdSet(set).iterator()));
} else {
// scorer with approximation
final FixedBitSet confirmed = clearRandomBits(set);
sets[i] = confirmed;
final TwoPhaseIterator approximation = approximation(new BitDocIdSet(set).iterator(), confirmed);
iterators.add(scorer(approximation));
scorers.add(scorer(approximation));
}
}
// make some sub sequences into sub conjunctions
final int subIters = atLeast(3);
for (int subIter = 0; subIter < subIters && iterators.size() > 3; ++subIter) {
final int subSeqStart = TestUtil.nextInt(random(), 0, iterators.size() - 2);
final int subSeqEnd = TestUtil.nextInt(random(), subSeqStart + 2, iterators.size());
List<DocIdSetIterator> subIterators = iterators.subList(subSeqStart, subSeqEnd);
DocIdSetIterator subConjunction;
for (int subIter = 0; subIter < subIters && scorers.size() > 3; ++subIter) {
final int subSeqStart = TestUtil.nextInt(random(), 0, scorers.size() - 2);
final int subSeqEnd = TestUtil.nextInt(random(), subSeqStart + 2, scorers.size());
List<Scorer> subIterators = scorers.subList(subSeqStart, subSeqEnd);
Scorer subConjunction;
if (wrapWithScorer) {
subConjunction = new ConjunctionScorer(null, subIterators, Collections.emptyList());
} else {
subConjunction = ConjunctionDISI.intersect(subIterators);
subConjunction = new ConstantScoreScorer(null, 0f, ConjunctionDISI.intersectScorers(subIterators));
}
iterators.set(subSeqStart, subConjunction);
scorers.set(subSeqStart, subConjunction);
int toRemove = subSeqEnd - subSeqStart - 1;
while (toRemove-- > 0) {
iterators.remove(subSeqStart + 1);
scorers.remove(subSeqStart + 1);
}
}
if (iterators.size() == 1) {
if (scorers.size() == 1) {
// ConjunctionDISI needs two iterators
iterators.add(DocIdSetIterator.all(maxDoc));
scorers.add(new ConstantScoreScorer(null, 0f, DocIdSetIterator.all(maxDoc)));
}
final ConjunctionDISI conjunction = ConjunctionDISI.intersect(iterators);
final ConjunctionDISI conjunction = ConjunctionDISI.intersectScorers(scorers);
assertEquals(intersect(sets), toBitSet(maxDoc, conjunction));
}
}

View File

@ -216,7 +216,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
final Weight weight = searcher.createNormalizedWeight(q, true);
final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertNotNull(scorer.asTwoPhaseIterator());
assertNotNull(scorer.twoPhaseIterator());
reader.close();
w.close();

View File

@ -180,7 +180,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
final Weight dw = s.createNormalizedWeight(dq, true);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
final Scorer ds = dw.scorer(context);
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
final boolean skipOk = ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
fail("firsttime skipTo found a match? ... "
+ r.document(ds.docID()).get("id"));
@ -197,7 +197,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
final Scorer ds = dw.scorer(context);
assertTrue("firsttime skipTo found no match",
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
}

View File

@ -147,36 +147,40 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
private void assertNext(Scorer expected, Scorer actual) throws Exception {
if (actual == null) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, expected.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, expected.iterator().nextDoc());
return;
}
int doc;
while ((doc = expected.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(doc, actual.nextDoc());
DocIdSetIterator expectedIt = expected.iterator();
DocIdSetIterator actualIt = actual.iterator();
while ((doc = expectedIt.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(doc, actualIt.nextDoc());
assertEquals(expected.freq(), actual.freq());
float expectedScore = expected.score();
float actualScore = actual.score();
assertEquals(expectedScore, actualScore, CheckHits.explainToleranceDelta(expectedScore, actualScore));
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, actual.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, actualIt.nextDoc());
}
private void assertAdvance(Scorer expected, Scorer actual, int amount) throws Exception {
if (actual == null) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, expected.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, expected.iterator().nextDoc());
return;
}
DocIdSetIterator expectedIt = expected.iterator();
DocIdSetIterator actualIt = actual.iterator();
int prevDoc = 0;
int doc;
while ((doc = expected.advance(prevDoc+amount)) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(doc, actual.advance(prevDoc+amount));
while ((doc = expectedIt.advance(prevDoc+amount)) != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(doc, actualIt.advance(prevDoc+amount));
assertEquals(expected.freq(), actual.freq());
float expectedScore = expected.score();
float actualScore = actual.score();
assertEquals(expectedScore, actualScore, CheckHits.explainToleranceDelta(expectedScore, actualScore));
prevDoc = doc;
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, actual.advance(prevDoc+amount));
assertEquals(DocIdSetIterator.NO_MORE_DOCS, actualIt.advance(prevDoc+amount));
}
/** simple test for next(): minShouldMatch=2 on 3 terms (one common, one medium, one rare) */
@ -361,37 +365,48 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
}
@Override
public int nextDoc() throws IOException {
assert currentDoc != NO_MORE_DOCS;
for (currentDoc = currentDoc+1; currentDoc < maxDoc; currentDoc++) {
currentMatched = 0;
score = 0;
dv.setDocument(currentDoc);
long ord;
while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
if (ords.contains(ord)) {
currentMatched++;
score += sims[(int)ord].score(currentDoc, 1);
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int nextDoc() throws IOException {
assert currentDoc != NO_MORE_DOCS;
for (currentDoc = currentDoc+1; currentDoc < maxDoc; currentDoc++) {
currentMatched = 0;
score = 0;
dv.setDocument(currentDoc);
long ord;
while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
if (ords.contains(ord)) {
currentMatched++;
score += sims[(int)ord].score(currentDoc, 1);
}
}
if (currentMatched >= minNrShouldMatch) {
return currentDoc;
}
}
return currentDoc = NO_MORE_DOCS;
}
if (currentMatched >= minNrShouldMatch) {
@Override
public int advance(int target) throws IOException {
int doc;
while ((doc = nextDoc()) < target) {
}
return doc;
}
@Override
public long cost() {
return maxDoc;
}
@Override
public int docID() {
return currentDoc;
}
}
return currentDoc = NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
int doc;
while ((doc = nextDoc()) < target) {
}
return doc;
}
@Override
public long cost() {
return maxDoc;
};
}
}
}

View File

@ -43,19 +43,29 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
@Override public int docID() { return idx; }
@Override public int nextDoc() {
return ++idx != scores.length ? idx : NO_MORE_DOCS;
}
@Override public int advance(int target) {
idx = target;
return idx < scores.length ? idx : NO_MORE_DOCS;
}
@Override
public long cost() {
return scores.length;
}
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int docID() {
return idx;
}
@Override public int nextDoc() {
return ++idx != scores.length ? idx : NO_MORE_DOCS;
}
@Override public int advance(int target) {
idx = target;
return idx < scores.length ? idx : NO_MORE_DOCS;
}
@Override
public long cost() {
return scores.length;
}
};
}
}
// The scores must have positive as well as negative values
@ -90,7 +100,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
Collector c = new PositiveScoresOnlyCollector(tdc);
LeafCollector ac = c.getLeafCollector(ir.leaves().get(0));
ac.setScorer(s);
while (s.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
while (s.iterator().nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ac.collect(0);
}
TopDocs td = tdc.topDocs();

View File

@ -446,23 +446,34 @@ public class TestQueryRescorer extends LuceneTestCase {
}
@Override
public long cost() {
return 1;
}
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int nextDoc() {
docID++;
if (docID >= context.reader().maxDoc()) {
return NO_MORE_DOCS;
}
return docID;
}
@Override
public int docID() {
return docID;
}
@Override
public int advance(int target) {
docID = target;
return docID;
@Override
public long cost() {
return 1;
}
@Override
public int nextDoc() {
docID++;
if (docID >= context.reader().maxDoc()) {
return NO_MORE_DOCS;
}
return docID;
}
@Override
public int advance(int target) {
docID = target;
return docID;
}
};
}
@Override

View File

@ -49,18 +49,25 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
@Override public int docID() { return doc; }
@Override public int nextDoc() {
return ++doc < scores.length ? doc : NO_MORE_DOCS;
}
@Override public int advance(int target) {
doc = target;
return doc < scores.length ? doc : NO_MORE_DOCS;
}
@Override
public long cost() {
return scores.length;
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override public int docID() { return doc; }
@Override public int nextDoc() {
return ++doc < scores.length ? doc : NO_MORE_DOCS;
}
@Override public int advance(int target) {
doc = target;
return doc < scores.length ? doc : NO_MORE_DOCS;
}
@Override
public long cost() {
return scores.length;
}
};
}
}
@ -116,7 +123,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
// We need to iterate on the scorer so that its doc() advances.
int doc;
while ((doc = s.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while ((doc = s.iterator().nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
scc.collect(doc);
}

View File

@ -133,11 +133,11 @@ public class TestTermScorer extends LuceneTestCase {
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context);
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
ts.iterator().nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
ts.iterator().nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("next returned a doc and it should not have",
ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
ts.iterator().nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
}
public void testAdvance() throws Exception {
@ -149,7 +149,7 @@ public class TestTermScorer extends LuceneTestCase {
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
Scorer ts = weight.scorer(context);
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("Didn't skip", ts.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS);
// The next doc should be doc 5
assertTrue("doc should be number 5", ts.docID() == 5);
}
@ -186,7 +186,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery, true);
try {
weight.scorer(forbiddenNorms.getContext()).nextDoc();
weight.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
fail("Should load norms");
} catch (AssertionError e) {
// ok
@ -194,6 +194,6 @@ public class TestTermScorer extends LuceneTestCase {
weight = indexSearcher.createNormalizedWeight(termQuery, false);
// should not fail this time since norms are not necessary
weight.scorer(forbiddenNorms.getContext()).nextDoc();
weight.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
}
}

View File

@ -236,18 +236,8 @@ public class TestTopFieldCollector extends LuceneTestCase {
}
@Override
public int nextDoc() throws IOException {
return scorer.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return scorer.advance(target);
}
@Override
public long cost() {
return scorer.cost();
public DocIdSetIterator iterator() {
return scorer.iterator();
}
};

View File

@ -34,10 +34,6 @@ final class JustCompileSearchSpans {
static final class JustCompileSpans extends Spans {
JustCompileSpans() {
super(null, null);
}
@Override
public int docID() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);

View File

@ -18,8 +18,6 @@ package org.apache.lucene.search.spans;
*/
import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Test;
@ -29,13 +27,9 @@ public class TestFilterSpans extends LuceneTestCase {
@Test
public void testOverrides() throws Exception {
// verify that all methods of Spans are overridden by FilterSpans,
// except those under the 'exclude' list
Set<Method> exclude = new HashSet<>();
exclude.add(FilterSpans.class.getMethod("freq"));
exclude.add(FilterSpans.class.getMethod("score"));
for (Method m : FilterSpans.class.getMethods()) {
if (m.getDeclaringClass() == Spans.class) {
assertTrue("method " + m.getName() + " not overridden!", exclude.contains(m));
fail("method " + m.getName() + " not overridden!");
}
}
}

View File

@ -194,7 +194,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
LeafReaderContext leave = topReaderContext.leaves().get(0);
Scorer s = w.scorer(leave);
assertEquals(1, s.advance(1));
assertEquals(1, s.iterator().advance(1));
}
public void testOverlappedOrderedSpan() throws Exception {

View File

@ -303,12 +303,12 @@ public class TestSpans extends LuceneTestCase {
searcher.setSimilarity(oldSim);
}
if (i == subIndex) {
assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("first doc", spanScorer.iterator().nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11);
float score = spanScorer.score();
assertTrue("first doc score should be zero, " + score, score == 0.0f);
} else {
assertTrue("no second doc", spanScorer == null || spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
assertTrue("no second doc", spanScorer == null || spanScorer.iterator().nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
}
}
}

View File

@ -19,6 +19,7 @@ package org.apache.lucene.expressions;
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
class FakeScorer extends Scorer {
@ -37,17 +38,7 @@ class FakeScorer extends Scorer {
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}

View File

@ -128,17 +128,19 @@ public class TestExpressionValueSource extends LuceneTestCase {
// everything
ValueSourceScorer scorer = values.getRangeScorer(leaf.reader(), "4", "40", true, true);
assertEquals(-1, scorer.docID());
assertEquals(0, scorer.nextDoc());
assertEquals(1, scorer.nextDoc());
assertEquals(2, scorer.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.nextDoc());
DocIdSetIterator iter = scorer.iterator();
assertEquals(-1, iter.docID());
assertEquals(0, iter.nextDoc());
assertEquals(1, iter.nextDoc());
assertEquals(2, iter.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, iter.nextDoc());
// just the first doc
scorer = values.getRangeScorer(leaf.reader(), "4", "40", false, false);
iter = scorer.iterator();
assertEquals(-1, scorer.docID());
assertEquals(0, scorer.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, scorer.nextDoc());
assertEquals(0, iter.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, iter.nextDoc());
}
public void testEquals() throws Exception {

View File

@ -44,6 +44,7 @@ class DrillSidewaysScorer extends BulkScorer {
// DrillDown DocsEnums:
private final Scorer baseScorer;
private final DocIdSetIterator baseIterator;
private final LeafReaderContext context;
@ -60,13 +61,14 @@ class DrillSidewaysScorer extends BulkScorer {
this.dims = dims;
this.context = context;
this.baseScorer = baseScorer;
this.baseIterator = baseScorer.iterator();
this.drillDownCollector = drillDownCollector;
this.scoreSubDocsAtOnce = scoreSubDocsAtOnce;
}
@Override
public long cost() {
return baseScorer.cost();
return baseIterator.cost();
}
@Override
@ -94,13 +96,8 @@ class DrillSidewaysScorer extends BulkScorer {
dim.sidewaysLeafCollector.setScorer(scorer);
}
// TODO: if we ever allow null baseScorer ... it will
// mean we DO score docs out of order ... hmm, or if we
// change up the order of the conjuntions below
assert baseScorer != null;
// some scorers, eg ReqExlScorer, can hit NPE if cost is called after nextDoc
long baseQueryCost = baseScorer.cost();
long baseQueryCost = baseIterator.cost();
final int numDims = dims.length;
@ -115,7 +112,7 @@ class DrillSidewaysScorer extends BulkScorer {
}
// Position all scorers to their first matching doc:
baseScorer.nextDoc();
baseIterator.nextDoc();
for (DocsAndCost dim : dims) {
dim.approximation.nextDoc();
}
@ -157,7 +154,7 @@ class DrillSidewaysScorer extends BulkScorer {
nextDoc: while (docID != PostingsEnum.NO_MORE_DOCS) {
if (acceptDocs != null && acceptDocs.get(docID) == false) {
docID = baseScorer.nextDoc();
docID = baseIterator.nextDoc();
continue;
}
LeafCollector failedCollector = null;
@ -182,7 +179,7 @@ class DrillSidewaysScorer extends BulkScorer {
// More than one dim fails on this document, so
// it's neither a hit nor a near-miss; move to
// next doc:
docID = baseScorer.nextDoc();
docID = baseIterator.nextDoc();
continue nextDoc;
} else {
failedCollector = dim.sidewaysLeafCollector;
@ -204,7 +201,7 @@ class DrillSidewaysScorer extends BulkScorer {
collectNearMiss(failedCollector);
}
docID = baseScorer.nextDoc();
docID = baseIterator.nextDoc();
}
}
@ -316,9 +313,9 @@ class DrillSidewaysScorer extends BulkScorer {
int ddDocID = docIDs[slot0];
assert ddDocID != -1;
int baseDocID = baseScorer.docID();
int baseDocID = baseIterator.docID();
if (baseDocID < ddDocID) {
baseDocID = baseScorer.advance(ddDocID);
baseDocID = baseIterator.advance(ddDocID);
}
if (baseDocID == ddDocID) {
//if (DEBUG) {
@ -437,7 +434,7 @@ class DrillSidewaysScorer extends BulkScorer {
// System.out.println("\ncycle nextChunkStart=" + nextChunkStart + " docIds[0]=" + docIDs[0]);
//}
int filledCount = 0;
int docID = baseScorer.docID();
int docID = baseIterator.docID();
//if (DEBUG) {
// System.out.println(" base docID=" + docID);
//}
@ -456,7 +453,7 @@ class DrillSidewaysScorer extends BulkScorer {
missingDims[slot] = 0;
counts[slot] = 1;
}
docID = baseScorer.nextDoc();
docID = baseIterator.nextDoc();
}
if (filledCount == 0) {
@ -589,11 +586,6 @@ class DrillSidewaysScorer extends BulkScorer {
public FakeScorer() {
super(null);
}
@Override
public int advance(int target) {
throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
}
@Override
public int docID() {
@ -606,7 +598,7 @@ class DrillSidewaysScorer extends BulkScorer {
}
@Override
public int nextDoc() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
}
@ -615,11 +607,6 @@ class DrillSidewaysScorer extends BulkScorer {
return collectScore;
}
@Override
public long cost() {
return baseScorer.cost();
}
@Override
public Collection<ChildScorer> getChildren() {
return Collections.singletonList(new ChildScorer(baseScorer, "MUST"));
@ -640,9 +627,9 @@ class DrillSidewaysScorer extends BulkScorer {
LeafCollector sidewaysLeafCollector;
DocsAndCost(Scorer scorer, Collector sidewaysCollector) {
final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator();
final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
if (twoPhase == null) {
this.approximation = scorer;
this.approximation = scorer.iterator();
this.twoPhase = null;
} else {
this.approximation = twoPhase.approximation();

View File

@ -164,10 +164,11 @@ public final class DoubleRange extends Range {
if (fastMatchWeight == null) {
approximation = DocIdSetIterator.all(maxDoc);
} else {
approximation = fastMatchWeight.scorer(context);
if (approximation == null) {
Scorer s = fastMatchWeight.scorer(context);
if (s == null) {
return null;
}
approximation = s.iterator();
}
final FunctionValues values = valueSource.getValues(Collections.emptyMap(), context);

View File

@ -36,6 +36,7 @@ import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.NumericUtils;
@ -105,10 +106,11 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight fastMatchWeight = searcher.createNormalizedWeight(fastMatchQuery, false);
fastMatchDocs = fastMatchWeight.scorer(hits.context);
if (fastMatchDocs == null) {
Scorer s = fastMatchWeight.scorer(hits.context);
if (s == null) {
continue;
}
fastMatchDocs = s.iterator();
} else {
fastMatchDocs = null;
}

View File

@ -156,10 +156,11 @@ public final class LongRange extends Range {
if (fastMatchWeight == null) {
approximation = DocIdSetIterator.all(maxDoc);
} else {
approximation = fastMatchWeight.scorer(context);
if (approximation == null) {
Scorer s = fastMatchWeight.scorer(context);
if (s == null) {
return null;
}
approximation = s.iterator();
}
final FunctionValues values = valueSource.getValues(Collections.emptyMap(), context);

View File

@ -33,6 +33,7 @@ import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
/** {@link Facets} implementation that computes counts for
@ -86,10 +87,11 @@ public class LongRangeFacetCounts extends RangeFacetCounts {
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight fastMatchWeight = searcher.createNormalizedWeight(fastMatchQuery, false);
fastMatchDocs = fastMatchWeight.scorer(hits.context);
if (fastMatchDocs == null) {
Scorer s = fastMatchWeight.scorer(hits.context);
if (s == null) {
continue;
}
fastMatchDocs = s.iterator();
} else {
fastMatchDocs = null;
}

View File

@ -19,6 +19,7 @@ package org.apache.lucene.facet.taxonomy;
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
class FakeScorer extends Scorer {
@ -37,17 +38,7 @@ class FakeScorer extends Scorer {
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}

View File

@ -478,7 +478,12 @@ public class BlockGroupingCollector extends SimpleCollector {
subDocUpto = 0;
docBase = readerContext.docBase;
//System.out.println("setNextReader base=" + docBase + " r=" + readerContext.reader);
lastDocPerGroupBits = lastDocPerGroup.scorer(readerContext);
Scorer s = lastDocPerGroup.scorer(readerContext);
if (s == null) {
lastDocPerGroupBits = null;
} else {
lastDocPerGroupBits = s.iterator();
}
groupEndDocID = -1;
currentReaderContext = readerContext;

View File

@ -19,6 +19,7 @@ package org.apache.lucene.search.grouping;
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
class FakeScorer extends Scorer {
@ -37,17 +38,7 @@ class FakeScorer extends Scorer {
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}

View File

@ -22,21 +22,20 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.LongBitSet;
import java.io.IOException;
abstract class BaseGlobalOrdinalScorer extends Scorer {
final SortedDocValues values;
final Scorer approximationScorer;
final DocIdSetIterator approximation;
float score;
public BaseGlobalOrdinalScorer(Weight weight, SortedDocValues values, Scorer approximationScorer) {
public BaseGlobalOrdinalScorer(Weight weight, SortedDocValues values, DocIdSetIterator approximationScorer) {
super(weight);
this.values = values;
this.approximationScorer = approximationScorer;
this.approximation = approximationScorer;
}
@Override
@ -46,45 +45,19 @@ abstract class BaseGlobalOrdinalScorer extends Scorer {
@Override
public int docID() {
return approximationScorer.docID();
return approximation.docID();
}
@Override
public int nextDoc() throws IOException {
return advance(approximationScorer.docID() + 1);
public DocIdSetIterator iterator() {
return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
final DocIdSetIterator approximation = new DocIdSetIterator() {
@Override
public int docID() {
return approximationScorer.docID();
}
@Override
public int nextDoc() throws IOException {
return approximationScorer.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return approximationScorer.advance(target);
}
@Override
public long cost() {
return approximationScorer.cost();
}
};
public TwoPhaseIterator twoPhaseIterator() {
return createTwoPhaseIterator(approximation);
}
@Override
public long cost() {
return approximationScorer.cost();
}
@Override
public int freq() throws IOException {
return 1;

View File

@ -19,6 +19,7 @@ package org.apache.lucene.search.join;
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
class FakeScorer extends Scorer {
@ -37,17 +38,7 @@ class FakeScorer extends Scorer {
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}

View File

@ -135,9 +135,9 @@ final class GlobalOrdinalsQuery extends Query {
return null;
}
if (globalOrds != null) {
return new OrdinalMapScorer(this, score(), foundOrds, values, approximationScorer, globalOrds.getGlobalOrds(context.ord));
return new OrdinalMapScorer(this, score(), foundOrds, values, approximationScorer.iterator(), globalOrds.getGlobalOrds(context.ord));
} {
return new SegmentOrdinalScorer(this, score(), foundOrds, values, approximationScorer);
return new SegmentOrdinalScorer(this, score(), foundOrds, values, approximationScorer.iterator());
}
}
@ -148,34 +148,20 @@ final class GlobalOrdinalsQuery extends Query {
final LongBitSet foundOrds;
final LongValues segmentOrdToGlobalOrdLookup;
public OrdinalMapScorer(Weight weight, float score, LongBitSet foundOrds, SortedDocValues values, Scorer approximationScorer, LongValues segmentOrdToGlobalOrdLookup) {
public OrdinalMapScorer(Weight weight, float score, LongBitSet foundOrds, SortedDocValues values, DocIdSetIterator approximationScorer, LongValues segmentOrdToGlobalOrdLookup) {
super(weight, values, approximationScorer);
this.score = score;
this.foundOrds = foundOrds;
this.segmentOrdToGlobalOrdLookup = segmentOrdToGlobalOrdLookup;
}
@Override
public int advance(int target) throws IOException {
for (int docID = approximationScorer.advance(target); docID < NO_MORE_DOCS; docID = approximationScorer.nextDoc()) {
final long segmentOrd = values.getOrd(docID);
if (segmentOrd != -1) {
final long globalOrd = segmentOrdToGlobalOrdLookup.get(segmentOrd);
if (foundOrds.get(globalOrd)) {
return docID;
}
}
}
return NO_MORE_DOCS;
}
@Override
protected TwoPhaseIterator createTwoPhaseIterator(DocIdSetIterator approximation) {
return new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
final long segmentOrd = values.getOrd(approximationScorer.docID());
final long segmentOrd = values.getOrd(approximation.docID());
if (segmentOrd != -1) {
final long globalOrd = segmentOrdToGlobalOrdLookup.get(segmentOrd);
if (foundOrds.get(globalOrd)) {
@ -197,32 +183,19 @@ final class GlobalOrdinalsQuery extends Query {
final LongBitSet foundOrds;
public SegmentOrdinalScorer(Weight weight, float score, LongBitSet foundOrds, SortedDocValues values, Scorer approximationScorer) {
public SegmentOrdinalScorer(Weight weight, float score, LongBitSet foundOrds, SortedDocValues values, DocIdSetIterator approximationScorer) {
super(weight, values, approximationScorer);
this.score = score;
this.foundOrds = foundOrds;
}
@Override
public int advance(int target) throws IOException {
for (int docID = approximationScorer.advance(target); docID < NO_MORE_DOCS; docID = approximationScorer.nextDoc()) {
final long segmentOrd = values.getOrd(docID);
if (segmentOrd != -1) {
if (foundOrds.get(segmentOrd)) {
return docID;
}
}
}
return NO_MORE_DOCS;
}
@Override
protected TwoPhaseIterator createTwoPhaseIterator(DocIdSetIterator approximation) {
return new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
final long segmentOrd = values.getOrd(approximationScorer.docID());
final long segmentOrd = values.getOrd(approximation.docID());
if (segmentOrd != -1) {
if (foundOrds.get(segmentOrd)) {
return true;

View File

@ -160,9 +160,9 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
if (approximationScorer == null) {
return null;
} else if (globalOrds != null) {
return new OrdinalMapScorer(this, collector, values, approximationScorer, globalOrds.getGlobalOrds(context.ord));
return new OrdinalMapScorer(this, collector, values, approximationScorer.iterator(), globalOrds.getGlobalOrds(context.ord));
} else {
return new SegmentOrdinalScorer(this, collector, values, approximationScorer);
return new SegmentOrdinalScorer(this, collector, values, approximationScorer.iterator());
}
}
@ -173,34 +173,19 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
final LongValues segmentOrdToGlobalOrdLookup;
final GlobalOrdinalsWithScoreCollector collector;
public OrdinalMapScorer(Weight weight, GlobalOrdinalsWithScoreCollector collector, SortedDocValues values, Scorer approximationScorer, LongValues segmentOrdToGlobalOrdLookup) {
super(weight, values, approximationScorer);
public OrdinalMapScorer(Weight weight, GlobalOrdinalsWithScoreCollector collector, SortedDocValues values, DocIdSetIterator approximation, LongValues segmentOrdToGlobalOrdLookup) {
super(weight, values, approximation);
this.segmentOrdToGlobalOrdLookup = segmentOrdToGlobalOrdLookup;
this.collector = collector;
}
@Override
public int advance(int target) throws IOException {
for (int docID = approximationScorer.advance(target); docID < NO_MORE_DOCS; docID = approximationScorer.nextDoc()) {
final long segmentOrd = values.getOrd(docID);
if (segmentOrd != -1) {
final int globalOrd = (int) segmentOrdToGlobalOrdLookup.get(segmentOrd);
if (collector.match(globalOrd)) {
score = collector.score(globalOrd);
return docID;
}
}
}
return NO_MORE_DOCS;
}
@Override
protected TwoPhaseIterator createTwoPhaseIterator(DocIdSetIterator approximation) {
return new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
final long segmentOrd = values.getOrd(approximationScorer.docID());
final long segmentOrd = values.getOrd(approximation.docID());
if (segmentOrd != -1) {
final int globalOrd = (int) segmentOrdToGlobalOrdLookup.get(segmentOrd);
if (collector.match(globalOrd)) {
@ -223,32 +208,18 @@ final class GlobalOrdinalsWithScoreQuery extends Query {
final GlobalOrdinalsWithScoreCollector collector;
public SegmentOrdinalScorer(Weight weight, GlobalOrdinalsWithScoreCollector collector, SortedDocValues values, Scorer approximationScorer) {
super(weight, values, approximationScorer);
public SegmentOrdinalScorer(Weight weight, GlobalOrdinalsWithScoreCollector collector, SortedDocValues values, DocIdSetIterator approximation) {
super(weight, values, approximation);
this.collector = collector;
}
@Override
public int advance(int target) throws IOException {
for (int docID = approximationScorer.advance(target); docID < NO_MORE_DOCS; docID = approximationScorer.nextDoc()) {
final int segmentOrd = values.getOrd(docID);
if (segmentOrd != -1) {
if (collector.match(segmentOrd)) {
score = collector.score(segmentOrd);
return docID;
}
}
}
return NO_MORE_DOCS;
}
@Override
protected TwoPhaseIterator createTwoPhaseIterator(DocIdSetIterator approximation) {
return new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
final int segmentOrd = values.getOrd(approximationScorer.docID());
final int segmentOrd = values.getOrd(approximation.docID());
if (segmentOrd != -1) {
if (collector.match(segmentOrd)) {
score = collector.score(segmentOrd);

View File

@ -27,9 +27,9 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
@ -68,12 +68,12 @@ public class QueryBitSetProducer implements BitSetProducer {
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(query, false);
final DocIdSetIterator it = weight.scorer(context);
final Scorer s = weight.scorer(context);
if (it == null) {
if (s == null) {
docIdSet = DocIdSet.EMPTY;
} else {
docIdSet = new BitDocIdSet(BitSet.of(it, context.reader().maxDoc()));
docIdSet = new BitDocIdSet(BitSet.of(s.iterator(), context.reader().maxDoc()));
}
cache.put(key, docIdSet);
}

View File

@ -180,8 +180,6 @@ class TermsIncludingScoreQuery extends Query {
final float[] scores;
final long cost;
int currentDoc = -1;
SVInOrderScorer(Weight weight, TermsEnum termsEnum, int maxDoc, long cost) throws IOException {
super(weight);
FixedBitSet matchingDocs = new FixedBitSet(maxDoc);
@ -210,7 +208,7 @@ class TermsIncludingScoreQuery extends Query {
@Override
public float score() throws IOException {
return scores[currentDoc];
return scores[docID()];
}
@Override
@ -220,23 +218,14 @@ class TermsIncludingScoreQuery extends Query {
@Override
public int docID() {
return currentDoc;
return matchingDocsIterator.docID();
}
@Override
public int nextDoc() throws IOException {
return currentDoc = matchingDocsIterator.nextDoc();
public DocIdSetIterator iterator() {
return matchingDocsIterator;
}
@Override
public int advance(int target) throws IOException {
return currentDoc = matchingDocsIterator.advance(target);
}
@Override
public long cost() {
return cost;
}
}
// This scorer deals with the fact that a document can have more than one score from multiple related documents.

View File

@ -26,6 +26,7 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@ -141,7 +142,7 @@ public class ToChildBlockJoinQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
ToChildBlockJoinScorer scorer = (ToChildBlockJoinScorer) scorer(context);
if (scorer != null && scorer.advance(doc) == doc) {
if (scorer != null && scorer.iterator().advance(doc) == doc) {
int parentDoc = scorer.getParentDoc();
return Explanation.match(
scorer.score(),
@ -155,6 +156,7 @@ public class ToChildBlockJoinQuery extends Query {
static class ToChildBlockJoinScorer extends Scorer {
private final Scorer parentScorer;
private final DocIdSetIterator parentIt;
private final BitSet parentBits;
private final boolean doScores;
@ -169,6 +171,7 @@ public class ToChildBlockJoinQuery extends Query {
this.doScores = doScores;
this.parentBits = parentBits;
this.parentScorer = parentScorer;
this.parentIt = parentScorer.iterator();
}
@Override
@ -177,69 +180,127 @@ public class ToChildBlockJoinQuery extends Query {
}
@Override
public int nextDoc() throws IOException {
//System.out.println("Q.nextDoc() parentDoc=" + parentDoc + " childDoc=" + childDoc);
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
@Override
public int docID() {
return childDoc;
}
@Override
public int nextDoc() throws IOException {
//System.out.println("Q.nextDoc() parentDoc=" + parentDoc + " childDoc=" + childDoc);
while (true) {
if (childDoc+1 == parentDoc) {
// OK, we are done iterating through all children
// matching this one parent doc, so we now nextDoc()
// the parent. Use a while loop because we may have
// to skip over some number of parents w/ no
// children:
while (true) {
parentDoc = parentScorer.nextDoc();
validateParentDoc();
if (childDoc+1 == parentDoc) {
// OK, we are done iterating through all children
// matching this one parent doc, so we now nextDoc()
// the parent. Use a while loop because we may have
// to skip over some number of parents w/ no
// children:
while (true) {
parentDoc = parentIt.nextDoc();
validateParentDoc();
if (parentDoc == 0) {
// Degenerate but allowed: first parent doc has no children
// TODO: would be nice to pull initial parent
// into ctor so we can skip this if... but it's
// tricky because scorer must return -1 for
// .doc() on init...
parentDoc = parentScorer.nextDoc();
validateParentDoc();
}
if (parentDoc == 0) {
// Degenerate but allowed: first parent doc has no children
// TODO: would be nice to pull initial parent
// into ctor so we can skip this if... but it's
// tricky because scorer must return -1 for
// .doc() on init...
parentDoc = parentIt.nextDoc();
validateParentDoc();
}
if (parentDoc == NO_MORE_DOCS) {
childDoc = NO_MORE_DOCS;
//System.out.println(" END");
return childDoc;
}
if (parentDoc == NO_MORE_DOCS) {
childDoc = NO_MORE_DOCS;
//System.out.println(" END");
return childDoc;
}
// Go to first child for this next parentDoc:
childDoc = 1 + parentBits.prevSetBit(parentDoc-1);
// Go to first child for this next parentDoc:
childDoc = 1 + parentBits.prevSetBit(parentDoc-1);
if (childDoc == parentDoc) {
// This parent has no children; continue
// parent loop so we move to next parent
continue;
}
if (childDoc == parentDoc) {
// This parent has no children; continue
// parent loop so we move to next parent
continue;
}
if (childDoc < parentDoc) {
if (doScores) {
parentScore = parentScorer.score();
parentFreq = parentScorer.freq();
if (childDoc < parentDoc) {
if (doScores) {
parentScore = parentScorer.score();
parentFreq = parentScorer.freq();
}
//System.out.println(" " + childDoc);
return childDoc;
} else {
// Degenerate but allowed: parent has no children
}
}
} else {
assert childDoc < parentDoc: "childDoc=" + childDoc + " parentDoc=" + parentDoc;
childDoc++;
//System.out.println(" " + childDoc);
return childDoc;
} else {
// Degenerate but allowed: parent has no children
}
}
} else {
assert childDoc < parentDoc: "childDoc=" + childDoc + " parentDoc=" + parentDoc;
childDoc++;
}
@Override
public int advance(int childTarget) throws IOException {
if (childTarget >= parentDoc) {
if (childTarget == NO_MORE_DOCS) {
return childDoc = parentDoc = NO_MORE_DOCS;
}
parentDoc = parentIt.advance(childTarget + 1);
validateParentDoc();
if (parentDoc == NO_MORE_DOCS) {
return childDoc = NO_MORE_DOCS;
}
// scan to the first parent that has children
while (true) {
final int firstChild = parentBits.prevSetBit(parentDoc-1) + 1;
if (firstChild != parentDoc) {
// this parent has children
childTarget = Math.max(childTarget, firstChild);
break;
}
// parent with no children, move to the next one
parentDoc = parentIt.nextDoc();
validateParentDoc();
if (parentDoc == NO_MORE_DOCS) {
return childDoc = NO_MORE_DOCS;
}
}
if (doScores) {
parentScore = parentScorer.score();
parentFreq = parentScorer.freq();
}
}
assert childTarget < parentDoc;
assert !parentBits.get(childTarget);
childDoc = childTarget;
//System.out.println(" " + childDoc);
return childDoc;
}
}
@Override
public long cost() {
return parentIt.cost();
}
};
}
/** Detect mis-use, where provided parent query in fact
* sometimes returns child documents. */
private void validateParentDoc() {
if (parentDoc != NO_MORE_DOCS && !parentBits.get(parentDoc)) {
if (parentDoc != DocIdSetIterator.NO_MORE_DOCS && !parentBits.get(parentDoc)) {
throw new IllegalStateException(INVALID_QUERY_MESSAGE + parentDoc);
}
}
@ -258,53 +319,6 @@ public class ToChildBlockJoinQuery extends Query {
public int freq() throws IOException {
return parentFreq;
}
@Override
public int advance(int childTarget) throws IOException {
if (childTarget >= parentDoc) {
if (childTarget == NO_MORE_DOCS) {
return childDoc = parentDoc = NO_MORE_DOCS;
}
parentDoc = parentScorer.advance(childTarget + 1);
validateParentDoc();
if (parentDoc == NO_MORE_DOCS) {
return childDoc = NO_MORE_DOCS;
}
// scan to the first parent that has children
while (true) {
final int firstChild = parentBits.prevSetBit(parentDoc-1) + 1;
if (firstChild != parentDoc) {
// this parent has children
childTarget = Math.max(childTarget, firstChild);
break;
}
// parent with no children, move to the next one
parentDoc = parentScorer.nextDoc();
validateParentDoc();
if (parentDoc == NO_MORE_DOCS) {
return childDoc = NO_MORE_DOCS;
}
}
if (doScores) {
parentScore = parentScorer.score();
parentFreq = parentScorer.freq();
}
}
assert childTarget < parentDoc;
assert !parentBits.get(childTarget);
childDoc = childTarget;
//System.out.println(" " + childDoc);
return childDoc;
}
@Override
public long cost() {
return parentScorer.cost();
}
int getParentDoc() {
return parentDoc;

View File

@ -61,7 +61,8 @@ public class ToParentBlockJoinIndexSearcher extends IndexSearcher {
final LeafCollector leafCollector = collector.getLeafCollector(ctx);
leafCollector.setScorer(scorer);
final Bits liveDocs = ctx.reader().getLiveDocs();
for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
final DocIdSetIterator it = scorer.iterator();
for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
if (liveDocs == null || liveDocs.get(doc)) {
leafCollector.collect(doc);
}

View File

@ -163,7 +163,7 @@ public class ToParentBlockJoinQuery extends Query {
return null;
}
final int firstChildDoc = childScorer.nextDoc();
final int firstChildDoc = childScorer.iterator().nextDoc();
if (firstChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
// No matches
return null;
@ -184,7 +184,7 @@ public class ToParentBlockJoinQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
BlockJoinScorer scorer = (BlockJoinScorer) scorer(context);
if (scorer != null && scorer.advance(doc) == doc) {
if (scorer != null && scorer.iterator().advance(doc) == doc) {
return scorer.explain(context.docBase);
}
return Explanation.noMatch("Not a match");
@ -250,86 +250,142 @@ public class ToParentBlockJoinQuery extends Query {
}
@Override
public int nextDoc() throws IOException {
//System.out.println("Q.nextDoc() nextChildDoc=" + nextChildDoc);
if (nextChildDoc == NO_MORE_DOCS) {
//System.out.println(" end");
return parentDoc = NO_MORE_DOCS;
}
public DocIdSetIterator iterator() {
return new DocIdSetIterator() {
final DocIdSetIterator childIt = childScorer.iterator();
// Gather all children sharing the same parent as
// nextChildDoc
parentDoc = parentBits.nextSetBit(nextChildDoc);
// Parent & child docs are supposed to be
// orthogonal:
if (nextChildDoc == parentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
//System.out.println(" parentDoc=" + parentDoc);
assert parentDoc != DocIdSetIterator.NO_MORE_DOCS;
float totalScore = 0;
float maxScore = Float.NEGATIVE_INFINITY;
float minScore = Float.POSITIVE_INFINITY;
childDocUpto = 0;
parentFreq = 0;
do {
//System.out.println(" c=" + nextChildDoc);
if (pendingChildDocs != null && pendingChildDocs.length == childDocUpto) {
pendingChildDocs = ArrayUtil.grow(pendingChildDocs);
}
if (pendingChildScores != null && scoreMode != ScoreMode.None && pendingChildScores.length == childDocUpto) {
pendingChildScores = ArrayUtil.grow(pendingChildScores);
}
if (pendingChildDocs != null) {
pendingChildDocs[childDocUpto] = nextChildDoc;
}
if (scoreMode != ScoreMode.None) {
// TODO: specialize this into dedicated classes per-scoreMode
final float childScore = childScorer.score();
final int childFreq = childScorer.freq();
if (pendingChildScores != null) {
pendingChildScores[childDocUpto] = childScore;
@Override
public int nextDoc() throws IOException {
//System.out.println("Q.nextDoc() nextChildDoc=" + nextChildDoc);
if (nextChildDoc == NO_MORE_DOCS) {
//System.out.println(" end");
return parentDoc = NO_MORE_DOCS;
}
maxScore = Math.max(childScore, maxScore);
minScore = Math.min(childScore, minScore);
totalScore += childScore;
parentFreq += childFreq;
// Gather all children sharing the same parent as
// nextChildDoc
parentDoc = parentBits.nextSetBit(nextChildDoc);
// Parent & child docs are supposed to be
// orthogonal:
if (nextChildDoc == parentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
//System.out.println(" parentDoc=" + parentDoc);
assert parentDoc != DocIdSetIterator.NO_MORE_DOCS;
float totalScore = 0;
float maxScore = Float.NEGATIVE_INFINITY;
float minScore = Float.POSITIVE_INFINITY;
childDocUpto = 0;
parentFreq = 0;
do {
//System.out.println(" c=" + nextChildDoc);
if (pendingChildDocs != null && pendingChildDocs.length == childDocUpto) {
pendingChildDocs = ArrayUtil.grow(pendingChildDocs);
}
if (pendingChildScores != null && scoreMode != ScoreMode.None && pendingChildScores.length == childDocUpto) {
pendingChildScores = ArrayUtil.grow(pendingChildScores);
}
if (pendingChildDocs != null) {
pendingChildDocs[childDocUpto] = nextChildDoc;
}
if (scoreMode != ScoreMode.None) {
// TODO: specialize this into dedicated classes per-scoreMode
final float childScore = childScorer.score();
final int childFreq = childScorer.freq();
if (pendingChildScores != null) {
pendingChildScores[childDocUpto] = childScore;
}
maxScore = Math.max(childScore, maxScore);
minScore = Math.min(childScore, minScore);
totalScore += childScore;
parentFreq += childFreq;
}
childDocUpto++;
nextChildDoc = childIt.nextDoc();
} while (nextChildDoc < parentDoc);
// Parent & child docs are supposed to be
// orthogonal:
if (nextChildDoc == parentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
switch(scoreMode) {
case Avg:
parentScore = totalScore / childDocUpto;
break;
case Max:
parentScore = maxScore;
break;
case Min:
parentScore = minScore;
break;
case Total:
parentScore = totalScore;
break;
case None:
break;
}
//System.out.println(" return parentDoc=" + parentDoc + " childDocUpto=" + childDocUpto);
return parentDoc;
}
childDocUpto++;
nextChildDoc = childScorer.nextDoc();
} while (nextChildDoc < parentDoc);
// Parent & child docs are supposed to be
// orthogonal:
if (nextChildDoc == parentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
@Override
public int advance(int parentTarget) throws IOException {
switch(scoreMode) {
case Avg:
parentScore = totalScore / childDocUpto;
break;
case Max:
parentScore = maxScore;
break;
case Min:
parentScore = minScore;
break;
case Total:
parentScore = totalScore;
break;
case None:
break;
}
//System.out.println("Q.advance parentTarget=" + parentTarget);
if (parentTarget == NO_MORE_DOCS) {
return parentDoc = NO_MORE_DOCS;
}
//System.out.println(" return parentDoc=" + parentDoc + " childDocUpto=" + childDocUpto);
return parentDoc;
if (parentTarget == 0) {
// Callers should only be passing in a docID from
// the parent space, so this means this parent
// has no children (it got docID 0), so it cannot
// possibly match. We must handle this case
// separately otherwise we pass invalid -1 to
// prevSetBit below:
return nextDoc();
}
prevParentDoc = parentBits.prevSetBit(parentTarget-1);
//System.out.println(" rolled back to prevParentDoc=" + prevParentDoc + " vs parentDoc=" + parentDoc);
assert prevParentDoc >= parentDoc;
if (prevParentDoc > nextChildDoc) {
nextChildDoc = childIt.advance(prevParentDoc);
// System.out.println(" childScorer advanced to child docID=" + nextChildDoc);
//} else {
//System.out.println(" skip childScorer advance");
}
// Parent & child docs are supposed to be orthogonal:
if (nextChildDoc == prevParentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
final int nd = nextDoc();
//System.out.println(" return nextParentDoc=" + nd);
return nd;
}
@Override
public int docID() {
return parentDoc;
}
@Override
public long cost() {
return childIt.cost();
}
};
}
@Override
@ -347,45 +403,6 @@ public class ToParentBlockJoinQuery extends Query {
return parentFreq;
}
@Override
public int advance(int parentTarget) throws IOException {
//System.out.println("Q.advance parentTarget=" + parentTarget);
if (parentTarget == NO_MORE_DOCS) {
return parentDoc = NO_MORE_DOCS;
}
if (parentTarget == 0) {
// Callers should only be passing in a docID from
// the parent space, so this means this parent
// has no children (it got docID 0), so it cannot
// possibly match. We must handle this case
// separately otherwise we pass invalid -1 to
// prevSetBit below:
return nextDoc();
}
prevParentDoc = parentBits.prevSetBit(parentTarget-1);
//System.out.println(" rolled back to prevParentDoc=" + prevParentDoc + " vs parentDoc=" + parentDoc);
assert prevParentDoc >= parentDoc;
if (prevParentDoc > nextChildDoc) {
nextChildDoc = childScorer.advance(prevParentDoc);
// System.out.println(" childScorer advanced to child docID=" + nextChildDoc);
//} else {
//System.out.println(" skip childScorer advance");
}
// Parent & child docs are supposed to be orthogonal:
if (nextChildDoc == prevParentDoc) {
throw new IllegalStateException("child query must only match non-parent docs, but parent docID=" + nextChildDoc + " matched childScorer=" + childScorer.getClass());
}
final int nd = nextDoc();
//System.out.println(" return nextParentDoc=" + nd);
return nd;
}
public Explanation explain(int docBase) throws IOException {
int start = docBase + prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
int end = docBase + parentDoc - 1; // -1 b/c parentDoc is parent doc
@ -393,11 +410,6 @@ public class ToParentBlockJoinQuery extends Query {
);
}
@Override
public long cost() {
return childScorer.cost();
}
/**
* Instructs this scorer to keep track of the child docIds and score ids for retrieval purposes.
*/

View File

@ -63,6 +63,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.RandomApproximationQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
@ -1143,8 +1144,8 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q, true);
DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(1, disi.advance(1));
Scorer sc = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(1, sc.iterator().advance(1));
r.close();
dir.close();
}
@ -1177,8 +1178,8 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q, true);
DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(2, disi.advance(0));
Scorer sc = weight.scorer(s.getIndexReader().leaves().get(0));
assertEquals(2, sc.iterator().advance(0));
r.close();
dir.close();
}
@ -1657,12 +1658,12 @@ public class TestBlockJoin extends LuceneTestCase {
ToChildBlockJoinQuery parentJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentFilter);
Weight weight = s.createNormalizedWeight(parentJoinQuery, random().nextBoolean());
DocIdSetIterator advancingScorer = weight.scorer(s.getIndexReader().leaves().get(0));
DocIdSetIterator nextDocScorer = weight.scorer(s.getIndexReader().leaves().get(0));
Scorer advancingScorer = weight.scorer(s.getIndexReader().leaves().get(0));
Scorer nextDocScorer = weight.scorer(s.getIndexReader().leaves().get(0));
final int firstKid = nextDocScorer.nextDoc();
final int firstKid = nextDocScorer.iterator().nextDoc();
assertTrue("firstKid not found", DocIdSetIterator.NO_MORE_DOCS != firstKid);
assertEquals(firstKid, advancingScorer.advance(0));
assertEquals(firstKid, advancingScorer.iterator().advance(0));
r.close();
dir.close();

View File

@ -140,7 +140,7 @@ public class TestBlockJoinValidation extends LuceneTestCase {
} while (parentDocs.get(target + 1));
try {
scorer.advance(target);
scorer.iterator().advance(target);
fail();
} catch (IllegalStateException expected) {
assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
@ -134,9 +135,9 @@ public class PKIndexSplitter {
final int maxDoc = in.maxDoc();
final FixedBitSet bits = new FixedBitSet(maxDoc);
// ignore livedocs here, as we filter them later:
final DocIdSetIterator preserveIt = preserveWeight.scorer(context);
if (preserveIt != null) {
bits.or(preserveIt);
final Scorer preverveScorer = preserveWeight.scorer(context);
if (preverveScorer != null) {
bits.or(preverveScorer.iterator());
}
if (negateFilter) {
bits.flip(0, maxDoc);

View File

@ -20,6 +20,7 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
@ -269,18 +270,7 @@ final class Sorter {
return doc;
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException();
}

View File

@ -122,11 +122,11 @@ public class BlockJoinComparatorSource extends FieldComparatorSource {
IndexSearcher searcher = new IndexSearcher(ReaderUtil.getTopLevelContext(context));
searcher.setQueryCache(null);
final Weight weight = searcher.createNormalizedWeight(parentsFilter, false);
final DocIdSetIterator parents = weight.scorer(context);
final Scorer parents = weight.scorer(context);
if (parents == null) {
throw new IllegalStateException("LeafReader " + context.reader() + " contains no parents!");
}
parentBits = BitSet.of(parents, context.reader().maxDoc());
parentBits = BitSet.of(parents.iterator(), context.reader().maxDoc());
parentLeafComparators = new LeafFieldComparator[parentComparators.length];
for (int i = 0; i < parentComparators.length; i++) {
parentLeafComparators[i] = parentComparators[i].getLeafComparator(context);

View File

@ -27,9 +27,9 @@ import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.search.BlockJoinComparatorSource;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
@ -37,7 +37,6 @@ import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.Ignore;
public class TestBlockJoinSorter extends LuceneTestCase {
@ -74,8 +73,8 @@ public class TestBlockJoinSorter extends LuceneTestCase {
final Query parentsFilter = new TermQuery(new Term("parent", "true"));
final Weight weight = searcher.createNormalizedWeight(parentsFilter, false);
final DocIdSetIterator parents = weight.scorer(indexReader.leaves().get(0));
final BitSet parentBits = BitSet.of(parents, reader.maxDoc());
final Scorer parents = weight.scorer(indexReader.leaves().get(0));
final BitSet parentBits = BitSet.of(parents.iterator(), reader.maxDoc());
final NumericDocValues parentValues = reader.getNumericDocValues("parent_val");
final NumericDocValues childValues = reader.getNumericDocValues("child_val");

View File

@ -112,9 +112,9 @@ public class BoostingQuery extends Query {
if (contextScorer == null) {
return matchScorer;
}
TwoPhaseIterator contextTwoPhase = contextScorer.asTwoPhaseIterator();
TwoPhaseIterator contextTwoPhase = contextScorer.twoPhaseIterator();
DocIdSetIterator contextApproximation = contextTwoPhase == null
? contextScorer
? contextScorer.iterator()
: contextTwoPhase.approximation();
return new FilterScorer(matchScorer) {
@Override

View File

@ -294,7 +294,7 @@ public class CustomScoreQuery extends Query implements Cloneable {
int doc = docID();
if (doc > valSrcDocID) {
for (Scorer valSrcScorer : valSrcScorers) {
valSrcScorer.advance(doc);
valSrcScorer.iterator().advance(doc);
}
valSrcDocID = doc;
}

View File

@ -24,6 +24,7 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@ -94,7 +95,7 @@ public class FunctionQuery extends Query {
final FunctionWeight weight;
final int maxDoc;
final float qWeight;
int doc=-1;
final DocIdSetIterator iterator;
final FunctionValues vals;
public AllScorer(LeafReaderContext context, FunctionWeight w, float qWeight) throws IOException {
@ -103,35 +104,23 @@ public class FunctionQuery extends Query {
this.qWeight = qWeight;
this.reader = context.reader();
this.maxDoc = reader.maxDoc();
iterator = DocIdSetIterator.all(context.reader().maxDoc());
vals = func.getValues(weight.context, context);
}
@Override
public DocIdSetIterator iterator() {
return iterator;
}
@Override
public int docID() {
return doc;
}
// instead of matching all docs, we could also embed a query.
// the score could either ignore the subscore, or boost it.
// Containment: floatline(foo:myTerm, "myFloatField", 1.0, 0.0f)
// Boost: foo:myTerm^floatline("myFloatField",1.0,0.0f)
@Override
public int nextDoc() throws IOException {
++doc;
if (doc>=maxDoc) {
return doc=NO_MORE_DOCS;
}
return doc;
}
@Override
public int advance(int target) throws IOException {
return slowAdvance(target);
return iterator.docID();
}
@Override
public float score() throws IOException {
float score = qWeight * vals.floatVal(doc);
float score = qWeight * vals.floatVal(docID());
// Current Lucene priority queues can't handle NaN and -Infinity, so
// map to -Float.MAX_VALUE. This conditional handles both -infinity
@ -139,11 +128,6 @@ public class FunctionQuery extends Query {
return score>Float.NEGATIVE_INFINITY ? score : -Float.MAX_VALUE;
}
@Override
public long cost() {
return maxDoc;
}
@Override
public int freq() throws IOException {
return 1;

View File

@ -132,7 +132,7 @@ public class FunctionRangeQuery extends Query {
// which can be slow since if that doc doesn't match, it has to linearly find the next matching
ValueSourceScorer scorer = scorer(context);
if (scorer.matches(doc)) {
scorer.advance(doc);
scorer.iterator().advance(doc);
return Explanation.match(scorer.score(), FunctionRangeQuery.this.toString(), functionValues.explain(doc));
} else {
return Explanation.noMatch(FunctionRangeQuery.this.toString(), functionValues.explain(doc));

View File

@ -48,10 +48,11 @@ public abstract class ValueSourceScorer extends Scorer {
protected ValueSourceScorer(IndexReader reader, FunctionValues values) {
super(null);//no weight
this.values = values;
this.twoPhaseIterator = new TwoPhaseIterator(DocIdSetIterator.all(reader.maxDoc())) { // no approximation!
final DocIdSetIterator approximation = DocIdSetIterator.all(reader.maxDoc()); // no approximation!
this.twoPhaseIterator = new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
return ValueSourceScorer.this.matches(docID());
return ValueSourceScorer.this.matches(approximation.docID());
}
@Override
@ -66,7 +67,12 @@ public abstract class ValueSourceScorer extends Scorer {
public abstract boolean matches(int doc);
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
public DocIdSetIterator iterator() {
return disi;
}
@Override
public TwoPhaseIterator twoPhaseIterator() {
return twoPhaseIterator;
}
@ -75,16 +81,6 @@ public abstract class ValueSourceScorer extends Scorer {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public float score() throws IOException {
// (same as FunctionQuery, but no qWeight) TODO consider adding configurable qWeight
@ -100,8 +96,4 @@ public abstract class ValueSourceScorer extends Scorer {
return 1;
}
@Override
public long cost() {
return disi.cost();
}
}

View File

@ -25,11 +25,11 @@ import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueFloat;
@ -86,6 +86,7 @@ class QueryDocValues extends FloatDocValues {
final Query q;
Scorer scorer;
DocIdSetIterator it;
int scorerDoc; // the document the scorer is on
boolean noMatches=false;
@ -129,12 +130,13 @@ class QueryDocValues extends FloatDocValues {
noMatches = true;
return defVal;
}
it = scorer.iterator();
scorerDoc = -1;
}
lastDocRequested = doc;
if (scorerDoc < doc) {
scorerDoc = scorer.advance(doc);
scorerDoc = it.advance(doc);
}
if (scorerDoc > doc) {
@ -161,11 +163,12 @@ class QueryDocValues extends FloatDocValues {
noMatches = true;
return false;
}
it = scorer.iterator();
}
lastDocRequested = doc;
if (scorerDoc < doc) {
scorerDoc = scorer.advance(doc);
scorerDoc = it.advance(doc);
}
if (scorerDoc > doc) {
@ -221,10 +224,11 @@ class QueryDocValues extends FloatDocValues {
mval.exists = false;
return;
}
it = scorer.iterator();
lastDocRequested = doc;
if (scorerDoc < doc) {
scorerDoc = scorer.advance(doc);
scorerDoc = it.advance(doc);
}
if (scorerDoc > doc) {

View File

@ -27,11 +27,13 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.spans.FilterSpans;
import org.apache.lucene.search.spans.SpanCollector;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanScorer;
import org.apache.lucene.search.spans.SpanWeight;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.BytesRef;
@ -132,11 +134,13 @@ public class PayloadScoreQuery extends SpanQuery {
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
public PayloadSpanScorer scorer(LeafReaderContext context) throws IOException {
Spans spans = getSpans(context, Postings.PAYLOADS);
if (spans == null)
return null;
return new PayloadSpans(spans, this, innerWeight.getSimScorer(context));
SimScorer docScorer = innerWeight.getSimScorer(context);
PayloadSpans payloadSpans = new PayloadSpans(spans, docScorer);
return new PayloadSpanScorer(this, payloadSpans, docScorer);
}
@Override
@ -156,8 +160,8 @@ public class PayloadScoreQuery extends SpanQuery {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
PayloadSpans scorer = (PayloadSpans) scorer(context);
if (scorer == null || scorer.advance(doc) != doc)
PayloadSpanScorer scorer = scorer(context);
if (scorer == null || scorer.iterator().advance(doc) != doc)
return Explanation.noMatch("No match");
scorer.freq(); // force freq calculation
@ -173,53 +177,28 @@ public class PayloadScoreQuery extends SpanQuery {
}
}
private class PayloadSpans extends Spans implements SpanCollector {
private class PayloadSpans extends FilterSpans implements SpanCollector {
private int payloadsSeen;
private float payloadScore;
private final Spans in;
private final SimScorer docScorer;
public int payloadsSeen;
public float payloadScore;
private PayloadSpans(Spans spans, SpanWeight weight, Similarity.SimScorer docScorer) throws IOException {
super(weight, docScorer);
this.in = spans;
private PayloadSpans(Spans in, SimScorer docScorer) {
super(in);
this.docScorer = docScorer;
}
@Override
public int nextStartPosition() throws IOException {
return in.nextStartPosition();
protected AcceptStatus accept(Spans candidate) throws IOException {
return AcceptStatus.YES;
}
@Override
public int startPosition() {
return in.startPosition();
}
@Override
public int endPosition() {
return in.endPosition();
}
@Override
public int width() {
return in.width();
}
@Override
public void collect(SpanCollector collector) throws IOException {
in.collect(collector);
}
@Override
protected void doStartCurrentDoc() {
payloadScore = 0;
payloadsSeen = 0;
}
@Override
protected void doCurrentSpans() throws IOException {
in.collect(this);
}
@Override
public void collectLeaf(PostingsEnum postings, int position, Term term) throws IOException {
BytesRef payload = postings.getPayload();
@ -231,12 +210,30 @@ public class PayloadScoreQuery extends SpanQuery {
payloadsSeen++;
}
@Override
public void reset() {}
@Override
protected void doCurrentSpans() throws IOException {
in.collect(this);
}
}
private class PayloadSpanScorer extends SpanScorer {
private final PayloadSpans spans;
private PayloadSpanScorer(SpanWeight weight, PayloadSpans spans, Similarity.SimScorer docScorer) throws IOException {
super(weight, spans, docScorer);
this.spans = spans;
}
protected float getPayloadScore() {
return function.docScore(docID(), getField(), payloadsSeen, payloadScore);
return function.docScore(docID(), getField(), spans.payloadsSeen, spans.payloadScore);
}
protected Explanation getPayloadExplanation() {
return function.explain(docID(), getField(), payloadsSeen, payloadScore);
return function.explain(docID(), getField(), spans.payloadsSeen, spans.payloadScore);
}
protected float getSpanScore() throws IOException {
@ -250,35 +247,6 @@ public class PayloadScoreQuery extends SpanQuery {
return getPayloadScore();
}
@Override
public void reset() {
}
@Override
public int docID() {
return in.docID();
}
@Override
public int nextDoc() throws IOException {
return in.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return in.advance(target);
}
@Override
public long cost() {
return in.cost();
}
@Override
public float positionsCost() {
return in.positionsCost();
}
}
}

Some files were not shown because too many files have changed in this diff Show More