LUCENE-1630: switch from Weight (interface) to QueryWeight (abstract class); mate in/out-of docID order scoring between Collector & Scorer

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@787772 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2009-06-23 18:11:42 +00:00
parent 87483858d7
commit f03d77b558
61 changed files with 1085 additions and 644 deletions

View File

@ -53,6 +53,17 @@ Changes in backwards compatibility policy
which was unlikely done, because there is no possibility to change which was unlikely done, because there is no possibility to change
Lucene's FieldCache implementation. (Grant Ingersoll, Uwe Schindler) Lucene's FieldCache implementation. (Grant Ingersoll, Uwe Schindler)
3. LUCENE-1630: Deprecate Weight in favor of QueryWeight: added
matching methods to Searcher to take QueryWeight and deprecated
those taking Weight. If you have a Weight implementation, you can
turn it into a QueryWeight with QueryWeightWrapper (will be
removed in 3.0). All of the Weight-based methods were implemented
by calling the QueryWeight variants by wrapping the given Weight.
Going forward Searchable will be kept for convenience only and may
be changed between minor releases without any deprecation
process. It is not recommended to implement it, but rather extend
Searcher. (Shai Erera via Mike McCandless)
Changes in runtime behavior Changes in runtime behavior
1. LUCENE-1424: QueryParser now by default uses constant score query 1. LUCENE-1424: QueryParser now by default uses constant score query
@ -225,6 +236,24 @@ API Changes
NumericRangeQuery and its new indexing format for numeric or NumericRangeQuery and its new indexing format for numeric or
date values. (Uwe Schindler) date values. (Uwe Schindler)
23. LUCENE-1630: Deprecate Weight in favor of QueryWeight, which adds
a scorer(IndexReader, boolean /* scoreDocsInOrder */, boolean /*
topScorer */) method instead of scorer(IndexReader) (now
deprecated). The new method is used by IndexSearcher to mate
between Collector and Scorer orderness of doc IDs. Some Scorers
(like BooleanScorer) are much more efficient if out-of-order
documents scoring is allowed by a Collector. Collector must now
implement acceptsDocsOutOfOrder. If you write a Collector which
does not care about doc ID orderness, it is recommended that you
return true. QueryWeight has the scoresDocsOutOfOrder method,
which by default returns false. If you create a QueryWeight which
will score documents out of order if that's requested, you should
override that method to return true. Also deprecated
BooleanQuery's setAllowDocsOutOfOrder and getAllowDocsOutOfOrder
as they are not needed anymore. BooleanQuery will now score docs
out of order when used with a Collector that can accept docs out
of order. (Shai Erera via Mike McCandless)
Bug fixes Bug fixes
1. LUCENE-1415: MultiPhraseQuery has incorrect hashCode() and equals() 1. LUCENE-1415: MultiPhraseQuery has incorrect hashCode() and equals()

View File

@ -146,6 +146,9 @@ public class TestFieldNormModifier extends TestCase {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
searcher.close(); searcher.close();
@ -174,6 +177,9 @@ public class TestFieldNormModifier extends TestCase {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
searcher.close(); searcher.close();
@ -219,6 +225,9 @@ public class TestFieldNormModifier extends TestCase {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
searcher.close(); searcher.close();

View File

@ -153,6 +153,9 @@ public class TestLengthNormModifier extends TestCase {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
searcher.close(); searcher.close();
@ -187,6 +190,9 @@ public class TestLengthNormModifier extends TestCase {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
searcher.close(); searcher.close();

View File

@ -48,10 +48,15 @@ public class RemoteSearchable
/** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ /** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */
public void search(Weight weight, Filter filter, HitCollector results) public void search(Weight weight, Filter filter, HitCollector results)
throws IOException { throws IOException {
local.search(weight, filter, results); search(new QueryWeightWrapper(weight), filter, new HitCollectorWrapper(results));
} }
public void search(Weight weight, Filter filter, Collector results) public void search(Weight weight, Filter filter, Collector results)
throws IOException {
search(new QueryWeightWrapper(weight), filter, results);
}
public void search(QueryWeight weight, Filter filter, Collector results)
throws IOException { throws IOException {
local.search(weight, filter, results); local.search(weight, filter, results);
} }
@ -74,11 +79,19 @@ public class RemoteSearchable
} }
public TopDocs search(Weight weight, Filter filter, int n) throws IOException { public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
return search(new QueryWeightWrapper(weight), filter, n);
}
public TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException {
return local.search(weight, filter, n); return local.search(weight, filter, n);
} }
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException {
return search(new QueryWeightWrapper(weight), filter, n, sort);
}
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort)
throws IOException { throws IOException {
return local.search (weight, filter, n, sort); return local.search (weight, filter, n, sort);
} }
@ -96,6 +109,10 @@ public class RemoteSearchable
} }
public Explanation explain(Weight weight, int doc) throws IOException { public Explanation explain(Weight weight, int doc) throws IOException {
return explain(new QueryWeightWrapper(weight), doc);
}
public Explanation explain(QueryWeight weight, int doc) throws IOException {
return local.explain(weight, doc); return local.explain(weight, doc);
} }

View File

@ -17,27 +17,27 @@ package org.apache.lucene.index;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.ArrayUtil;
import java.io.IOException; import java.io.IOException;
import java.io.PrintStream; import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.ArrayList; import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry; import java.util.Map.Entry;
import java.text.NumberFormat;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ArrayUtil;
/** /**
* This class accepts multiple added documents and directly * This class accepts multiple added documents and directly
@ -172,7 +172,7 @@ final class DocumentsWriter {
void setNext(DocWriter next) { void setNext(DocWriter next) {
this.next = next; this.next = next;
} }
}; }
/** /**
* The IndexingChain must define the {@link #getChain(DocumentsWriter)} method * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
@ -303,7 +303,7 @@ final class DocumentsWriter {
synchronized void setAllowMinus1Position() { synchronized void setAllowMinus1Position() {
for(int i=0;i<threadStates.length;i++) for(int i=0;i<threadStates.length;i++)
threadStates[i].docState.allowMinus1Position = true;; threadStates[i].docState.allowMinus1Position = true;
} }
/** Set how much RAM we can use before flushing. */ /** Set how much RAM we can use before flushing. */
@ -989,8 +989,8 @@ final class DocumentsWriter {
Entry entry = (Entry) iter.next(); Entry entry = (Entry) iter.next();
Query query = (Query) entry.getKey(); Query query = (Query) entry.getKey();
int limit = ((Integer) entry.getValue()).intValue(); int limit = ((Integer) entry.getValue()).intValue();
Weight weight = query.weight(searcher); QueryWeight weight = query.queryWeight(searcher);
Scorer scorer = weight.scorer(reader); Scorer scorer = weight.scorer(reader, true, false);
while(true) { while(true) {
int doc = scorer.nextDoc(); int doc = scorer.nextDoc();
if (((long) docIDStart) + doc >= limit) if (((long) docIDStart) + doc >= limit)
@ -1144,7 +1144,7 @@ final class DocumentsWriter {
/* Initial chunks size of the shared byte[] blocks used to /* Initial chunks size of the shared byte[] blocks used to
store postings data */ store postings data */
final static int BYTE_BLOCK_SHIFT = 15; final static int BYTE_BLOCK_SHIFT = 15;
final static int BYTE_BLOCK_SIZE = (int) (1 << BYTE_BLOCK_SHIFT); final static int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;
final static int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1; final static int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1;
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK; final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
@ -1187,7 +1187,7 @@ final class DocumentsWriter {
/* Initial chunks size of the shared int[] blocks used to /* Initial chunks size of the shared int[] blocks used to
store postings data */ store postings data */
final static int INT_BLOCK_SHIFT = 13; final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = (int) (1 << INT_BLOCK_SHIFT); final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1; final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
private ArrayList freeIntBlocks = new ArrayList(); private ArrayList freeIntBlocks = new ArrayList();
@ -1234,7 +1234,7 @@ final class DocumentsWriter {
/* Initial chunk size of the shared char[] blocks used to /* Initial chunk size of the shared char[] blocks used to
store term text */ store term text */
final static int CHAR_BLOCK_SHIFT = 14; final static int CHAR_BLOCK_SHIFT = 14;
final static int CHAR_BLOCK_SIZE = (int) (1 << CHAR_BLOCK_SHIFT); final static int CHAR_BLOCK_SIZE = 1 << CHAR_BLOCK_SHIFT;
final static int CHAR_BLOCK_MASK = CHAR_BLOCK_SIZE - 1; final static int CHAR_BLOCK_MASK = CHAR_BLOCK_SIZE - 1;
final static int MAX_TERM_LENGTH = CHAR_BLOCK_SIZE-1; final static int MAX_TERM_LENGTH = CHAR_BLOCK_SIZE-1;
@ -1283,7 +1283,7 @@ final class DocumentsWriter {
void balanceRAM() { void balanceRAM() {
// We flush when we've used our target usage // We flush when we've used our target usage
final long flushTrigger = (long) ramBufferSize; final long flushTrigger = ramBufferSize;
if (numBytesAlloc > freeTrigger) { if (numBytesAlloc > freeTrigger) {

View File

@ -30,7 +30,6 @@ import java.util.*;
*/ */
public class BooleanQuery extends Query { public class BooleanQuery extends Query {
private static int maxClauseCount = 1024; private static int maxClauseCount = 1024;
/** Thrown when an attempt is made to add more than {@link /** Thrown when an attempt is made to add more than {@link
@ -173,7 +172,7 @@ public class BooleanQuery extends Query {
/** Returns the list of clauses in this query. */ /** Returns the list of clauses in this query. */
public List clauses() { return clauses; } public List clauses() { return clauses; }
private class BooleanWeight implements Weight { private class BooleanWeight extends QueryWeight {
protected Similarity similarity; protected Similarity similarity;
protected ArrayList weights; protected ArrayList weights;
@ -183,7 +182,7 @@ public class BooleanQuery extends Query {
weights = new ArrayList(clauses.size()); weights = new ArrayList(clauses.size());
for (int i = 0 ; i < clauses.size(); i++) { for (int i = 0 ; i < clauses.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i); BooleanClause c = (BooleanClause)clauses.get(i);
weights.add(c.getQuery().createWeight(searcher)); weights.add(c.getQuery().createQueryWeight(searcher));
} }
} }
@ -194,7 +193,7 @@ public class BooleanQuery extends Query {
float sum = 0.0f; float sum = 0.0f;
for (int i = 0 ; i < weights.size(); i++) { for (int i = 0 ; i < weights.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i); BooleanClause c = (BooleanClause)clauses.get(i);
Weight w = (Weight)weights.get(i); QueryWeight w = (QueryWeight)weights.get(i);
// call sumOfSquaredWeights for all clauses in case of side effects // call sumOfSquaredWeights for all clauses in case of side effects
float s = w.sumOfSquaredWeights(); // sum sub weights float s = w.sumOfSquaredWeights(); // sum sub weights
if (!c.isProhibited()) if (!c.isProhibited())
@ -210,39 +209,13 @@ public class BooleanQuery extends Query {
public void normalize(float norm) { public void normalize(float norm) {
norm *= getBoost(); // incorporate boost norm *= getBoost(); // incorporate boost
for (int i = 0 ; i < weights.size(); i++) { for (Iterator iter = weights.iterator(); iter.hasNext();) {
Weight w = (Weight)weights.get(i); QueryWeight w = (QueryWeight) iter.next();
// normalize all clauses, (even if prohibited in case of side affects) // normalize all clauses, (even if prohibited in case of side affects)
w.normalize(norm); w.normalize(norm);
} }
} }
/**
* @return Returns BooleanScorer2 that uses and provides advance(), and
* scores documents in document number order.
*/
public Scorer scorer(IndexReader reader) throws IOException {
// TODO (3.0): instantiate either BS or BS2, according to
// allowDocsOutOfOrder (basically, try to inline BS2.score(Collector)'s
// logic.
BooleanScorer2 result = new BooleanScorer2(similarity,
minNrShouldMatch,
allowDocsOutOfOrder);
for (int i = 0 ; i < weights.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i);
Weight w = (Weight)weights.get(i);
Scorer subScorer = w.scorer(reader);
if (subScorer != null)
result.add(subScorer, c.isRequired(), c.isProhibited());
else if (c.isRequired())
return null;
}
return result;
}
public Explanation explain(IndexReader reader, int doc) public Explanation explain(IndexReader reader, int doc)
throws IOException { throws IOException {
final int minShouldMatch = final int minShouldMatch =
@ -256,7 +229,7 @@ public class BooleanQuery extends Query {
int shouldMatchCount = 0; int shouldMatchCount = 0;
for (int i = 0 ; i < weights.size(); i++) { for (int i = 0 ; i < weights.size(); i++) {
BooleanClause c = (BooleanClause)clauses.get(i); BooleanClause c = (BooleanClause)clauses.get(i);
Weight w = (Weight)weights.get(i); QueryWeight w = (QueryWeight)weights.get(i);
Explanation e = w.explain(reader, doc); Explanation e = w.explain(reader, doc);
if (!c.isProhibited()) maxCoord++; if (!c.isProhibited()) maxCoord++;
if (e.isMatch()) { if (e.isMatch()) {
@ -310,33 +283,90 @@ public class BooleanQuery extends Query {
return result; return result;
} }
} }
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
throws IOException {
List required = new ArrayList();
List prohibited = new ArrayList();
List optional = new ArrayList();
for (Iterator wIter = weights.iterator(), cIter = clauses.iterator(); wIter.hasNext();) {
QueryWeight w = (QueryWeight) wIter.next();
BooleanClause c = (BooleanClause) cIter.next();
Scorer subScorer = w.scorer(reader, true, false);
if (subScorer == null) {
return null;
} else if (c.isRequired()) {
required.add(subScorer);
} else if (c.isProhibited()) {
prohibited.add(subScorer);
} else {
optional.add(subScorer);
}
} }
/** Whether hit docs may be collected out of docid order. */ // Check if we can return a BooleanScorer
private static boolean allowDocsOutOfOrder = false; scoreDocsInOrder |= !allowDocsOutOfOrder; // until it is removed, factor in the static setting.
if (!scoreDocsInOrder && topScorer && required.size() == 0 && prohibited.size() < 32) {
return new BooleanScorer(similarity, minNrShouldMatch, optional, prohibited);
}
// Return a BooleanScorer2
return new BooleanScorer2(similarity, minNrShouldMatch, required, prohibited, optional);
}
public boolean scoresDocsOutOfOrder() {
int numProhibited = 0;
for (Iterator cIter = clauses.iterator(); cIter.hasNext();) {
BooleanClause c = (BooleanClause) cIter.next();
if (c.isRequired()) {
return false; // BS2 (in-order) will be used by scorer()
} else if (c.isProhibited()) {
++numProhibited;
}
}
if (numProhibited > 32) { // cannot use BS
return false;
}
// scorer() will return an out-of-order scorer if requested.
return true;
}
}
/** /**
* Expert: Indicates whether hit docs may be collected out of docid * Whether hit docs may be collected out of docid order.
* order. *
* @deprecated this will not be needed anymore, as
* {@link QueryWeight#scoresDocsOutOfOrder()} is used.
*/
private static boolean allowDocsOutOfOrder = true;
/**
* Expert: Indicates whether hit docs may be collected out of docid order.
* *
* <p> * <p>
* Background: although the contract of the Scorer class requires that * Background: although the contract of the Scorer class requires that
* documents be iterated in order of doc id, this was not true in early * documents be iterated in order of doc id, this was not true in early
* versions of Lucene. Many pieces of functionality in the current * versions of Lucene. Many pieces of functionality in the current Lucene code
* Lucene code base have undefined behavior if this contract is not * base have undefined behavior if this contract is not upheld, but in some
* upheld, but in some specific simple cases may be faster. (For * specific simple cases may be faster. (For example: disjunction queries with
* example: disjunction queries with less than 32 prohibited clauses; * less than 32 prohibited clauses; This setting has no effect for other
* This setting has no effect for other queries.) * queries.)
* </p> * </p>
* *
* <p> * <p>
* Specifics: By setting this option to true, docid N might be scored * Specifics: By setting this option to true, docid N might be scored for a
* for a single segment before docid N-1. Across multiple segments, * single segment before docid N-1. Across multiple segments, docs may be
* docs may be scored out of order regardless of this setting - it only * scored out of order regardless of this setting - it only applies to scoring
* applies to scoring a single segment. * a single segment.
* *
* Being static, this setting is system wide. * Being static, this setting is system wide.
* </p> * </p>
*
* @deprecated this is not needed anymore, as
* {@link QueryWeight#scoresDocsOutOfOrder()} is used.
*/ */
public static void setAllowDocsOutOfOrder(boolean allow) { public static void setAllowDocsOutOfOrder(boolean allow) {
allowDocsOutOfOrder = allow; allowDocsOutOfOrder = allow;
@ -344,7 +374,10 @@ public class BooleanQuery extends Query {
/** /**
* Whether hit docs may be collected out of docid order. * Whether hit docs may be collected out of docid order.
*
* @see #setAllowDocsOutOfOrder(boolean) * @see #setAllowDocsOutOfOrder(boolean)
* @deprecated this is not needed anymore, as
* {@link QueryWeight#scoresDocsOutOfOrder()} is used.
*/ */
public static boolean getAllowDocsOutOfOrder() { public static boolean getAllowDocsOutOfOrder() {
return allowDocsOutOfOrder; return allowDocsOutOfOrder;
@ -364,7 +397,7 @@ public class BooleanQuery extends Query {
return getAllowDocsOutOfOrder(); return getAllowDocsOutOfOrder();
} }
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return new BooleanWeight(searcher); return new BooleanWeight(searcher);
} }

View File

@ -92,9 +92,15 @@ final class BooleanScorer extends Scorer {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
// not needed by this implementation // not needed by this implementation
} }
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
// An internal class which is used in score(Collector, int) for setting the // An internal class which is used in score(Collector, int) for setting the

View File

@ -30,9 +30,10 @@ import java.util.List;
* <br>Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer. * <br>Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer.
*/ */
class BooleanScorer2 extends Scorer { class BooleanScorer2 extends Scorer {
private ArrayList requiredScorers = new ArrayList();
private ArrayList optionalScorers = new ArrayList(); private final List requiredScorers;
private ArrayList prohibitedScorers = new ArrayList(); private final List optionalScorers;
private final List prohibitedScorers;
private class Coordinator { private class Coordinator {
float[] coordFactors = null; float[] coordFactors = null;
@ -54,86 +55,48 @@ class BooleanScorer2 extends Scorer {
/** The scorer to which all scoring will be delegated, /** The scorer to which all scoring will be delegated,
* except for computing and using the coordination factor. * except for computing and using the coordination factor.
*/ */
private Scorer countingSumScorer = null; private final Scorer countingSumScorer;
/** The number of optionalScorers that need to match (if there are any) */ /** The number of optionalScorers that need to match (if there are any) */
private final int minNrShouldMatch; private final int minNrShouldMatch;
/** Whether it is allowed to return documents out of order.
* This can accelerate the scoring of disjunction queries.
*/
private boolean allowDocsOutOfOrder;
private int doc = -1; private int doc = -1;
/** Create a BooleanScorer2. /**
* @param similarity The similarity to be used. * Creates a {@link Scorer} with the given similarity and lists of required,
* @param minNrShouldMatch The minimum number of optional added scorers * prohibited and optional scorers. In no required scorers are added, at least
* that should match during the search. * one of the optional scorers will have to match during the search.
* In case no required scorers are added, *
* at least one of the optional scorers will have to * @param similarity
* match during the search. * The similarity to be used.
* @param allowDocsOutOfOrder Whether it is allowed to return documents out of order. * @param minNrShouldMatch
* This can accelerate the scoring of disjunction queries. * The minimum number of optional added scorers that should match
* during the search. In case no required scorers are added, at least
* one of the optional scorers will have to match during the search.
* @param required
* the list of required scorers.
* @param prohibited
* the list of prohibited scorers.
* @param optional
* the list of optional scorers.
*/ */
public BooleanScorer2(Similarity similarity, int minNrShouldMatch, boolean allowDocsOutOfOrder) throws IOException { public BooleanScorer2(Similarity similarity, int minNrShouldMatch,
List required, List prohibited, List optional) throws IOException {
super(similarity); super(similarity);
if (minNrShouldMatch < 0) { if (minNrShouldMatch < 0) {
throw new IllegalArgumentException("Minimum number of optional scorers should not be negative"); throw new IllegalArgumentException("Minimum number of optional scorers should not be negative");
} }
coordinator = new Coordinator(); coordinator = new Coordinator();
this.minNrShouldMatch = minNrShouldMatch; this.minNrShouldMatch = minNrShouldMatch;
this.allowDocsOutOfOrder = allowDocsOutOfOrder;
}
/** Create a BooleanScorer2. optionalScorers = optional;
* In no required scorers are added, coordinator.maxCoord += optional.size();
* at least one of the optional scorers will have to match during the search.
* @param similarity The similarity to be used.
* @param minNrShouldMatch The minimum number of optional added scorers
* that should match during the search.
* In case no required scorers are added,
* at least one of the optional scorers will have to
* match during the search.
*/
public BooleanScorer2(Similarity similarity, int minNrShouldMatch) throws IOException {
this(similarity, minNrShouldMatch, false);
}
/** Create a BooleanScorer2. requiredScorers = required;
* In no required scorers are added, coordinator.maxCoord += required.size();
* at least one of the optional scorers will have to match during the search.
* @param similarity The similarity to be used.
*/
public BooleanScorer2(Similarity similarity) throws IOException {
this(similarity, 0, false);
}
public void add(final Scorer scorer, boolean required, boolean prohibited) throws IOException { prohibitedScorers = prohibited;
if (!prohibited) {
coordinator.maxCoord++;
}
if (required) {
if (prohibited) {
throw new IllegalArgumentException("scorer cannot be required and prohibited");
}
requiredScorers.add(scorer);
} else if (prohibited) {
prohibitedScorers.add(scorer);
} else {
optionalScorers.add(scorer);
}
}
/** Initialize the match counting scorer that sums all the
* scores. <p>
* When "counting" is used in a name it means counting the number
* of matching scorers.<br>
* When "sum" is used in a name it means score value summing
* over the matching scorers
*/
private void initCountingSumScorer() throws IOException {
coordinator.init(); coordinator.init();
countingSumScorer = makeCountingSumScorer(); countingSumScorer = makeCountingSumScorer();
} }
@ -333,21 +296,12 @@ class BooleanScorer2 extends Scorer {
* <br>When this method is used the {@link #explain(int)} method should not be used. * <br>When this method is used the {@link #explain(int)} method should not be used.
*/ */
public void score(Collector collector) throws IOException { public void score(Collector collector) throws IOException {
if (allowDocsOutOfOrder && requiredScorers.size() == 0
&& prohibitedScorers.size() < 32) {
new BooleanScorer(getSimilarity(), minNrShouldMatch, optionalScorers,
prohibitedScorers).score(collector);
} else {
if (countingSumScorer == null) {
initCountingSumScorer();
}
collector.setScorer(this); collector.setScorer(this);
int doc; int doc;
while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) { while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) {
collector.collect(doc); collector.collect(doc);
} }
} }
}
/** Expert: Collects matching documents in a range. /** Expert: Collects matching documents in a range.
* <br>Note that {@link #next()} must be called once before this method is * <br>Note that {@link #next()} must be called once before this method is
@ -386,9 +340,6 @@ class BooleanScorer2 extends Scorer {
} }
public int nextDoc() throws IOException { public int nextDoc() throws IOException {
if (countingSumScorer == null) {
initCountingSumScorer();
}
return doc = countingSumScorer.nextDoc(); return doc = countingSumScorer.nextDoc();
} }
@ -404,9 +355,6 @@ class BooleanScorer2 extends Scorer {
} }
public int advance(int target) throws IOException { public int advance(int target) throws IOException {
if (countingSumScorer == null) {
initCountingSumScorer();
}
return doc = countingSumScorer.advance(target); return doc = countingSumScorer.advance(target);
} }

View File

@ -121,6 +121,8 @@ import org.apache.lucene.index.IndexReader;
* *
* <p><b>NOTE:</b> This API is experimental and might change * <p><b>NOTE:</b> This API is experimental and might change
* in incompatible ways in the next release.</p> * in incompatible ways in the next release.</p>
*
* @since 2.9
*/ */
public abstract class Collector { public abstract class Collector {
@ -157,4 +159,16 @@ public abstract class Collector {
*/ */
public abstract void setNextReader(IndexReader reader, int docBase) throws IOException; public abstract void setNextReader(IndexReader reader, int docBase) throws IOException;
/**
* Returns true iff this {@link Collector} can accept documents given to
* {@link #collect(int)} out of order.
* <p>
* NOTE: some collectors can work in either mode, with a more efficient
* implementation for in-order docs collection. If your collector can work in
* either mode, it is recommended that you create two variants of it, since
* some queries work much faster if out-of-order collection is supported by a
* {@link Collector}.
*/
public abstract boolean acceptsDocsOutOfOrder();
} }

View File

@ -50,7 +50,7 @@ public class ConstantScoreQuery extends Query {
// but may not be OK for highlighting // but may not be OK for highlighting
} }
protected class ConstantWeight implements Weight { protected class ConstantWeight extends QueryWeight {
private Similarity similarity; private Similarity similarity;
private float queryNorm; private float queryNorm;
private float queryWeight; private float queryWeight;
@ -77,13 +77,13 @@ public class ConstantScoreQuery extends Query {
queryWeight *= this.queryNorm; queryWeight *= this.queryNorm;
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new ConstantScorer(similarity, reader, this); return new ConstantScorer(similarity, reader, this);
} }
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(IndexReader reader, int doc) throws IOException {
ConstantScorer cs = (ConstantScorer)scorer(reader); ConstantScorer cs = (ConstantScorer) scorer(reader, true, false);
boolean exists = cs.docIdSetIterator.advance(doc) == doc; boolean exists = cs.docIdSetIterator.advance(doc) == doc;
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
@ -110,7 +110,7 @@ public class ConstantScoreQuery extends Query {
final float theScore; final float theScore;
int doc = -1; int doc = -1;
public ConstantScorer(Similarity similarity, IndexReader reader, Weight w) throws IOException { public ConstantScorer(Similarity similarity, IndexReader reader, QueryWeight w) throws IOException {
super(similarity); super(similarity);
theScore = w.getValue(); theScore = w.getValue();
docIdSetIterator = filter.getDocIdSet(reader).iterator(); docIdSetIterator = filter.getDocIdSet(reader).iterator();
@ -152,7 +152,7 @@ public class ConstantScoreQuery extends Query {
} }
} }
protected Weight createWeight(Searcher searcher) { public QueryWeight createQueryWeight(Searcher searcher) {
return new ConstantScoreQuery.ConstantWeight(searcher); return new ConstantScoreQuery.ConstantWeight(searcher);
} }

View File

@ -86,7 +86,7 @@ public class DisjunctionMaxQuery extends Query {
} }
/* The Weight for DisjunctionMaxQuery's, used to normalize, score and explain these queries */ /* The Weight for DisjunctionMaxQuery's, used to normalize, score and explain these queries */
private class DisjunctionMaxWeight implements Weight { private class DisjunctionMaxWeight extends QueryWeight {
private Similarity similarity; // The similarity which we are associated. private Similarity similarity; // The similarity which we are associated.
private ArrayList weights = new ArrayList(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts private ArrayList weights = new ArrayList(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
@ -94,8 +94,9 @@ public class DisjunctionMaxQuery extends Query {
/* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */ /* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
public DisjunctionMaxWeight(Searcher searcher) throws IOException { public DisjunctionMaxWeight(Searcher searcher) throws IOException {
this.similarity = searcher.getSimilarity(); this.similarity = searcher.getSimilarity();
for (int i = 0; i < disjuncts.size(); i++) for (Iterator iter = disjuncts.iterator(); iter.hasNext();) {
weights.add(((Query) disjuncts.get(i)).createWeight(searcher)); weights.add(((Query) iter.next()).createQueryWeight(searcher));
}
} }
/* Return our associated DisjunctionMaxQuery */ /* Return our associated DisjunctionMaxQuery */
@ -107,28 +108,32 @@ public class DisjunctionMaxQuery extends Query {
/* Compute the sub of squared weights of us applied to our subqueries. Used for normalization. */ /* Compute the sub of squared weights of us applied to our subqueries. Used for normalization. */
public float sumOfSquaredWeights() throws IOException { public float sumOfSquaredWeights() throws IOException {
float max = 0.0f, sum = 0.0f; float max = 0.0f, sum = 0.0f;
for (int i = 0; i < weights.size(); i++) { for (Iterator iter = weights.iterator(); iter.hasNext();) {
float sub = ((Weight) weights.get(i)).sumOfSquaredWeights(); float sub = ((QueryWeight) iter.next()).sumOfSquaredWeights();
sum += sub; sum += sub;
max = Math.max(max, sub); max = Math.max(max, sub);
} }
return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * getBoost() * getBoost(); float boost = getBoost();
return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * boost * boost;
} }
/* Apply the computed normalization factor to our subqueries */ /* Apply the computed normalization factor to our subqueries */
public void normalize(float norm) { public void normalize(float norm) {
norm *= getBoost(); // Incorporate our boost norm *= getBoost(); // Incorporate our boost
for (int i = 0 ; i < weights.size(); i++) for (Iterator iter = weights.iterator(); iter.hasNext();) {
((Weight) weights.get(i)).normalize(norm); ((QueryWeight) iter.next()).normalize(norm);
}
} }
/* Create the scorer used to score our associated DisjunctionMaxQuery */ /* Create the scorer used to score our associated DisjunctionMaxQuery */
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
boolean topScorer) throws IOException {
Scorer[] scorers = new Scorer[weights.size()]; Scorer[] scorers = new Scorer[weights.size()];
int idx = 0; int idx = 0;
for (Iterator iter = weights.iterator(); iter.hasNext();) { for (Iterator iter = weights.iterator(); iter.hasNext();) {
Weight w = (Weight) iter.next(); QueryWeight w = (QueryWeight) iter.next();
Scorer subScorer = w.scorer(reader); Scorer subScorer = w.scorer(reader, true, false);
if (subScorer == null) { if (subScorer == null) {
return null; return null;
} else if (subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { } else if (subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@ -142,12 +147,12 @@ public class DisjunctionMaxQuery extends Query {
/* Explain the score we computed for doc */ /* Explain the score we computed for doc */
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(IndexReader reader, int doc) throws IOException {
if ( disjuncts.size() == 1) return ((Weight) weights.get(0)).explain(reader,doc); if (disjuncts.size() == 1) return ((QueryWeight) weights.get(0)).explain(reader,doc);
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
float max = 0.0f, sum = 0.0f; float max = 0.0f, sum = 0.0f;
result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:"); result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:");
for (int i = 0 ; i < weights.size(); i++) { for (Iterator iter = weights.iterator(); iter.hasNext();) {
Explanation e = ((Weight) weights.get(i)).explain(reader, doc); Explanation e = ((QueryWeight) iter.next()).explain(reader, doc);
if (e.isMatch()) { if (e.isMatch()) {
result.setMatch(Boolean.TRUE); result.setMatch(Boolean.TRUE);
result.addDetail(e); result.addDetail(e);
@ -155,14 +160,14 @@ public class DisjunctionMaxQuery extends Query {
max = Math.max(max, e.getValue()); max = Math.max(max, e.getValue());
} }
} }
result.setValue(max + (sum - max)*tieBreakerMultiplier); result.setValue(max + (sum - max) * tieBreakerMultiplier);
return result; return result;
} }
} // end of DisjunctionMaxWeight inner class } // end of DisjunctionMaxWeight inner class
/* Create the Weight used to score us */ /* Create the QueryWeight used to score us */
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return new DisjunctionMaxWeight(searcher); return new DisjunctionMaxWeight(searcher);
} }
@ -170,7 +175,8 @@ public class DisjunctionMaxQuery extends Query {
* @param reader the IndexReader we query * @param reader the IndexReader we query
* @return an optimized copy of us (which may not be a copy if there is nothing to optimize) */ * @return an optimized copy of us (which may not be a copy if there is nothing to optimize) */
public Query rewrite(IndexReader reader) throws IOException { public Query rewrite(IndexReader reader) throws IOException {
if (disjuncts.size() == 1) { int numDisjunctions = disjuncts.size();
if (numDisjunctions == 1) {
Query singleton = (Query) disjuncts.get(0); Query singleton = (Query) disjuncts.get(0);
Query result = singleton.rewrite(reader); Query result = singleton.rewrite(reader);
if (getBoost() != 1.0f) { if (getBoost() != 1.0f) {
@ -180,7 +186,7 @@ public class DisjunctionMaxQuery extends Query {
return result; return result;
} }
DisjunctionMaxQuery clone = null; DisjunctionMaxQuery clone = null;
for (int i = 0 ; i < disjuncts.size(); i++) { for (int i = 0 ; i < numDisjunctions; i++) {
Query clause = (Query) disjuncts.get(i); Query clause = (Query) disjuncts.get(i);
Query rewrite = clause.rewrite(reader); Query rewrite = clause.rewrite(reader);
if (rewrite != clause) { if (rewrite != clause) {
@ -200,15 +206,13 @@ public class DisjunctionMaxQuery extends Query {
return clone; return clone;
} }
// inherit javadoc // inherit javadoc
public void extractTerms(Set terms) { public void extractTerms(Set terms) {
for (int i = 0; i < disjuncts.size(); i++) { for (Iterator iter = disjuncts.iterator(); iter.hasNext();) {
((Query)disjuncts.get(i)).extractTerms(terms); ((Query) iter.next()).extractTerms(terms);
} }
} }
/** Prettyprint us. /** Prettyprint us.
* @param field the field to which we are applied * @param field the field to which we are applied
* @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost" * @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
@ -216,7 +220,8 @@ public class DisjunctionMaxQuery extends Query {
public String toString(String field) { public String toString(String field) {
StringBuffer buffer = new StringBuffer(); StringBuffer buffer = new StringBuffer();
buffer.append("("); buffer.append("(");
for (int i = 0 ; i < disjuncts.size(); i++) { int numDisjunctions = disjuncts.size();
for (int i = 0 ; i < numDisjunctions; i++) {
Query subquery = (Query) disjuncts.get(i); Query subquery = (Query) disjuncts.get(i);
if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens
buffer.append("("); buffer.append("(");
@ -224,7 +229,7 @@ public class DisjunctionMaxQuery extends Query {
buffer.append(")"); buffer.append(")");
} }
else buffer.append(subquery.toString(field)); else buffer.append(subquery.toString(field));
if (i != disjuncts.size()-1) buffer.append(" | "); if (i != numDisjunctions-1) buffer.append(" | ");
} }
buffer.append(")"); buffer.append(")");
if (tieBreakerMultiplier != 0.0f) { if (tieBreakerMultiplier != 0.0f) {

View File

@ -22,8 +22,8 @@ import org.apache.lucene.index.*;
final class ExactPhraseScorer extends PhraseScorer { final class ExactPhraseScorer extends PhraseScorer {
ExactPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, ExactPhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets,
byte[] norms) { Similarity similarity, byte[] norms) {
super(weight, tps, offsets, similarity, norms); super(weight, tps, offsets, similarity, norms);
} }
@ -43,13 +43,13 @@ final class ExactPhraseScorer extends PhraseScorer {
while (first.position < last.position) { // scan forward in first while (first.position < last.position) { // scan forward in first
do { do {
if (!first.nextPosition()) if (!first.nextPosition())
return (float)freq; return freq;
} while (first.position < last.position); } while (first.position < last.position);
firstToLast(); firstToLast();
} }
freq++; // all equal: a match freq++; // all equal: a match
} while (last.nextPosition()); } while (last.nextPosition());
return (float)freq; return freq;
} }
} }

View File

@ -54,16 +54,14 @@ extends Query {
this.filter = filter; this.filter = filter;
} }
/** /**
* Returns a Weight that applies the filter to the enclosed query's Weight. * Returns a Weight that applies the filter to the enclosed query's Weight.
* This is accomplished by overriding the Scorer returned by the Weight. * This is accomplished by overriding the Scorer returned by the Weight.
*/ */
protected Weight createWeight (final Searcher searcher) throws IOException { public QueryWeight createQueryWeight(final Searcher searcher) throws IOException {
final Weight weight = query.createWeight (searcher); final QueryWeight weight = query.createQueryWeight (searcher);
final Similarity similarity = query.getSimilarity(searcher); final Similarity similarity = query.getSimilarity(searcher);
return new Weight() { return new QueryWeight() {
private float value; private float value;
// pass these methods through to enclosed query's weight // pass these methods through to enclosed query's weight
@ -99,8 +97,9 @@ extends Query {
public Query getQuery() { return FilteredQuery.this; } public Query getQuery() { return FilteredQuery.this; }
// return a filtering scorer // return a filtering scorer
public Scorer scorer (IndexReader indexReader) throws IOException { public Scorer scorer(IndexReader indexReader, boolean scoreDocsInOrder, boolean topScorer)
final Scorer scorer = weight.scorer(indexReader); throws IOException {
final Scorer scorer = weight.scorer(indexReader, scoreDocsInOrder, false);
final DocIdSetIterator docIdSetIterator = filter.getDocIdSet(indexReader).iterator(); final DocIdSetIterator docIdSetIterator = filter.getDocIdSet(indexReader).iterator();
return new Scorer(similarity) { return new Scorer(similarity) {

View File

@ -25,7 +25,9 @@ import org.apache.lucene.index.IndexReader;
* Wrapper for ({@link HitCollector}) implementations, which * Wrapper for ({@link HitCollector}) implementations, which
* simply re-bases the incoming docID before calling {@link * simply re-bases the incoming docID before calling {@link
* HitCollector#collect}. * HitCollector#collect}.
* @deprecated this class will be removed when {@link HitCollector} is removed. * @deprecated this class will be removed when {@link
* HitCollector} is removed. Please migrate custom
* HitCollectors to the new {@link Collector} class.
*/ */
public class HitCollectorWrapper extends Collector { public class HitCollectorWrapper extends Collector {
private HitCollector collector; private HitCollector collector;
@ -47,4 +49,9 @@ public class HitCollectorWrapper extends Collector {
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
} }
public boolean acceptsDocsOutOfOrder() {
return false;
}
} }

View File

@ -19,8 +19,8 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import java.util.ConcurrentModificationException; import java.util.ConcurrentModificationException;
import java.util.Vector;
import java.util.Iterator; import java.util.Iterator;
import java.util.Vector;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
@ -53,7 +53,7 @@ import org.apache.lucene.index.CorruptIndexException;
* </pre> * </pre>
*/ */
public final class Hits { public final class Hits {
private Weight weight; private QueryWeight weight;
private Searcher searcher; private Searcher searcher;
private Filter filter = null; private Filter filter = null;
private Sort sort = null; private Sort sort = null;
@ -73,7 +73,7 @@ public final class Hits {
boolean debugCheckedForDeletions = false; // for test purposes. boolean debugCheckedForDeletions = false; // for test purposes.
Hits(Searcher s, Query q, Filter f) throws IOException { Hits(Searcher s, Query q, Filter f) throws IOException {
weight = q.weight(s); weight = q.queryWeight(s);
searcher = s; searcher = s;
filter = f; filter = f;
nDeletions = countDeletions(s); nDeletions = countDeletions(s);
@ -82,7 +82,7 @@ public final class Hits {
} }
Hits(Searcher s, Query q, Filter f, Sort o) throws IOException { Hits(Searcher s, Query q, Filter f, Sort o) throws IOException {
weight = q.weight(s); weight = q.queryWeight(s);
searcher = s; searcher = s;
filter = f; filter = f;
sort = o; sort = o;

View File

@ -161,38 +161,33 @@ public class IndexSearcher extends Searcher {
} }
// inherit javadoc // inherit javadoc
public TopDocs search(Weight weight, Filter filter, final int nDocs) public TopDocs search(QueryWeight weight, Filter filter, final int nDocs) throws IOException {
throws IOException {
if (nDocs <= 0) // null might be returned from hq.top() below. if (nDocs <= 0) {
throw new IllegalArgumentException("nDocs must be > 0"); throw new IllegalArgumentException("nDocs must be > 0");
}
// TODO: The following should be changed to first obtain a Scorer and then ask it TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder());
// if it's going to return in-order or out-of-order docs, and create TSDC
// accordingly.
TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, false);
search(weight, filter, collector); search(weight, filter, collector);
return collector.topDocs(); return collector.topDocs();
} }
// inherit javadoc public TopFieldDocs search(QueryWeight weight, Filter filter,
public TopFieldDocs search(Weight weight, Filter filter, final int nDocs, final int nDocs, Sort sort) throws IOException {
Sort sort)
throws IOException {
return search(weight, filter, nDocs, sort, true); return search(weight, filter, nDocs, sort, true);
} }
/** /**
* Just like {@link #search(Weight, Filter, int, Sort)}, but you choose * Just like {@link #search(QueryWeight, Filter, int, Sort)}, but you choose
* whether or not the fields in the returned {@link FieldDoc} instances should * whether or not the fields in the returned {@link FieldDoc} instances should
* be set by specifying fillFields.<br> * be set by specifying fillFields.<br>
* <b>NOTE:</b> currently, this method tracks document scores and sets them in * <b>NOTE:</b> currently, this method tracks document scores and sets them in
* the returned {@link FieldDoc}, however in 3.0 it will move to not track * the returned {@link FieldDoc}, however in 3.0 it will move to not track
* document scores. If document scores tracking is still needed, you can use * document scores. If document scores tracking is still needed, you can use
* {@link #search(Weight, Filter, Collector)} and pass in a * {@link #search(QueryWeight, Filter, Collector)} and pass in a
* {@link TopFieldCollector} instance. * {@link TopFieldCollector} instance.
*/ */
public TopFieldDocs search(Weight weight, Filter filter, final int nDocs, public TopFieldDocs search(QueryWeight weight, Filter filter, final int nDocs,
Sort sort, boolean fillFields) Sort sort, boolean fillFields)
throws IOException { throws IOException {
@ -222,51 +217,51 @@ public class IndexSearcher extends Searcher {
TopDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs); TopDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs);
HitCollectorWrapper hcw = new HitCollectorWrapper(collector); HitCollectorWrapper hcw = new HitCollectorWrapper(collector);
hcw.setNextReader(reader, 0); hcw.setNextReader(reader, 0);
doSearch(reader, weight, filter, hcw); if (filter == null) {
Scorer scorer = weight.scorer(reader, true, true);
scorer.score(hcw);
} else {
searchWithFilter(reader, weight, filter, hcw);
}
return (TopFieldDocs) collector.topDocs(); return (TopFieldDocs) collector.topDocs();
} }
// Search each sub-reader
// TODO: The following should be changed to first obtain a Scorer and then ask it
// if it's going to return in-order or out-of-order docs, and create TSDC
// accordingly.
TopFieldCollector collector = TopFieldCollector.create(sort, nDocs, TopFieldCollector collector = TopFieldCollector.create(sort, nDocs,
fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, false); fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
search(weight, filter, collector); search(weight, filter, collector);
return (TopFieldDocs) collector.topDocs(); return (TopFieldDocs) collector.topDocs();
} }
// inherit javadoc public void search(QueryWeight weight, Filter filter, Collector collector)
/** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */
public void search(Weight weight, Filter filter, HitCollector results)
throws IOException {
search(weight, filter, new HitCollectorWrapper(results));
}
// inherit javadoc
public void search(Weight weight, Filter filter, Collector collector)
throws IOException { throws IOException {
if (filter == null) {
for (int i = 0; i < subReaders.length; i++) { // search each subreader for (int i = 0; i < subReaders.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]); collector.setNextReader(subReaders[i], docStarts[i]);
doSearch(subReaders[i], weight, filter, collector); Scorer scorer = weight.scorer(subReaders[i], !collector.acceptsDocsOutOfOrder(), true);
scorer.score(collector);
}
} else {
for (int i = 0; i < subReaders.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]);
searchWithFilter(subReaders[i], weight, filter, collector);
}
} }
} }
private void doSearch(IndexReader reader, Weight weight, Filter filter, private void searchWithFilter(IndexReader reader, QueryWeight weight,
final Collector collector) throws IOException { final Filter filter, final Collector collector) throws IOException {
Scorer scorer = weight.scorer(reader); assert filter != null;
if (scorer == null)
Scorer scorer = weight.scorer(reader, true, false);
if (scorer == null) {
return; return;
}
int docID = scorer.docID(); int docID = scorer.docID();
assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS; assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS;
if (filter == null) {
scorer.score(collector);
return;
}
// CHECKME: use ConjunctionScorer here? // CHECKME: use ConjunctionScorer here?
DocIdSetIterator filterIter = filter.getDocIdSet(reader).iterator(); DocIdSetIterator filterIter = filter.getDocIdSet(reader).iterator();
@ -300,7 +295,7 @@ public class IndexSearcher extends Searcher {
return query; return query;
} }
public Explanation explain(Weight weight, int doc) throws IOException { public Explanation explain(QueryWeight weight, int doc) throws IOException {
return weight.explain(reader, doc); return weight.explain(reader, doc);
} }

View File

@ -49,7 +49,7 @@ public class MatchAllDocsQuery extends Query {
final byte[] norms; final byte[] norms;
private int doc = -1; private int doc = -1;
MatchAllScorer(IndexReader reader, Similarity similarity, Weight w, MatchAllScorer(IndexReader reader, Similarity similarity, QueryWeight w,
byte[] norms) throws IOException { byte[] norms) throws IOException {
super(similarity); super(similarity);
this.termDocs = reader.termDocs(null); this.termDocs = reader.termDocs(null);
@ -93,7 +93,7 @@ public class MatchAllDocsQuery extends Query {
} }
} }
private class MatchAllDocsWeight implements Weight { private class MatchAllDocsWeight extends QueryWeight {
private Similarity similarity; private Similarity similarity;
private float queryWeight; private float queryWeight;
private float queryNorm; private float queryNorm;
@ -124,7 +124,7 @@ public class MatchAllDocsQuery extends Query {
queryWeight *= this.queryNorm; queryWeight *= this.queryNorm;
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new MatchAllScorer(reader, similarity, this, return new MatchAllScorer(reader, similarity, this,
normsField != null ? reader.norms(normsField) : null); normsField != null ? reader.norms(normsField) : null);
} }
@ -142,7 +142,7 @@ public class MatchAllDocsQuery extends Query {
} }
} }
protected Weight createWeight(Searcher searcher) { public QueryWeight createQueryWeight(Searcher searcher) {
return new MatchAllDocsWeight(searcher); return new MatchAllDocsWeight(searcher);
} }

View File

@ -123,7 +123,7 @@ public class MultiPhraseQuery extends Query {
} }
private class MultiPhraseWeight implements Weight { private class MultiPhraseWeight extends QueryWeight {
private Similarity similarity; private Similarity similarity;
private float value; private float value;
private float idf; private float idf;
@ -158,7 +158,7 @@ public class MultiPhraseQuery extends Query {
value = queryWeight * idf; // idf for document value = queryWeight * idf; // idf for document
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
if (termArrays.size() == 0) // optimize zero-term case if (termArrays.size() == 0) // optimize zero-term case
return null; return null;
@ -217,7 +217,7 @@ public class MultiPhraseQuery extends Query {
fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+ fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+
"), product of:"); "), product of:");
Explanation tfExpl = scorer(reader).explain(doc); Explanation tfExpl = scorer(reader, true, false).explain(doc);
fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(tfExpl);
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);
@ -261,7 +261,7 @@ public class MultiPhraseQuery extends Query {
} }
} }
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return new MultiPhraseWeight(searcher); return new MultiPhraseWeight(searcher);
} }

View File

@ -35,10 +35,11 @@ import java.util.Set;
* or {@link #search(Query,Filter)} methods. * or {@link #search(Query,Filter)} methods.
*/ */
public class MultiSearcher extends Searcher { public class MultiSearcher extends Searcher {
/** /**
* Document Frequency cache acting as a Dummy-Searcher. * Document Frequency cache acting as a Dummy-Searcher. This class is no
* This class is no full-fledged Searcher, but only supports * full-fledged Searcher, but only supports the methods necessary to
* the methods necessary to initialize Weights. * initialize Weights.
*/ */
private static class CachedDfSource extends Searcher { private static class CachedDfSource extends Searcher {
private Map dfMap; // Map from Terms to corresponding doc freqs private Map dfMap; // Map from Terms to corresponding doc freqs
@ -93,34 +94,28 @@ public class MultiSearcher extends Searcher {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
public Explanation explain(Weight weight,int doc) { public Explanation explain(QueryWeight weight,int doc) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
/** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ public void search(QueryWeight weight, Filter filter, Collector results) {
public void search(Weight weight, Filter filter, HitCollector results) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
public void search(Weight weight, Filter filter, Collector collector) { public TopDocs search(QueryWeight weight,Filter filter,int n) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
public TopDocs search(Weight weight,Filter filter,int n) { public TopFieldDocs search(QueryWeight weight,Filter filter,int n,Sort sort) {
throw new UnsupportedOperationException();
}
public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
} }
private Searchable[] searchables; private Searchable[] searchables;
private int[] starts; private int[] starts;
private int maxDoc = 0; private int maxDoc = 0;
/** Creates a searcher which searches <i>searchables</i>. */ /** Creates a searcher which searches <i>searchers</i>. */
public MultiSearcher(Searchable[] searchables) throws IOException { public MultiSearcher(Searchable[] searchables) throws IOException {
this.searchables = searchables; this.searchables = searchables;
@ -200,7 +195,7 @@ public class MultiSearcher extends Searcher {
return maxDoc; return maxDoc;
} }
public TopDocs search(Weight weight, Filter filter, int nDocs) public TopDocs search(QueryWeight weight, Filter filter, int nDocs)
throws IOException { throws IOException {
HitQueue hq = new HitQueue(nDocs, false); HitQueue hq = new HitQueue(nDocs, false);
@ -227,7 +222,7 @@ public class MultiSearcher extends Searcher {
return new TopDocs(totalHits, scoreDocs, maxScore); return new TopDocs(totalHits, scoreDocs, maxScore);
} }
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) public TopFieldDocs search (QueryWeight weight, Filter filter, int n, Sort sort)
throws IOException { throws IOException {
FieldDocSortedHitQueue hq = null; FieldDocSortedHitQueue hq = null;
int totalHits = 0; int totalHits = 0;
@ -269,14 +264,7 @@ public class MultiSearcher extends Searcher {
} }
// inherit javadoc // inherit javadoc
/** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ public void search(QueryWeight weight, Filter filter, final Collector collector)
public void search(Weight weight, Filter filter, final HitCollector results)
throws IOException {
search(weight, filter, new HitCollectorWrapper(results));
}
// inherit javadoc
public void search(Weight weight, Filter filter, final Collector collector)
throws IOException { throws IOException {
for (int i = 0; i < searchables.length; i++) { for (int i = 0; i < searchables.length; i++) {
@ -292,6 +280,9 @@ public class MultiSearcher extends Searcher {
public void setNextReader(IndexReader reader, int docBase) throws IOException { public void setNextReader(IndexReader reader, int docBase) throws IOException {
collector.setNextReader(reader, start + docBase); collector.setNextReader(reader, start + docBase);
} }
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
}; };
searchables[i].search(weight, filter, hc); searchables[i].search(weight, filter, hc);
@ -306,9 +297,9 @@ public class MultiSearcher extends Searcher {
return queries[0].combine(queries); return queries[0].combine(queries);
} }
public Explanation explain(Weight weight, int doc) throws IOException { public Explanation explain(QueryWeight weight, int doc) throws IOException {
int i = subSearcher(doc); // find searcher index int i = subSearcher(doc); // find searcher index
return searchables[i].explain(weight,doc-starts[i]); // dispatch to searcher return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
} }
/** /**
@ -326,7 +317,7 @@ public class MultiSearcher extends Searcher {
* *
* @return rewritten queries * @return rewritten queries
*/ */
protected Weight createWeight(Query original) throws IOException { protected QueryWeight createQueryWeight(Query original) throws IOException {
// step 1 // step 1
Query rewrittenQuery = rewrite(original); Query rewrittenQuery = rewrite(original);
@ -354,7 +345,7 @@ public class MultiSearcher extends Searcher {
int numDocs = maxDoc(); int numDocs = maxDoc();
CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity()); CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());
return rewrittenQuery.weight(cacheSim); return rewrittenQuery.queryWeight(cacheSim);
} }
} }

View File

@ -33,11 +33,11 @@ public class ParallelMultiSearcher extends MultiSearcher {
private Searchable[] searchables; private Searchable[] searchables;
private int[] starts; private int[] starts;
/** Creates a searcher which searches <i>searchables</i>. */ /** Creates a searchable which searches <i>searchables</i>. */
public ParallelMultiSearcher(Searchable[] searchables) throws IOException { public ParallelMultiSearcher(Searchable[] searchables) throws IOException {
super(searchables); super(searchables);
this.searchables=searchables; this.searchables = searchables;
this.starts=getStarts(); this.starts = getStarts();
} }
/** /**
@ -52,24 +52,16 @@ public class ParallelMultiSearcher extends MultiSearcher {
* Searchable, waits for each search to complete and merge * Searchable, waits for each search to complete and merge
* the results back together. * the results back together.
*/ */
public TopDocs search(Weight weight, Filter filter, int nDocs) public TopDocs search(QueryWeight weight, Filter filter, int nDocs)
throws IOException { throws IOException {
HitQueue hq = new HitQueue(nDocs, false); HitQueue hq = new HitQueue(nDocs, false);
int totalHits = 0; int totalHits = 0;
MultiSearcherThread[] msta = MultiSearcherThread[] msta =
new MultiSearcherThread[searchables.length]; new MultiSearcherThread[searchables.length];
for (int i = 0; i < searchables.length; i++) { // search each searcher for (int i = 0; i < searchables.length; i++) { // search each searchable
// Assume not too many searchables and cost of creating a thread is by far inferior to a search // Assume not too many searchables and cost of creating a thread is by far inferior to a search
msta[i] = msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs,
new MultiSearcherThread( hq, i, starts, "MultiSearcher thread #" + (i + 1));
searchables[i],
weight,
filter,
nDocs,
hq,
i,
starts,
"MultiSearcher thread #" + (i + 1));
msta[i].start(); msta[i].start();
} }
@ -105,25 +97,16 @@ public class ParallelMultiSearcher extends MultiSearcher {
* Searchable, waits for each search to complete and merges * Searchable, waits for each search to complete and merges
* the results back together. * the results back together.
*/ */
public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort) public TopFieldDocs search(QueryWeight weight, Filter filter, int nDocs, Sort sort)
throws IOException { throws IOException {
// don't specify the fields - we'll wait to do this until we get results // don't specify the fields - we'll wait to do this until we get results
FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue (null, nDocs); FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue (null, nDocs);
int totalHits = 0; int totalHits = 0;
MultiSearcherThread[] msta = new MultiSearcherThread[searchables.length]; MultiSearcherThread[] msta = new MultiSearcherThread[searchables.length];
for (int i = 0; i < searchables.length; i++) { // search each searcher for (int i = 0; i < searchables.length; i++) { // search each searchable
// Assume not too many searchables and cost of creating a thread is by far inferior to a search // Assume not too many searchables and cost of creating a thread is by far inferior to a search
msta[i] = msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs,
new MultiSearcherThread( hq, sort, i, starts, "MultiSearcher thread #" + (i + 1));
searchables[i],
weight,
filter,
nDocs,
hq,
sort,
i,
starts,
"MultiSearcher thread #" + (i + 1));
msta[i].start(); msta[i].start();
} }
@ -155,28 +138,6 @@ public class ParallelMultiSearcher extends MultiSearcher {
return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore); return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
} }
/** Lower-level search API.
*
* <p>{@link HitCollector#collect(int,float)} is called for every matching
* document.
*
* <p>Applications should only use this if they need <i>all</i> of the
* matching documents. The high-level search API ({@link
* Searcher#search(Query)}) is usually more efficient, as it skips
* non-high-scoring hits.
*
* @param weight to match documents
* @param filter if non-null, a bitset used to eliminate some documents
* @param results to receive hits
*
* @todo parallelize this one too
* @deprecated use {@link #search(Weight, Filter, Collector)} instead.
*/
public void search(Weight weight, Filter filter, final HitCollector results)
throws IOException {
search(weight, filter, new HitCollectorWrapper(results));
}
/** Lower-level search API. /** Lower-level search API.
* *
* <p>{@link Collector#collect(int)} is called for every matching document. * <p>{@link Collector#collect(int)} is called for every matching document.
@ -192,7 +153,7 @@ public class ParallelMultiSearcher extends MultiSearcher {
* *
* @todo parallelize this one too * @todo parallelize this one too
*/ */
public void search(Weight weight, Filter filter, final Collector collector) public void search(QueryWeight weight, Filter filter, final Collector collector)
throws IOException { throws IOException {
for (int i = 0; i < searchables.length; i++) { for (int i = 0; i < searchables.length; i++) {
@ -205,10 +166,12 @@ public class ParallelMultiSearcher extends MultiSearcher {
public void collect(int doc) throws IOException { public void collect(int doc) throws IOException {
collector.collect(doc); collector.collect(doc);
} }
public void setNextReader(IndexReader reader, int docBase) throws IOException { public void setNextReader(IndexReader reader, int docBase) throws IOException {
collector.setNextReader(reader, start + docBase); collector.setNextReader(reader, start + docBase);
} }
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
}; };
searchables[i].search(weight, filter, hc); searchables[i].search(weight, filter, hc);
@ -231,7 +194,7 @@ public class ParallelMultiSearcher extends MultiSearcher {
class MultiSearcherThread extends Thread { class MultiSearcherThread extends Thread {
private Searchable searchable; private Searchable searchable;
private Weight weight; private QueryWeight weight;
private Filter filter; private Filter filter;
private int nDocs; private int nDocs;
private TopDocs docs; private TopDocs docs;
@ -241,15 +204,8 @@ class MultiSearcherThread extends Thread {
private IOException ioe; private IOException ioe;
private Sort sort; private Sort sort;
public MultiSearcherThread( public MultiSearcherThread(Searchable searchable, QueryWeight weight, Filter filter,
Searchable searchable, int nDocs, HitQueue hq, int i, int[] starts, String name) {
Weight weight,
Filter filter,
int nDocs,
HitQueue hq,
int i,
int[] starts,
String name) {
super(name); super(name);
this.searchable = searchable; this.searchable = searchable;
this.weight = weight; this.weight = weight;
@ -260,16 +216,9 @@ class MultiSearcherThread extends Thread {
this.starts = starts; this.starts = starts;
} }
public MultiSearcherThread( public MultiSearcherThread(Searchable searchable, QueryWeight weight,
Searchable searchable, Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i,
Weight weight, int[] starts, String name) {
Filter filter,
int nDocs,
FieldDocSortedHitQueue hq,
Sort sort,
int i,
int[] starts,
String name) {
super(name); super(name);
this.searchable = searchable; this.searchable = searchable;
this.weight = weight; this.weight = weight;
@ -298,7 +247,7 @@ class MultiSearcherThread extends Thread {
TopFieldDocs docsFields = (TopFieldDocs) docs; TopFieldDocs docsFields = (TopFieldDocs) docs;
// If one of the Sort fields is FIELD_DOC, need to fix its values, so that // If one of the Sort fields is FIELD_DOC, need to fix its values, so that
// it will break ties by doc Id properly. Otherwise, it will compare to // it will break ties by doc Id properly. Otherwise, it will compare to
// 'relative' doc Ids, that belong to two different searchers. // 'relative' doc Ids, that belong to two different searchables.
for (int j = 0; j < docsFields.fields.length; j++) { for (int j = 0; j < docsFields.fields.length; j++) {
if (docsFields.fields[j].getType() == SortField.DOC) { if (docsFields.fields[j].getType() == SortField.DOC) {
// iterate over the score docs and change their fields value // iterate over the score docs and change their fields value

View File

@ -106,7 +106,7 @@ public class PhraseQuery extends Query {
return result; return result;
} }
private class PhraseWeight implements Weight { private class PhraseWeight extends QueryWeight {
private Similarity similarity; private Similarity similarity;
private float value; private float value;
private float idf; private float idf;
@ -136,7 +136,7 @@ public class PhraseQuery extends Query {
value = queryWeight * idf; // idf for document value = queryWeight * idf; // idf for document
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
if (terms.size() == 0) // optimize zero-term case if (terms.size() == 0) // optimize zero-term case
return null; return null;
@ -209,7 +209,7 @@ public class PhraseQuery extends Query {
fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+ fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+
"), product of:"); "), product of:");
Explanation tfExpl = scorer(reader).explain(doc); Explanation tfExpl = scorer(reader, true, false).explain(doc);
fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(tfExpl);
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);
@ -237,12 +237,12 @@ public class PhraseQuery extends Query {
} }
} }
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
if (terms.size() == 1) { // optimize one-term case if (terms.size() == 1) { // optimize one-term case
Term term = (Term)terms.get(0); Term term = (Term)terms.get(0);
Query termQuery = new TermQuery(term); Query termQuery = new TermQuery(term);
termQuery.setBoost(getBoost()); termQuery.setBoost(getBoost());
return termQuery.createWeight(searcher); return termQuery.createQueryWeight(searcher);
} }
return new PhraseWeight(searcher); return new PhraseWeight(searcher);
} }

View File

@ -19,7 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.*; import org.apache.lucene.index.TermPositions;
/** Expert: Scoring functionality for phrase queries. /** Expert: Scoring functionality for phrase queries.
* <br>A document is considered matching if it contains the phrase-query terms * <br>A document is considered matching if it contains the phrase-query terms
@ -32,7 +32,7 @@ import org.apache.lucene.index.*;
* means a match. * means a match.
*/ */
abstract class PhraseScorer extends Scorer { abstract class PhraseScorer extends Scorer {
private Weight weight; private QueryWeight weight;
protected byte[] norms; protected byte[] norms;
protected float value; protected float value;
@ -43,7 +43,7 @@ abstract class PhraseScorer extends Scorer {
private float freq; //phrase frequency in current doc as computed by phraseFreq(). private float freq; //phrase frequency in current doc as computed by phraseFreq().
PhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, PhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets,
Similarity similarity, byte[] norms) { Similarity similarity, byte[] norms) {
super(similarity); super(similarity);
this.norms = norms; this.norms = norms;

View File

@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexReader;
* {@link Collector} and makes sure only documents with * {@link Collector} and makes sure only documents with
* scores &gt; 0 are collected. * scores &gt; 0 are collected.
*/ */
public class PositiveScoresOnlyCollector extends Collector { public class PositiveScoresOnlyCollector extends Collector {
final private Collector c; final private Collector c;
@ -53,4 +52,8 @@ public class PositiveScoresOnlyCollector extends Collector {
c.setScorer(this.scorer); c.setScorer(this.scorer);
} }
public boolean acceptsDocsOutOfOrder() {
return c.acceptsDocsOutOfOrder();
}
} }
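Every concrete Collector must now answer acceptsDocsOutOfOrder(), as the delegation above shows. A minimal hedged sketch of the pattern follows (not part of this patch; the class name CountingCollector and the getCount() accessor are illustrative only): a collector that ignores both scores and doc ID order can simply return true and let the searcher choose a possibly faster out-of-order Scorer such as BooleanScorer.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

// Hypothetical collector that only counts hits. Because it ignores scores
// and doc ID order, it returns true from acceptsDocsOutOfOrder(), allowing
// the searcher to use an out-of-order Scorer when one is available.
public class CountingCollector extends Collector {
  private int count;

  public void setScorer(Scorer scorer) throws IOException {
    // scores are not needed for counting
  }

  public void collect(int doc) throws IOException {
    count++;
  }

  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // no per-reader state required
  }

  public boolean acceptsDocsOutOfOrder() {
    return true;
  }

  public int getCount() {
    return count;
  }
}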

View File

@ -80,25 +80,51 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return toString(""); return toString("");
} }
/** Expert: Constructs an appropriate Weight implementation for this query. /**
* Expert: Constructs an appropriate Weight implementation for this query.
* *
* <p>Only implemented by primitive queries, which re-write to themselves. * <p>
* Only implemented by primitive queries, which re-write to themselves.
* @deprecated use {@link #createQueryWeight(Searcher)} instead.
*/ */
protected Weight createWeight(Searcher searcher) throws IOException { protected Weight createWeight(Searcher searcher) throws IOException {
return createQueryWeight(searcher);
}
/**
* Expert: Constructs an appropriate {@link QueryWeight} implementation for
* this query.
*
* <p>
* Only implemented by primitive queries, which re-write to themselves.
*/
public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
/** Expert: Constructs and initializes a Weight for a top-level query. */ /**
public Weight weight(Searcher searcher) * Expert: Constructs and initializes a Weight for a top-level query.
throws IOException { *
* @deprecated use {@link #queryWeight(Searcher)} instead.
*/
public Weight weight(Searcher searcher) throws IOException {
return queryWeight(searcher);
}
/**
* Expert: Constructs and initializes a {@link QueryWeight} for a top-level
* query.
*/
public QueryWeight queryWeight(Searcher searcher) throws IOException {
Query query = searcher.rewrite(this); Query query = searcher.rewrite(this);
Weight weight = query.createWeight(searcher); QueryWeight weight = query.createQueryWeight(searcher);
float sum = weight.sumOfSquaredWeights(); float sum = weight.sumOfSquaredWeights();
float norm = getSimilarity(searcher).queryNorm(sum); float norm = getSimilarity(searcher).queryNorm(sum);
weight.normalize(norm); weight.normalize(norm);
return weight; return weight;
} }
/** Expert: called to re-write queries into primitive queries. For example, /** Expert: called to re-write queries into primitive queries. For example,
* a PrefixQuery will be rewritten into a BooleanQuery that consists * a PrefixQuery will be rewritten into a BooleanQuery that consists
* of TermQuerys. * of TermQuerys.
@ -107,6 +133,7 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return this; return this;
} }
/** Expert: called when re-writing queries under MultiSearcher. /** Expert: called when re-writing queries under MultiSearcher.
* *
* Create a single query suitable for use by all subsearchers (in 1-1 * Create a single query suitable for use by all subsearchers (in 1-1
@ -152,6 +179,7 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return result; return result;
} }
/** /**
* Expert: adds all terms occurring in this query to the terms set. Only * Expert: adds all terms occurring in this query to the terms set. Only
* works if this query is in its {@link #rewrite rewritten} form. * works if this query is in its {@link #rewrite rewritten} form.
@ -164,6 +192,7 @@ public abstract class Query implements java.io.Serializable, Cloneable {
} }
/** Expert: merges the clauses of a set of BooleanQuery's into a single /** Expert: merges the clauses of a set of BooleanQuery's into a single
* BooleanQuery. * BooleanQuery.
* *
@ -188,6 +217,7 @@ public abstract class Query implements java.io.Serializable, Cloneable {
return result; return result;
} }
/** Expert: Returns the Similarity implementation to be used for this query. /** Expert: Returns the Similarity implementation to be used for this query.
* Subclasses may override this method to specify their own Similarity * Subclasses may override this method to specify their own Similarity
* implementation, perhaps one that delegates through that of the Searcher. * implementation, perhaps one that delegates through that of the Searcher.
@ -199,7 +229,7 @@ public abstract class Query implements java.io.Serializable, Cloneable {
/** Returns a clone of this query. */ /** Returns a clone of this query. */
public Object clone() { public Object clone() {
try { try {
return (Query)super.clone(); return super.clone();
} catch (CloneNotSupportedException e) { } catch (CloneNotSupportedException e) {
throw new RuntimeException("Clone not supported: " + e.getMessage()); throw new RuntimeException("Clone not supported: " + e.getMessage());
} }

View File

@ -0,0 +1,119 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Serializable;
import org.apache.lucene.index.IndexReader;
/**
* Expert: Calculate query weights and build query scorers.
* <p>
* The purpose of {@link QueryWeight} is to ensure searching does not
* modify a {@link Query}, so that a {@link Query} instance can be reused. <br>
* {@link Searcher} dependent state of the query should reside in the
* {@link QueryWeight}. <br>
* {@link IndexReader} dependent state should reside in the {@link Scorer}.
* <p>
* A <code>QueryWeight</code> is used in the following way:
* <ol>
* <li>A <code>QueryWeight</code> is constructed by a top-level query, given a
* <code>Searcher</code> ({@link Query#createWeight(Searcher)}).
* <li>The {@link #sumOfSquaredWeights()} method is called on the
* <code>QueryWeight</code> to compute the query normalization factor
* {@link Similarity#queryNorm(float)} of the query clauses contained in the
* query.
* <li>The query normalization factor is passed to {@link #normalize(float)}. At
* this point the weighting is complete.
* <li>A <code>Scorer</code> is constructed by {@link #scorer(IndexReader)}.
* </ol>
*
* @since 2.9
*/
public abstract class QueryWeight implements Weight, Serializable {
/** An explanation of the score computation for the named document. */
public abstract Explanation explain(IndexReader reader, int doc) throws IOException;
/** The query that this concerns. */
public abstract Query getQuery();
/** The weight for this query. */
public abstract float getValue();
/** Assigns the query normalization factor to this. */
public abstract void normalize(float norm);
/**
* @deprecated use {@link #scorer(IndexReader, boolean, boolean)} instead.
* Currently this defaults to requesting an in-order, non-top
* scorer, and will be removed in 3.0.
*/
public Scorer scorer(IndexReader reader) throws IOException {
return scorer(reader, true, false);
}
/**
* Returns a {@link Scorer} which scores documents in/out-of order according
* to <code>scoreDocsInOrder</code>.
* <p>
* <b>NOTE:</b> even if <code>scoreDocsInOrder</code> is false, it is
* recommended to check whether the returned <code>Scorer</code> indeed scores
* documents out of order (i.e., call {@link #scoresDocsOutOfOrder()}), as some
* <code>Scorer</code> implementations will always return documents in-order.
*
* @param reader
* the {@link IndexReader} for which to return the {@link Scorer}.
* @param scoreDocsInOrder
* specifies whether in-order scoring of documents is required. Note
* that if set to false (i.e., out-of-order scoring is required),
* this method can return whatever scoring mode it supports, as every
* in-order scorer is also an out-of-order one. However, an
* out-of-order scorer may not support {@link Scorer#nextDoc()}
* and/or {@link Scorer#advance(int)}, therefore it is recommended to
* request an in-order scorer if use of these methods is required.
* @param topScorer
* specifies whether the returned {@link Scorer} will be used as a
* top scorer or as an iterator. I.e., if true,
* {@link Scorer#score(Collector)} will be called; if false,
* {@link Scorer#nextDoc()} and/or {@link Scorer#advance(int)} will
* be called.
* @return a {@link Scorer} which scores documents in/out-of order.
* @throws IOException
*/
public abstract Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
boolean topScorer) throws IOException;
/** The sum of squared weights of contained query clauses. */
public abstract float sumOfSquaredWeights() throws IOException;
/**
* Returns true iff this implementation scores docs only out of order. This
* method is used in conjunction with {@link Collector}'s
* {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
* {@link #scorer(org.apache.lucene.index.IndexReader, boolean, boolean)} to
* create a matching {@link Scorer} instance for a given {@link Collector}, or
* vice versa.
* <p>
* <b>NOTE:</b> the default implementation returns <code>false</code>, i.e.
* the <code>Scorer</code> scores documents in-order.
*/
public boolean scoresDocsOutOfOrder() { return false; }
}
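To connect scoresDocsOutOfOrder() and Collector.acceptsDocsOutOfOrder() in one place, here is a hedged sketch, not taken from the patch, of how a search loop might pick a matching Scorer for a given Collector. The class and method names are illustrative, and filters are omitted for brevity.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Scorer;

// Hypothetical helper illustrating the Collector/Scorer mating described in
// scoresDocsOutOfOrder(): ask the collector whether out-of-order delivery is
// acceptable, request a matching Scorer, and drive it as a top scorer.
public class ScoreWithCollector {
  static void score(QueryWeight weight, IndexReader reader, Collector collector)
      throws IOException {
    // Request an in-order scorer only if the collector insists on order.
    boolean scoreDocsInOrder = !collector.acceptsDocsOutOfOrder();
    Scorer scorer = weight.scorer(reader, scoreDocsInOrder, true /* topScorer */);
    if (scorer == null) {
      return; // no matching documents in this reader
    }
    // score(Collector) delivers every matching document to the collector.
    scorer.score(collector);
  }
}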

View File

@ -0,0 +1,68 @@
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
/**
* A wrapper class for the deprecated {@link Weight}.
* Please re-implement any custom Weight classes as {@link
* QueryWeight} instead.
*
* @deprecated will be removed in 3.0
*/
public class QueryWeightWrapper extends QueryWeight {
private Weight weight;
public QueryWeightWrapper(Weight weight) {
this.weight = weight;
}
public Explanation explain(IndexReader reader, int doc) throws IOException {
return weight.explain(reader, doc);
}
public Query getQuery() {
return weight.getQuery();
}
public float getValue() {
return weight.getValue();
}
public void normalize(float norm) {
weight.normalize(norm);
}
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
throws IOException {
return weight.scorer(reader);
}
public float sumOfSquaredWeights() throws IOException {
return weight.sumOfSquaredWeights();
}
public Scorer scorer(IndexReader reader) throws IOException {
return weight.scorer(reader);
}
}
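A short usage sketch (assumed, not from the patch; LegacyWeightBridge is a hypothetical name) showing how an existing Weight implementation can be funneled into the new QueryWeight-based Searcher methods during migration.

import java.io.IOException;

import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWeightWrapper;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;

// Hypothetical bridge: wrap a legacy Weight so it can be handed to the
// QueryWeight-based Searcher.search(QueryWeight, Filter, int) method.
public class LegacyWeightBridge {
  static TopDocs search(Searcher searcher, Weight legacyWeight, Filter filter, int n)
      throws IOException {
    // QueryWeightWrapper delegates the three-argument scorer() call back to
    // the deprecated scorer(IndexReader), so scoring behavior is unchanged.
    return searcher.search(new QueryWeightWrapper(legacyWeight), filter, n);
  }
}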

View File

@ -61,15 +61,18 @@ public class QueryWrapperFilter extends Filter {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
return bits; return bits;
} }
public DocIdSet getDocIdSet(final IndexReader reader) throws IOException { public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
final Weight weight = query.weight(new IndexSearcher(reader)); final QueryWeight weight = query.queryWeight(new IndexSearcher(reader));
return new DocIdSet() { return new DocIdSet() {
public DocIdSetIterator iterator() throws IOException { public DocIdSetIterator iterator() throws IOException {
return weight.scorer(reader); return weight.scorer(reader, true, false);
} }
}; };
} }

View File

@ -17,25 +17,32 @@ package org.apache.lucene.search;
* limitations under the License. * limitations under the License.
*/ */
import java.io.IOException;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.IndexReader; // for javadoc
import org.apache.lucene.index.Term;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import java.io.IOException; // for javadoc /**
* The interface for search implementations.
/** The interface for search implementations.
* *
* <p>Searchable is the abstract network protocol for searching. * <p>
* Implementations provide search over a single index, over multiple * Searchable is the abstract network protocol for searching. Implementations
* indices, and over indices on remote servers. * provide search over a single index, over multiple indices, and over indices
* on remote servers.
* *
* <p>Queries, filters and sort criteria are designed to be compact so that * <p>
* they may be efficiently passed to a remote index, with only the top-scoring * Queries, filters and sort criteria are designed to be compact so that they
* hits being returned, rather than every matching hit. * may be efficiently passed to a remote index, with only the top-scoring hits
* being returned, rather than every matching hit.
*
* <b>NOTE:</b> this interface is kept public for convenience. Since it is not
* expected to be implemented directly, it may be changed unexpectedly between
* releases.
*/ */
public interface Searchable { public interface Searchable {
/** Lower-level search API. /** Lower-level search API.
* *
* <p>{@link HitCollector#collect(int,float)} is called for every non-zero * <p>{@link HitCollector#collect(int,float)} is called for every non-zero
@ -51,7 +58,7 @@ public interface Searchable {
* @param filter if non-null, used to permit documents to be collected. * @param filter if non-null, used to permit documents to be collected.
* @param results to receive hits * @param results to receive hits
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
* @deprecated use {@link #search(Weight, Filter, Collector)} instead. * @deprecated use {@link #search(QueryWeight, Filter, Collector)} instead.
*/ */
void search(Weight weight, Filter filter, HitCollector results) void search(Weight weight, Filter filter, HitCollector results)
throws IOException; throws IOException;
@ -75,9 +82,33 @@ public interface Searchable {
* @param collector * @param collector
* to receive hits * to receive hits
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
*
* @deprecated use {@link #search(QueryWeight, Filter, Collector)} instead.
*/ */
void search(Weight weight, Filter filter, Collector collector) throws IOException; void search(Weight weight, Filter filter, Collector collector) throws IOException;
/**
* Lower-level search API.
*
* <p>
* {@link Collector#collect(int)} is called for every document. <br>
* Collector-based access to remote indexes is discouraged.
*
* <p>
* Applications should only use this if they need <i>all</i> of the matching
* documents. The high-level search API ({@link Searcher#search(Query)}) is
* usually more efficient, as it skips non-high-scoring hits.
*
* @param weight
* to match documents
* @param filter
* if non-null, used to permit documents to be collected.
* @param collector
* to receive hits
* @throws BooleanQuery.TooManyClauses
*/
void search(QueryWeight weight, Filter filter, Collector collector) throws IOException;
/** Frees resources associated with this Searcher. /** Frees resources associated with this Searcher.
* Be careful not to call this method while you are still using objects * Be careful not to call this method while you are still using objects
* like {@link Hits}. * like {@link Hits}.
@ -86,7 +117,7 @@ public interface Searchable {
/** Expert: Returns the number of documents containing <code>term</code>. /** Expert: Returns the number of documents containing <code>term</code>.
* Called by search code to compute term weights. * Called by search code to compute term weights.
* @see IndexReader#docFreq(Term) * @see org.apache.lucene.index.IndexReader#docFreq(Term)
*/ */
int docFreq(Term term) throws IOException; int docFreq(Term term) throws IOException;
@ -98,7 +129,7 @@ public interface Searchable {
/** Expert: Returns one greater than the largest possible document number. /** Expert: Returns one greater than the largest possible document number.
* Called by search code to compute term weights. * Called by search code to compute term weights.
* @see IndexReader#maxDoc() * @see org.apache.lucene.index.IndexReader#maxDoc()
*/ */
int maxDoc() throws IOException; int maxDoc() throws IOException;
@ -110,12 +141,24 @@ public interface Searchable {
* <p>Applications should usually call {@link Searcher#search(Query)} or * <p>Applications should usually call {@link Searcher#search(Query)} or
* {@link Searcher#search(Query,Filter)} instead. * {@link Searcher#search(Query,Filter)} instead.
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
* @deprecated use {@link #search(QueryWeight, Filter, int)} instead.
*/ */
TopDocs search(Weight weight, Filter filter, int n) throws IOException; TopDocs search(Weight weight, Filter filter, int n) throws IOException;
/** Expert: Low-level search implementation. Finds the top <code>n</code>
* hits for <code>query</code>, applying <code>filter</code> if non-null.
*
* <p>Called by {@link Hits}.
*
* <p>Applications should usually call {@link Searcher#search(Query)} or
* {@link Searcher#search(Query,Filter)} instead.
* @throws BooleanQuery.TooManyClauses
*/
TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException;
/** Expert: Returns the stored fields of document <code>i</code>. /** Expert: Returns the stored fields of document <code>i</code>.
* Called by {@link HitCollector} implementations. * Called by {@link HitCollector} implementations.
* @see IndexReader#document(int) * @see org.apache.lucene.index.IndexReader#document(int)
* @throws CorruptIndexException if the index is corrupt * @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error * @throws IOException if there is a low-level IO error
*/ */
@ -136,7 +179,7 @@ public interface Searchable {
* @throws CorruptIndexException if the index is corrupt * @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error * @throws IOException if there is a low-level IO error
* *
* @see IndexReader#document(int, FieldSelector) * @see org.apache.lucene.index.IndexReader#document(int, FieldSelector)
* @see org.apache.lucene.document.Fieldable * @see org.apache.lucene.document.Fieldable
* @see org.apache.lucene.document.FieldSelector * @see org.apache.lucene.document.FieldSelector
* @see org.apache.lucene.document.SetBasedFieldSelector * @see org.apache.lucene.document.SetBasedFieldSelector
@ -159,10 +202,23 @@ public interface Searchable {
* entire index. * entire index.
* <p>Applications should call {@link Searcher#explain(Query, int)}. * <p>Applications should call {@link Searcher#explain(Query, int)}.
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
* @deprecated use {@link #explain(QueryWeight, int)} instead.
*/ */
Explanation explain(Weight weight, int doc) throws IOException; Explanation explain(Weight weight, int doc) throws IOException;
// TODO: change the javadoc in 3.0 to remove the last NOTE section. /** Expert: low-level implementation method
* Returns an Explanation that describes how <code>doc</code> scored against
* <code>weight</code>.
*
* <p>This is intended to be used in developing Similarity implementations,
* and, for good performance, should not be displayed with every hit.
* Computing an explanation is as expensive as executing the query over the
* entire index.
* <p>Applications should call {@link Searcher#explain(Query, int)}.
* @throws BooleanQuery.TooManyClauses
*/
Explanation explain(QueryWeight weight, int doc) throws IOException;
/** Expert: Low-level search implementation with arbitrary sorting. Finds /** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying * the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in * <code>filter</code> if non-null, and sorting the hits by the criteria in
@ -171,15 +227,23 @@ public interface Searchable {
* <p>Applications should usually call {@link * <p>Applications should usually call {@link
* Searcher#search(Query,Filter,Sort)} instead. * Searcher#search(Query,Filter,Sort)} instead.
* *
* <b>NOTE:</b> currently, this method tracks document scores and sets them in
* the returned {@link FieldDoc}, however in 3.0 it will move to not track
* document scores. If document scores tracking is still needed, you can use
* {@link #search(Weight, Filter, Collector)} and pass in a
* {@link TopFieldCollector} instance.
*
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
* @deprecated use {@link #search(QueryWeight, Filter, int, Sort)} instead.
*/ */
TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException; throws IOException;
/** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
* <code>sort</code>.
*
* <p>Applications should usually call {@link
* Searcher#search(Query,Filter,Sort)} instead.
*
* @throws BooleanQuery.TooManyClauses
*/
TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort)
throws IOException;
} }
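For comparison with the deprecated Weight-based entry points above, a hedged end-to-end sketch of the new low-level API (LowLevelSearchExample is a hypothetical name; any Collector implementation, such as a TopScoreDocCollector, could be passed in).

import java.io.IOException;

import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Searcher;

// Hypothetical example of driving the QueryWeight-based low-level API.
public class LowLevelSearchExample {
  static void search(Searcher searcher, Query query, Filter filter, Collector collector)
      throws IOException {
    // queryWeight() rewrites the query, builds the QueryWeight and normalizes it.
    QueryWeight weight = query.queryWeight(searcher);
    searcher.search(weight, filter, collector);
  }
}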

View File

@ -19,15 +19,17 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.document.Document;
/** An abstract base class for search implementations. /**
* Implements the main search methods. * An abstract base class for search implementations. Implements the main search
* methods.
* *
* <p>Note that you can only access Hits from a Searcher as long as it is * <p>
* not yet closed, otherwise an IOException will be thrown. * Note that you can only access hits from a Searcher as long as it is not yet
* closed, otherwise an IOException will be thrown.
*/ */
public abstract class Searcher implements Searchable { public abstract class Searcher implements Searchable {
@ -87,7 +89,7 @@ public abstract class Searcher implements Searchable {
*/ */
public TopFieldDocs search(Query query, Filter filter, int n, public TopFieldDocs search(Query query, Filter filter, int n,
Sort sort) throws IOException { Sort sort) throws IOException {
return search(createWeight(query), filter, n, sort); return search(createQueryWeight(query), filter, n, sort);
} }
/** Lower-level search API. /** Lower-level search API.
@ -107,7 +109,7 @@ public abstract class Searcher implements Searchable {
*/ */
public void search(Query query, HitCollector results) public void search(Query query, HitCollector results)
throws IOException { throws IOException {
search(query, (Filter)null, results); search(createQueryWeight(query), null, new HitCollectorWrapper(results));
} }
/** Lower-level search API. /** Lower-level search API.
@ -125,7 +127,7 @@ public abstract class Searcher implements Searchable {
*/ */
public void search(Query query, Collector results) public void search(Query query, Collector results)
throws IOException { throws IOException {
search(query, (Filter)null, results); search(createQueryWeight(query), null, results);
} }
/** Lower-level search API. /** Lower-level search API.
@ -147,7 +149,7 @@ public abstract class Searcher implements Searchable {
*/ */
public void search(Query query, Filter filter, HitCollector results) public void search(Query query, Filter filter, HitCollector results)
throws IOException { throws IOException {
search(createWeight(query), filter, results); search(createQueryWeight(query), filter, new HitCollectorWrapper(results));
} }
/** Lower-level search API. /** Lower-level search API.
@ -168,7 +170,7 @@ public abstract class Searcher implements Searchable {
*/ */
public void search(Query query, Filter filter, Collector results) public void search(Query query, Filter filter, Collector results)
throws IOException { throws IOException {
search(createWeight(query), filter, results); search(createQueryWeight(query), filter, results);
} }
/** Finds the top <code>n</code> /** Finds the top <code>n</code>
@ -178,7 +180,7 @@ public abstract class Searcher implements Searchable {
*/ */
public TopDocs search(Query query, Filter filter, int n) public TopDocs search(Query query, Filter filter, int n)
throws IOException { throws IOException {
return search(createWeight(query), filter, n); return search(createQueryWeight(query), filter, n);
} }
/** Finds the top <code>n</code> /** Finds the top <code>n</code>
@ -200,7 +202,7 @@ public abstract class Searcher implements Searchable {
* entire index. * entire index.
*/ */
public Explanation explain(Query query, int doc) throws IOException { public Explanation explain(Query query, int doc) throws IOException {
return explain(createWeight(query), doc); return explain(createQueryWeight(query), doc);
} }
/** The Similarity implementation used by this searcher. */ /** The Similarity implementation used by this searcher. */
@ -224,10 +226,15 @@ public abstract class Searcher implements Searchable {
/** /**
* creates a weight for <code>query</code> * creates a weight for <code>query</code>
* @return new weight *
* @deprecated use {@link #createQueryWeight(Query)} instead.
*/ */
protected Weight createWeight(Query query) throws IOException { protected Weight createWeight(Query query) throws IOException {
return query.weight(this); return createQueryWeight(query);
}
protected QueryWeight createQueryWeight(Query query) throws IOException {
return query.queryWeight(this);
} }
// inherit javadoc // inherit javadoc
@ -245,15 +252,34 @@ public abstract class Searcher implements Searchable {
/** /**
* @deprecated use {@link #search(Weight, Filter, Collector)} instead. * @deprecated use {@link #search(Weight, Filter, Collector)} instead.
*/ */
abstract public void search(Weight weight, Filter filter, HitCollector results) throws IOException; public void search(Weight weight, Filter filter, HitCollector results) throws IOException {
abstract public void search(Weight weight, Filter filter, Collector results) throws IOException; search(new QueryWeightWrapper(weight), filter, new HitCollectorWrapper(results));
}
/** @deprecated will be removed in 3.0. */
public void search(Weight weight, Filter filter, Collector collector)
throws IOException {
search(new QueryWeightWrapper(weight), filter, collector);
}
abstract public void search(QueryWeight weight, Filter filter, Collector results) throws IOException;
abstract public void close() throws IOException; abstract public void close() throws IOException;
abstract public int docFreq(Term term) throws IOException; abstract public int docFreq(Term term) throws IOException;
abstract public int maxDoc() throws IOException; abstract public int maxDoc() throws IOException;
abstract public TopDocs search(Weight weight, Filter filter, int n) throws IOException; /** @deprecated use {@link #search(QueryWeight, Filter, int)} instead. */
public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
return search(new QueryWeightWrapper(weight), filter, n);
}
abstract public TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException;
abstract public Document doc(int i) throws CorruptIndexException, IOException; abstract public Document doc(int i) throws CorruptIndexException, IOException;
abstract public Query rewrite(Query query) throws IOException; abstract public Query rewrite(Query query) throws IOException;
abstract public Explanation explain(Weight weight, int doc) throws IOException; /** @deprecated use {@link #explain(QueryWeight, int)} instead. */
abstract public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException; public Explanation explain(Weight weight, int doc) throws IOException {
return explain(new QueryWeightWrapper(weight), doc);
}
abstract public Explanation explain(QueryWeight weight, int doc) throws IOException;
/** @deprecated use {@link #search(QueryWeight, Filter, int, Sort)} instead. */
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException {
return search(new QueryWeightWrapper(weight), filter, n, sort);
}
abstract public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort) throws IOException;
/* End patch for GCJ bug #15411. */ /* End patch for GCJ bug #15411. */
} }

View File

@ -28,7 +28,7 @@ final class SloppyPhraseScorer extends PhraseScorer {
private PhrasePositions tmpPos[]; // for flipping repeating pps. private PhrasePositions tmpPos[]; // for flipping repeating pps.
private boolean checkedRepeats; private boolean checkedRepeats;
SloppyPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, SloppyPhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets, Similarity similarity,
int slop, byte[] norms) { int slop, byte[] norms) {
super(weight, tps, offsets, similarity, norms); super(weight, tps, offsets, similarity, norms);
this.slop = slop; this.slop = slop;

View File

@ -31,7 +31,7 @@ import org.apache.lucene.util.ToStringUtils;
public class TermQuery extends Query { public class TermQuery extends Query {
private Term term; private Term term;
private class TermWeight implements Weight { private class TermWeight extends QueryWeight {
private Similarity similarity; private Similarity similarity;
private float value; private float value;
private float idf; private float idf;
@ -60,14 +60,13 @@ public class TermQuery extends Query {
value = queryWeight * idf; // idf for document value = queryWeight * idf; // idf for document
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
TermDocs termDocs = reader.termDocs(term); TermDocs termDocs = reader.termDocs(term);
if (termDocs == null) if (termDocs == null)
return null; return null;
return new TermScorer(this, termDocs, similarity, return new TermScorer(this, termDocs, similarity, reader.norms(term.field()));
reader.norms(term.field()));
} }
public Explanation explain(IndexReader reader, int doc) public Explanation explain(IndexReader reader, int doc)
@ -104,7 +103,7 @@ public class TermQuery extends Query {
fieldExpl.setDescription("fieldWeight("+term+" in "+doc+ fieldExpl.setDescription("fieldWeight("+term+" in "+doc+
"), product of:"); "), product of:");
Explanation tfExpl = scorer(reader).explain(doc); Explanation tfExpl = scorer(reader, true, false).explain(doc);
fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(tfExpl);
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);
@ -142,7 +141,7 @@ public class TermQuery extends Query {
/** Returns the term of this query. */ /** Returns the term of this query. */
public Term getTerm() { return term; } public Term getTerm() { return term; }
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return new TermWeight(searcher); return new TermWeight(searcher);
} }

View File

@ -27,7 +27,7 @@ final class TermScorer extends Scorer {
private static final float[] SIM_NORM_DECODER = Similarity.getNormDecoder(); private static final float[] SIM_NORM_DECODER = Similarity.getNormDecoder();
private Weight weight; private QueryWeight weight;
private TermDocs termDocs; private TermDocs termDocs;
private byte[] norms; private byte[] norms;
private float weightValue; private float weightValue;
@ -41,13 +41,41 @@ final class TermScorer extends Scorer {
private static final int SCORE_CACHE_SIZE = 32; private static final int SCORE_CACHE_SIZE = 32;
private float[] scoreCache = new float[SCORE_CACHE_SIZE]; private float[] scoreCache = new float[SCORE_CACHE_SIZE];
/** Construct a <code>TermScorer</code>. /**
* @param weight The weight of the <code>Term</code> in the query. * Construct a <code>TermScorer</code>.
* @param td An iterator over the documents matching the <code>Term</code>. *
* @param similarity The </code>Similarity</code> implementation to be used for score computations. * @param weight
* @param norms The field norms of the document fields for the <code>Term</code>. * The weight of the <code>Term</code> in the query.
* @param td
* An iterator over the documents matching the <code>Term</code>.
* @param similarity
* The </code>Similarity</code> implementation to be used for score
* computations.
* @param norms
* The field norms of the document fields for the <code>Term</code>.
*
* @deprecated will be removed in 3.0; kept around for TestTermScorer in the
*             tag, which creates TermScorer directly and cannot pass in a
*             QueryWeight object.
*/ */
TermScorer(Weight weight, TermDocs td, Similarity similarity, TermScorer(Weight weight, TermDocs td, Similarity similarity, byte[] norms) {
this(new QueryWeightWrapper(weight), td, similarity, norms);
}
/**
* Construct a <code>TermScorer</code>.
*
* @param weight
* The weight of the <code>Term</code> in the query.
* @param td
* An iterator over the documents matching the <code>Term</code>.
* @param similarity
* The </code>Similarity</code> implementation to be used for score
* computations.
* @param norms
* The field norms of the document fields for the <code>Term</code>.
*/
TermScorer(QueryWeight weight, TermDocs td, Similarity similarity,
byte[] norms) { byte[] norms) {
super(similarity); super(similarity);
this.weight = weight; this.weight = weight;
@ -194,7 +222,7 @@ final class TermScorer extends Scorer {
* @param doc The document number for the explanation. * @param doc The document number for the explanation.
*/ */
public Explanation explain(int doc) throws IOException { public Explanation explain(int doc) throws IOException {
TermQuery query = (TermQuery)weight.getQuery(); TermQuery query = (TermQuery) weight.getQuery();
Explanation tfExplanation = new Explanation(); Explanation tfExplanation = new Explanation();
int tf = 0; int tf = 0;
while (pointer < pointerMax) { while (pointer < pointerMax) {

View File

@ -216,4 +216,8 @@ public class TimeLimitingCollector extends Collector {
collector.setScorer(scorer); collector.setScorer(scorer);
} }
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
} }

View File

@ -135,6 +135,11 @@ public abstract class TopFieldCollector extends TopDocsCollector {
} }
} }
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/* /*
@ -240,6 +245,11 @@ public abstract class TopFieldCollector extends TopDocsCollector {
} }
} }
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/* /*
@ -341,8 +351,12 @@ public abstract class TopFieldCollector extends TopDocsCollector {
comparator.setBottom(bottom.slot); comparator.setBottom(bottom.slot);
} }
} }
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/* /*
@ -489,6 +503,11 @@ public abstract class TopFieldCollector extends TopDocsCollector {
} }
} }
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/* /*
@ -632,6 +651,11 @@ public abstract class TopFieldCollector extends TopDocsCollector {
} }
} }
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/* /*
@ -781,6 +805,11 @@ public abstract class TopFieldCollector extends TopDocsCollector {
this.scorer = scorer; this.scorer = scorer;
super.setScorer(scorer); super.setScorer(scorer);
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
private static final ScoreDoc[] EMPTY_SCOREDOCS = new ScoreDoc[0]; private static final ScoreDoc[] EMPTY_SCOREDOCS = new ScoreDoc[0];
@ -930,4 +959,8 @@ public abstract class TopFieldCollector extends TopDocsCollector {
return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue) pq).getFields(), maxScore); return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue) pq).getFields(), maxScore);
} }
public boolean acceptsDocsOutOfOrder() {
return false;
}
} }

View File

@ -55,6 +55,10 @@ public abstract class TopScoreDocCollector extends TopDocsCollector {
pqTop.score = score; pqTop.score = score;
pqTop = (ScoreDoc) pq.updateTop(); pqTop = (ScoreDoc) pq.updateTop();
} }
public boolean acceptsDocsOutOfOrder() {
return false;
}
} }
// Assumes docs are scored out of order. // Assumes docs are scored out of order.
@ -74,6 +78,10 @@ public abstract class TopScoreDocCollector extends TopDocsCollector {
pqTop.score = score; pqTop.score = score;
pqTop = (ScoreDoc) pq.updateTop(); pqTop = (ScoreDoc) pq.updateTop();
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
/** /**

View File

@ -40,6 +40,8 @@ import org.apache.lucene.index.IndexReader;
* At this point the weighting is complete. * At this point the weighting is complete.
* <li>A <code>Scorer</code> is constructed by {@link #scorer(IndexReader)}. * <li>A <code>Scorer</code> is constructed by {@link #scorer(IndexReader)}.
* </ol> * </ol>
*
* @deprecated use {@link QueryWeight} instead.
*/ */
public interface Weight extends java.io.Serializable { public interface Weight extends java.io.Serializable {
/** The query that this concerns. */ /** The query that this concerns. */

View File

@ -24,10 +24,10 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
/** /**
@ -271,19 +271,18 @@ public class CustomScoreQuery extends Query {
//=========================== W E I G H T ============================ //=========================== W E I G H T ============================
private class CustomWeight implements Weight { private class CustomWeight extends QueryWeight {
Similarity similarity; Similarity similarity;
Weight subQueryWeight; QueryWeight subQueryWeight;
Weight[] valSrcWeights; QueryWeight[] valSrcWeights;
boolean qStrict; boolean qStrict;
public CustomWeight(Searcher searcher) throws IOException { public CustomWeight(Searcher searcher) throws IOException {
this.similarity = getSimilarity(searcher); this.similarity = getSimilarity(searcher);
this.subQueryWeight = subQuery.weight(searcher); this.subQueryWeight = subQuery.queryWeight(searcher);
this.valSrcWeights = new Weight[valSrcQueries.length]; this.valSrcWeights = new QueryWeight[valSrcQueries.length];
for(int i = 0; i < valSrcQueries.length; i++) { for(int i = 0; i < valSrcQueries.length; i++) {
this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher); this.valSrcWeights[i] = valSrcQueries[i].createQueryWeight(searcher);
} }
this.qStrict = strict; this.qStrict = strict;
} }
@ -325,20 +324,28 @@ public class CustomScoreQuery extends Query {
} }
} }
/*(non-Javadoc) @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader) */ public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
public Scorer scorer(IndexReader reader) throws IOException { // Pass true for "scoresDocsInOrder", because we
Scorer subQueryScorer = subQueryWeight.scorer(reader); // require in-order scoring, even if caller does not,
// since we call advance on the valSrcScorers. Pass
// false for "topScorer" because we will not invoke
// score(Collector) on these scorers:
Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false);
Scorer[] valSrcScorers = new Scorer[valSrcWeights.length]; Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
for(int i = 0; i < valSrcScorers.length; i++) { for(int i = 0; i < valSrcScorers.length; i++) {
valSrcScorers[i] = valSrcWeights[i].scorer(reader); valSrcScorers[i] = valSrcWeights[i].scorer(reader, true, false);
} }
return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers); return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers);
} }
/*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(IndexReader reader, int doc) throws IOException {
return scorer(reader).explain(doc); return scorer(reader, true, false).explain(doc);
} }
public boolean scoresDocsOutOfOrder() {
return false;
}
} }
@ -435,8 +442,7 @@ public class CustomScoreQuery extends Query {
} }
} }
/*(non-Javadoc) @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher) */ public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
protected Weight createWeight(Searcher searcher) throws IOException {
return new CustomWeight(searcher); return new CustomWeight(searcher);
} }

View File

@ -62,7 +62,7 @@ public class ValueSourceQuery extends Query {
// no terms involved here // no terms involved here
} }
private class ValueSourceWeight implements Weight { private class ValueSourceWeight extends QueryWeight {
Similarity similarity; Similarity similarity;
float queryNorm; float queryNorm;
float queryWeight; float queryWeight;
@ -93,14 +93,13 @@ public class ValueSourceQuery extends Query {
queryWeight *= this.queryNorm; queryWeight *= this.queryNorm;
} }
/*(non-Javadoc) @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader) */ public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
public Scorer scorer(IndexReader reader) throws IOException {
return new ValueSourceScorer(similarity, reader, this); return new ValueSourceScorer(similarity, reader, this);
} }
/*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */ /*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(IndexReader reader, int doc) throws IOException {
return scorer(reader).explain(doc); return scorer(reader, true, false).explain(doc);
} }
} }
@ -173,12 +172,10 @@ public class ValueSourceQuery extends Query {
} }
} }
/*(non-Javadoc) @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher) */ public QueryWeight createQueryWeight(Searcher searcher) {
protected Weight createWeight(Searcher searcher) {
return new ValueSourceQuery.ValueSourceWeight(searcher); return new ValueSourceQuery.ValueSourceWeight(searcher);
} }
/* (non-Javadoc) @see org.apache.lucene.search.Query#toString(java.lang.String) */
public String toString(String field) { public String toString(String field) {
return valSrc.toString() + ToStringUtils.boost(getBoost()); return valSrc.toString() + ToStringUtils.boost(getBoost());
} }

View File

@ -41,29 +41,23 @@ import java.io.IOException;
*/ */
public class BoostingTermQuery extends SpanTermQuery{ public class BoostingTermQuery extends SpanTermQuery{
public BoostingTermQuery(Term term) { public BoostingTermQuery(Term term) {
super(term); super(term);
} }
public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
protected Weight createWeight(Searcher searcher) throws IOException {
return new BoostingTermWeight(this, searcher); return new BoostingTermWeight(this, searcher);
} }
protected class BoostingTermWeight extends SpanWeight implements Weight { protected class BoostingTermWeight extends SpanWeight {
public BoostingTermWeight(BoostingTermQuery query, Searcher searcher) throws IOException { public BoostingTermWeight(BoostingTermQuery query, Searcher searcher) throws IOException {
super(query, searcher); super(query, searcher);
} }
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new BoostingSpanScorer((TermSpans) query.getSpans(reader), this,
similarity, reader.norms(query.getField()));
public Scorer scorer(IndexReader reader) throws IOException {
return new BoostingSpanScorer((TermSpans)query.getSpans(reader), this, similarity,
reader.norms(query.getField()));
} }
protected class BoostingSpanScorer extends SpanScorer { protected class BoostingSpanScorer extends SpanScorer {
@ -74,7 +68,7 @@ public class BoostingTermQuery extends SpanTermQuery{
protected float payloadScore; protected float payloadScore;
private int payloadsSeen; private int payloadsSeen;
public BoostingSpanScorer(TermSpans spans, Weight weight, public BoostingSpanScorer(TermSpans spans, QueryWeight weight,
Similarity similarity, byte[] norms) throws IOException { Similarity similarity, byte[] norms) throws IOException {
super(spans, weight, similarity, norms); super(spans, weight, similarity, norms);
positions = spans.getPositions(); positions = spans.getPositions();

View File

@ -23,9 +23,9 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
/** /**
@ -97,6 +97,7 @@ public class FieldMaskingSpanQuery extends SpanQuery {
return maskedQuery.getPayloadSpans(reader); return maskedQuery.getPayloadSpans(reader);
} }
/** @deprecated use {@link #extractTerms(Set)} instead. */
public Collection getTerms() { public Collection getTerms() {
return maskedQuery.getTerms(); return maskedQuery.getTerms();
} }
@ -105,8 +106,8 @@ public class FieldMaskingSpanQuery extends SpanQuery {
maskedQuery.extractTerms(terms); maskedQuery.extractTerms(terms);
} }
protected Weight createWeight(Searcher searcher) throws IOException { public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return maskedQuery.createWeight(searcher); return maskedQuery.createQueryWeight(searcher);
} }
public Similarity getSimilarity(Searcher searcher) { public Similarity getSimilarity(Searcher searcher) {

View File

@ -17,14 +17,14 @@ package org.apache.lucene.search.spans;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Weight;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Weight;
/** Base class for span-based queries. */ /** Base class for span-based queries. */
public abstract class SpanQuery extends Query { public abstract class SpanQuery extends Query {
@ -46,7 +46,7 @@ public abstract class SpanQuery extends Query {
*/ */
public PayloadSpans getPayloadSpans(IndexReader reader) throws IOException{ public PayloadSpans getPayloadSpans(IndexReader reader) throws IOException{
return null; return null;
}; }
/** Returns the name of the field matched by this query.*/ /** Returns the name of the field matched by this query.*/
public abstract String getField(); public abstract String getField();
@ -57,9 +57,13 @@ public abstract class SpanQuery extends Query {
*/ */
public abstract Collection getTerms(); public abstract Collection getTerms();
/** @deprecated delete in 3.0. */
protected Weight createWeight(Searcher searcher) throws IOException { protected Weight createWeight(Searcher searcher) throws IOException {
return createQueryWeight(searcher);
}
public QueryWeight createQueryWeight(Searcher searcher) throws IOException {
return new SpanWeight(this, searcher); return new SpanWeight(this, searcher);
} }
} }

View File

@ -17,19 +17,21 @@ package org.apache.lucene.search.spans;
* limitations under the License. * limitations under the License.
*/ */
import java.io.IOException;
import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.QueryWeightWrapper;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import java.io.IOException;
/** /**
* Public for extension only. * Public for extension only.
*/ */
public class SpanScorer extends Scorer { public class SpanScorer extends Scorer {
protected Spans spans; protected Spans spans;
protected Weight weight; protected QueryWeight weight;
protected byte[] norms; protected byte[] norms;
protected float value; protected float value;
@ -40,7 +42,13 @@ public class SpanScorer extends Scorer {
protected int doc; protected int doc;
protected float freq; protected float freq;
/** @deprecated use {@link #SpanScorer(Spans, QueryWeight, Similarity, byte[])} instead.*/
protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms) protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms)
throws IOException {
this(spans, new QueryWeightWrapper(weight), similarity, norms);
}
protected SpanScorer(Spans spans, QueryWeight weight, Similarity similarity, byte[] norms)
throws IOException { throws IOException {
super(similarity); super(similarity);
this.spans = spans; this.spans = spans;

View File

@ -29,7 +29,7 @@ import java.util.Set;
/** /**
* Expert-only. Public for use by other weight implementations * Expert-only. Public for use by other weight implementations
*/ */
public class SpanWeight implements Weight { public class SpanWeight extends QueryWeight {
protected Similarity similarity; protected Similarity similarity;
protected float value; protected float value;
protected float idf; protected float idf;
@ -63,10 +63,9 @@ public class SpanWeight implements Weight {
value = queryWeight * idf; // idf for document value = queryWeight * idf; // idf for document
} }
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new SpanScorer(query.getSpans(reader), this, return new SpanScorer(query.getSpans(reader), this, similarity, reader
similarity, .norms(query.getField()));
reader.norms(query.getField()));
} }
public Explanation explain(IndexReader reader, int doc) public Explanation explain(IndexReader reader, int doc)
@ -115,7 +114,7 @@ public class SpanWeight implements Weight {
fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+ fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+
" in "+doc+"), product of:"); " in "+doc+"), product of:");
Explanation tfExpl = scorer(reader).explain(doc); Explanation tfExpl = scorer(reader, true, false).explain(doc);
fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(tfExpl);
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);

View File

@ -1073,7 +1073,6 @@ public class TestIndexReaderReopen extends LuceneTestCase {
protected void setUp() throws Exception { protected void setUp() throws Exception {
// TODO Auto-generated method stub
super.setUp(); super.setUp();
String tempDir = System.getProperty("java.io.tmpdir"); String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null) if (tempDir == null)

View File

@ -384,5 +384,8 @@ public class TestOmitTf extends LuceneTestCase {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
this.docBase = docBase; this.docBase = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
} }

View File

@ -17,15 +17,15 @@ package org.apache.lucene.search;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexReader;
import junit.framework.TestCase;
import java.io.IOException; import java.io.IOException;
import java.util.Set; import java.util.Set;
import java.util.TreeSet; import java.util.TreeSet;
import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;
public class CheckHits { public class CheckHits {
/** /**
@ -55,9 +55,9 @@ public class CheckHits {
if (ignore.contains(new Integer(doc))) continue; if (ignore.contains(new Integer(doc))) continue;
Explanation exp = searcher.explain(q, doc); Explanation exp = searcher.explain(q, doc);
TestCase.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null",
exp); exp);
TestCase.assertEquals("Explanation of [["+d+"]] for #"+doc+ Assert.assertEquals("Explanation of [["+d+"]] for #"+doc+
" doesn't indicate non-match: " + exp.toString(), " doesn't indicate non-match: " + exp.toString(),
0.0f, exp.getValue(), 0.0f); 0.0f, exp.getValue(), 0.0f);
} }
@ -95,12 +95,14 @@ public class CheckHits {
public void collect(int doc) { public void collect(int doc) {
actual.add(new Integer(doc + base)); actual.add(new Integer(doc + base));
} }
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
TestCase.assertEquals(query.toString(defaultFieldName), correct, actual); Assert.assertEquals(query.toString(defaultFieldName), correct, actual);
QueryUtils.check(query,searcher); QueryUtils.check(query,searcher);
} }
@ -126,7 +128,7 @@ public class CheckHits {
int[] results) int[] results)
throws IOException { throws IOException {
if (searcher instanceof IndexSearcher) { if (searcher instanceof IndexSearcher) {
QueryUtils.check(query,(IndexSearcher)searcher); QueryUtils.check(query,searcher);
} }
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@ -141,7 +143,7 @@ public class CheckHits {
actual.add(new Integer(hits[i].doc)); actual.add(new Integer(hits[i].doc));
} }
TestCase.assertEquals(query.toString(defaultFieldName), correct, actual); Assert.assertEquals(query.toString(defaultFieldName), correct, actual);
QueryUtils.check(query,searcher); QueryUtils.check(query,searcher);
} }
@ -149,9 +151,9 @@ public class CheckHits {
/** Tests that a Hits has an expected order of documents */ /** Tests that a Hits has an expected order of documents */
public static void checkDocIds(String mes, int[] results, ScoreDoc[] hits) public static void checkDocIds(String mes, int[] results, ScoreDoc[] hits)
throws IOException { throws IOException {
TestCase.assertEquals(mes + " nr of hits", hits.length, results.length); Assert.assertEquals(mes + " nr of hits", hits.length, results.length);
for (int i = 0; i < results.length; i++) { for (int i = 0; i < results.length; i++) {
TestCase.assertEquals(mes + " doc nrs for hit " + i, results[i], hits[i].doc); Assert.assertEquals(mes + " doc nrs for hit " + i, results[i], hits[i].doc);
} }
} }
@ -173,11 +175,11 @@ public class CheckHits {
public static void checkEqual(Query query, ScoreDoc[] hits1, ScoreDoc[] hits2) throws IOException { public static void checkEqual(Query query, ScoreDoc[] hits1, ScoreDoc[] hits2) throws IOException {
final float scoreTolerance = 1.0e-6f; final float scoreTolerance = 1.0e-6f;
if (hits1.length != hits2.length) { if (hits1.length != hits2.length) {
TestCase.fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length); Assert.fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length);
} }
for (int i = 0; i < hits1.length; i++) { for (int i = 0; i < hits1.length; i++) {
if (hits1[i].doc != hits2[i].doc) { if (hits1[i].doc != hits2[i].doc) {
TestCase.fail("Hit " + i + " docnumbers don't match\n" Assert.fail("Hit " + i + " docnumbers don't match\n"
+ hits2str(hits1, hits2,0,0) + hits2str(hits1, hits2,0,0)
+ "for query:" + query.toString()); + "for query:" + query.toString());
} }
@ -185,7 +187,7 @@ public class CheckHits {
if ((hits1[i].doc != hits2[i].doc) if ((hits1[i].doc != hits2[i].doc)
|| Math.abs(hits1[i].score - hits2[i].score) > scoreTolerance) || Math.abs(hits1[i].score - hits2[i].score) > scoreTolerance)
{ {
TestCase.fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc Assert.fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc
+ "\nunequal : " + hits1[i].score + "\nunequal : " + hits1[i].score
+ "\n and: " + hits2[i].score + "\n and: " + hits2[i].score
+ "\nfor query:" + query.toString()); + "\nfor query:" + query.toString());
@ -294,7 +296,7 @@ public class CheckHits {
boolean deep, boolean deep,
Explanation expl) { Explanation expl) {
float value = expl.getValue(); float value = expl.getValue();
TestCase.assertEquals(q+": score(doc="+doc+")="+score+ Assert.assertEquals(q+": score(doc="+doc+")="+score+
" != explanationScore="+value+" Explanation: "+expl, " != explanationScore="+value+" Explanation: "+expl,
score,value,EXPLAIN_SCORE_TOLERANCE_DELTA); score,value,EXPLAIN_SCORE_TOLERANCE_DELTA);
@ -331,7 +333,7 @@ public class CheckHits {
} }
} }
} }
TestCase.assertTrue( Assert.assertTrue(
q+": multi valued explanation description=\""+descr q+": multi valued explanation description=\""+descr
+"\" must be 'max of plus x times others' or end with 'product of'" +"\" must be 'max of plus x times others' or end with 'product of'"
+" or 'sum of:' or 'max of:' - "+expl, +" or 'sum of:' or 'max of:' - "+expl,
@ -356,9 +358,9 @@ public class CheckHits {
} else if (maxTimesOthers) { } else if (maxTimesOthers) {
combined = max + x * (sum - max); combined = max + x * (sum - max);
} else { } else {
TestCase.assertTrue("should never get here!",false); Assert.assertTrue("should never get here!",false);
} }
TestCase.assertEquals(q+": actual subDetails combined=="+combined+ Assert.assertEquals(q+": actual subDetails combined=="+combined+
" != value="+value+" Explanation: "+expl, " != value="+value+" Explanation: "+expl,
combined,value,EXPLAIN_SCORE_TOLERANCE_DELTA); combined,value,EXPLAIN_SCORE_TOLERANCE_DELTA);
} }
@ -466,14 +468,15 @@ public class CheckHits {
("exception in hitcollector of [["+d+"]] for #"+doc, e); ("exception in hitcollector of [["+d+"]] for #"+doc, e);
} }
TestCase.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", exp);
exp);
verifyExplanation(d,doc,scorer.score(),deep,exp); verifyExplanation(d,doc,scorer.score(),deep,exp);
} }
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
} }

View File

@ -38,7 +38,11 @@ final class JustCompileSearch {
private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !"; private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !";
static final class JustCompileSearchable implements Searchable { static final class JustCompileSearcher extends Searcher {
protected QueryWeight createQueryWeight(Query query) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public void close() throws IOException { public void close() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
@ -48,60 +52,41 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public Document doc(int n, FieldSelector fieldSelector)
throws CorruptIndexException, IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public int docFreq(Term term) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public int[] docFreqs(Term[] terms) throws IOException { public int[] docFreqs(Term[] terms) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public Explanation explain(Weight weight, int doc) throws IOException { public Explanation explain(Query query, int doc) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public int maxDoc() throws IOException { public Similarity getSimilarity() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public Query rewrite(Query query) throws IOException { public void search(Query query, Collector results) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public void search(Weight weight, Filter filter, HitCollector results) public void search(Query query, Filter filter, Collector results)
throws IOException { throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public void search(Weight weight, Filter filter, Collector collector) public TopDocs search(Query query, Filter filter, int n) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
throws IOException { throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public TopDocs search(Weight weight, Filter filter, int n) public TopDocs search(Query query, int n) throws IOException {
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) public void setSimilarity(Similarity similarity) {
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
}
static final class JustCompileSearcher extends Searcher {
public void close() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public Document doc(int i) throws CorruptIndexException, IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
@ -109,7 +94,7 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public Explanation explain(Weight weight, int doc) throws IOException { public Explanation explain(QueryWeight weight, int doc) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
@ -121,22 +106,17 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public void search(Weight weight, Filter filter, HitCollector results) public void search(QueryWeight weight, Filter filter, Collector results)
throws IOException { throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public void search(Weight weight, Filter filter, Collector results) public TopDocs search(QueryWeight weight, Filter filter, int n)
throws IOException { throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public TopDocs search(Weight weight, Filter filter, int n) public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException { throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
@ -163,6 +143,10 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public boolean acceptsDocsOutOfOrder() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
} }
static final class JustCompileDocIdSet extends DocIdSet { static final class JustCompileDocIdSet extends DocIdSet {
@ -316,7 +300,7 @@ final class JustCompileSearch {
static final class JustCompilePhraseScorer extends PhraseScorer { static final class JustCompilePhraseScorer extends PhraseScorer {
JustCompilePhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, JustCompilePhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets,
Similarity similarity, byte[] norms) { Similarity similarity, byte[] norms) {
super(weight, tps, offsets, similarity, norms); super(weight, tps, offsets, similarity, norms);
} }
@ -437,9 +421,13 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public boolean acceptsDocsOutOfOrder() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
static final class JustCompileWeight implements Weight { }
static final class JustCompileWeight extends QueryWeight {
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(IndexReader reader, int doc) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
@ -457,6 +445,7 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
/** @deprecated delete in 3.0 */
public Scorer scorer(IndexReader reader) throws IOException { public Scorer scorer(IndexReader reader) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
@ -465,6 +454,11 @@ final class JustCompileSearch {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
} }
} }

View File

@ -105,7 +105,7 @@ public class QueryUtils {
* @throws IOException if serialization check fail. * @throws IOException if serialization check fail.
*/ */
private static void checkSerialization(Query q, Searcher s) throws IOException { private static void checkSerialization(Query q, Searcher s) throws IOException {
Weight w = q.weight(s); QueryWeight w = q.queryWeight(s);
try { try {
ByteArrayOutputStream bos = new ByteArrayOutputStream(); ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos); ObjectOutputStream oos = new ObjectOutputStream(bos);
@ -150,8 +150,8 @@ public class QueryUtils {
//System.out.print("Order:");for (int i = 0; i < order.length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()"); System.out.println(); //System.out.print("Order:");for (int i = 0; i < order.length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()"); System.out.println();
final int opidx[] = {0}; final int opidx[] = {0};
final Weight w = q.weight(s); final QueryWeight w = q.queryWeight(s);
final Scorer scorer = w.scorer(s.getIndexReader()); final Scorer scorer = w.scorer(s.getIndexReader(), true, false);
// FUTURE: ensure scorer.doc()==-1 // FUTURE: ensure scorer.doc()==-1
@ -200,6 +200,9 @@ public class QueryUtils {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
// make sure next call to scorer is false. // make sure next call to scorer is false.
@ -228,8 +231,8 @@ public class QueryUtils {
float score = scorer.score(); float score = scorer.score();
try { try {
for (int i=lastDoc[0]+1; i<=doc; i++) { for (int i=lastDoc[0]+1; i<=doc; i++) {
Weight w = q.weight(s); QueryWeight w = q.queryWeight(s);
Scorer scorer = w.scorer(s.getIndexReader()); Scorer scorer = w.scorer(s.getIndexReader(), true, false);
Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS); Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID()); Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
float skipToScore = scorer.score(); float skipToScore = scorer.score();
@ -244,9 +247,12 @@ public class QueryUtils {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return false;
}
}); });
Weight w = q.weight(s); QueryWeight w = q.queryWeight(s);
Scorer scorer = w.scorer(s.getIndexReader()); Scorer scorer = w.scorer(s.getIndexReader(), true, false);
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS; boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
if (more) if (more)
Assert.assertFalse("query's last doc was "+lastDoc[0]+" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); Assert.assertFalse("query's last doc was "+lastDoc[0]+" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);

View File

@ -122,9 +122,8 @@ implements Serializable {
new SortField("publicationDate_"), new SortField("publicationDate_"),
SortField.FIELD_SCORE SortField.FIELD_SCORE
}); });
Searcher searcher = Searcher searcher = new MultiSearcher(new Searcher[] { new CustomSearcher(
new MultiSearcher(new Searchable[] { index, 2) });
new CustomSearcher (index, 2)});
// search and check hits // search and check hits
matchHits(searcher, custSort); matchHits(searcher, custSort);
} }

View File

@ -134,8 +134,8 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
QueryUtils.check(dq,s); QueryUtils.check(dq,s);
final Weight dw = dq.weight(s); final QueryWeight dw = dq.queryWeight(s);
final Scorer ds = dw.scorer(r); final Scorer ds = dw.scorer(r, true, false);
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS; final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) { if (skipOk) {
fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id")); fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id"));
@ -149,14 +149,12 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
QueryUtils.check(dq,s); QueryUtils.check(dq,s);
final Weight dw = dq.weight(s); final QueryWeight dw = dq.queryWeight(s);
final Scorer ds = dw.scorer(r); final Scorer ds = dw.scorer(r, true, false);
assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS); assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id")); assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
} }
public void testSimpleEqualScores1() throws Exception { public void testSimpleEqualScores1() throws Exception {
DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
@ -180,7 +178,6 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
throw e; throw e;
} }
} }
public void testSimpleEqualScores2() throws Exception { public void testSimpleEqualScores2() throws Exception {

View File

@ -80,6 +80,9 @@ public class TestDocBoost extends LuceneTestCase {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
float lastScore = 0.0f; float lastScore = 0.0f;

View File

@ -180,6 +180,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
// //

View File

@ -99,6 +99,10 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
this.scorer = new ScoreCachingWrappingScorer(scorer); this.scorer = new ScoreCachingWrappingScorer(scorer);
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
private static final float[] scores = new float[] { 0.7767749f, 1.7839992f, private static final float[] scores = new float[] { 0.7767749f, 1.7839992f,

View File

@ -114,6 +114,9 @@ public class TestScorerPerf extends LuceneTestCase {
public void setNextReader(IndexReader reader, int base) { public void setNextReader(IndexReader reader, int base) {
docBase = base; docBase = base;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }

View File

@ -76,6 +76,9 @@ public class TestSetNorm extends LuceneTestCase {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
float lastScore = 0.0f; float lastScore = 0.0f;

View File

@ -74,9 +74,7 @@ public class TestSimilarity extends LuceneTestCase {
Term b = new Term("field", "b"); Term b = new Term("field", "b");
Term c = new Term("field", "c"); Term c = new Term("field", "c");
searcher.search searcher.search(new TermQuery(b), new Collector() {
(new TermQuery(b),
new Collector() {
private Scorer scorer; private Scorer scorer;
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
@ -85,15 +83,16 @@ public class TestSimilarity extends LuceneTestCase {
assertTrue(scorer.score() == 1.0f); assertTrue(scorer.score() == 1.0f);
} }
public void setNextReader(IndexReader reader, int docBase) {} public void setNextReader(IndexReader reader, int docBase) {}
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
BooleanQuery bq = new BooleanQuery(); BooleanQuery bq = new BooleanQuery();
bq.add(new TermQuery(a), BooleanClause.Occur.SHOULD); bq.add(new TermQuery(a), BooleanClause.Occur.SHOULD);
bq.add(new TermQuery(b), BooleanClause.Occur.SHOULD); bq.add(new TermQuery(b), BooleanClause.Occur.SHOULD);
//System.out.println(bq.toString("field")); //System.out.println(bq.toString("field"));
searcher.search searcher.search(bq, new Collector() {
(bq,
new Collector() {
private int base = 0; private int base = 0;
private Scorer scorer; private Scorer scorer;
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
@ -106,6 +105,9 @@ public class TestSimilarity extends LuceneTestCase {
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
PhraseQuery pq = new PhraseQuery(); PhraseQuery pq = new PhraseQuery();
@ -124,13 +126,14 @@ public class TestSimilarity extends LuceneTestCase {
assertTrue(scorer.score() == 1.0f); assertTrue(scorer.score() == 1.0f);
} }
public void setNextReader(IndexReader reader, int docBase) {} public void setNextReader(IndexReader reader, int docBase) {}
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
pq.setSlop(2); pq.setSlop(2);
//System.out.println(pq.toString("field")); //System.out.println(pq.toString("field"));
searcher.search searcher.search(pq, new Collector() {
(pq,
new Collector() {
private Scorer scorer; private Scorer scorer;
public void setScorer(Scorer scorer) throws IOException { public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer; this.scorer = scorer;
@ -140,6 +143,9 @@ public class TestSimilarity extends LuceneTestCase {
assertTrue(scorer.score() == 2.0f); assertTrue(scorer.score() == 2.0f);
} }
public void setNextReader(IndexReader reader, int docBase) {} public void setNextReader(IndexReader reader, int docBase) {}
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
} }
} }

View File

@ -70,7 +70,7 @@ public class TestTermScorer extends LuceneTestCase
Term allTerm = new Term(FIELD, "all"); Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm); TermQuery termQuery = new TermQuery(allTerm);
Weight weight = termQuery.weight(indexSearcher); QueryWeight weight = termQuery.queryWeight(indexSearcher);
TermScorer ts = new TermScorer(weight, TermScorer ts = new TermScorer(weight,
indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
@ -98,6 +98,9 @@ public class TestTermScorer extends LuceneTestCase
public void setNextReader(IndexReader reader, int docBase) { public void setNextReader(IndexReader reader, int docBase) {
base = docBase; base = docBase;
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
}); });
assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2); assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
TestHit doc0 = (TestHit) docs.get(0); TestHit doc0 = (TestHit) docs.get(0);
@ -129,7 +132,7 @@ public class TestTermScorer extends LuceneTestCase
Term allTerm = new Term(FIELD, "all"); Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm); TermQuery termQuery = new TermQuery(allTerm);
Weight weight = termQuery.weight(indexSearcher); QueryWeight weight = termQuery.queryWeight(indexSearcher);
TermScorer ts = new TermScorer(weight, TermScorer ts = new TermScorer(weight,
indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
@ -146,14 +149,14 @@ public class TestTermScorer extends LuceneTestCase
Term allTerm = new Term(FIELD, "all"); Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm); TermQuery termQuery = new TermQuery(allTerm);
Weight weight = termQuery.weight(indexSearcher); QueryWeight weight = termQuery.queryWeight(indexSearcher);
TermScorer ts = new TermScorer(weight, TermScorer ts = new TermScorer(weight,
indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
indexReader.norms(FIELD)); indexReader.norms(FIELD));
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS); assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
//The next doc should be doc 5 //The next doc should be doc 5
assertTrue("doc should be number 5", ts.doc() == 5); assertTrue("doc should be number 5", ts.docID() == 5);
} }
public void testExplain() throws Exception public void testExplain() throws Exception
@ -161,7 +164,7 @@ public class TestTermScorer extends LuceneTestCase
Term allTerm = new Term(FIELD, "all"); Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm); TermQuery termQuery = new TermQuery(allTerm);
Weight weight = termQuery.weight(indexSearcher); QueryWeight weight = termQuery.queryWeight(indexSearcher);
TermScorer ts = new TermScorer(weight, TermScorer ts = new TermScorer(weight,
indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), indexReader.termDocs(allTerm), indexSearcher.getSimilarity(),
@ -179,7 +182,7 @@ public class TestTermScorer extends LuceneTestCase
Term dogsTerm = new Term(FIELD, "dogs"); Term dogsTerm = new Term(FIELD, "dogs");
termQuery = new TermQuery(dogsTerm); termQuery = new TermQuery(dogsTerm);
weight = termQuery.weight(indexSearcher); weight = termQuery.queryWeight(indexSearcher);
ts = new TermScorer(weight, indexReader.termDocs(dogsTerm), indexSearcher.getSimilarity(), ts = new TermScorer(weight, indexReader.termDocs(dogsTerm), indexSearcher.getSimilarity(),
indexReader.norms(FIELD)); indexReader.norms(FIELD));

View File

@ -332,6 +332,10 @@ public class TestTimeLimitingCollector extends LuceneTestCase {
docBase = base; docBase = base;
} }
public boolean acceptsDocsOutOfOrder() {
return false;
}
} }
} }

View File

@ -69,6 +69,10 @@ public class TestTopDocsCollector extends LuceneTestCase {
// Don't do anything. Assign scores in random // Don't do anything. Assign scores in random
} }
public boolean acceptsDocsOutOfOrder() {
return true;
}
} }
// Scores array to be used by MyTopDocsCollector. If it is changed, MAX_SCORE // Scores array to be used by MyTopDocsCollector. If it is changed, MAX_SCORE

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
@ -69,6 +70,7 @@ final class JustCompileSearchSpans {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
/** @deprecated delete in 3.0. */
public Collection getTerms() { public Collection getTerms() {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }
@ -113,11 +115,17 @@ final class JustCompileSearchSpans {
static final class JustCompileSpanScorer extends SpanScorer { static final class JustCompileSpanScorer extends SpanScorer {
/** @deprecated delete in 3.0 */
protected JustCompileSpanScorer(Spans spans, Weight weight, protected JustCompileSpanScorer(Spans spans, Weight weight,
Similarity similarity, byte[] norms) throws IOException { Similarity similarity, byte[] norms) throws IOException {
super(spans, weight, similarity, norms); super(spans, weight, similarity, norms);
} }
protected JustCompileSpanScorer(Spans spans, QueryWeight weight,
Similarity similarity, byte[] norms) throws IOException {
super(spans, weight, similarity, norms);
}
protected boolean setFreqCurrentDoc() throws IOException { protected boolean setFreqCurrentDoc() throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG); throw new UnsupportedOperationException(UNSUPPORTED_MSG);
} }

View File

@ -17,23 +17,18 @@ package org.apache.lucene.search.spans;
* limitations under the License. * limitations under the License.
*/ */
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer; import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryWeight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
public class TestNearSpansOrdered extends LuceneTestCase { public class TestNearSpansOrdered extends LuceneTestCase {
@ -163,8 +158,8 @@ public class TestNearSpansOrdered extends LuceneTestCase {
*/ */
public void testSpanNearScorerSkipTo1() throws Exception { public void testSpanNearScorerSkipTo1() throws Exception {
SpanNearQuery q = makeQuery(); SpanNearQuery q = makeQuery();
Weight w = q.createWeight(searcher); QueryWeight w = q.queryWeight(searcher);
Scorer s = w.scorer(searcher.getIndexReader()); Scorer s = w.scorer(searcher.getIndexReader(), true, false);
assertEquals(1, s.advance(1)); assertEquals(1, s.advance(1));
} }
/** /**
@ -173,8 +168,8 @@ public class TestNearSpansOrdered extends LuceneTestCase {
*/ */
public void testSpanNearScorerExplain() throws Exception { public void testSpanNearScorerExplain() throws Exception {
SpanNearQuery q = makeQuery(); SpanNearQuery q = makeQuery();
Weight w = q.createWeight(searcher); QueryWeight w = q.queryWeight(searcher);
Scorer s = w.scorer(searcher.getIndexReader()); Scorer s = w.scorer(searcher.getIndexReader(), true, false);
Explanation e = s.explain(1); Explanation e = s.explain(1);
assertTrue("Scorer explanation value for doc#1 isn't positive: " assertTrue("Scorer explanation value for doc#1 isn't positive: "
+ e.toString(), + e.toString(),