mirror of https://github.com/apache/lucene.git

LUCENE-3208: Renamed protected IndexSearcher.createWeight() to expert public method IndexSearcher.createNormalizedWeight(), as this better describes what this method does. The old method is still there for backwards compatibility. Query.weight() was deprecated and simply delegates to IndexSearcher. Both deprecated methods will be removed in Lucene 4.0. (Uwe Schindler, Robert Muir, Yonik Seeley)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1136568 13f79535-47bb-0310-9956-ffa450edef68

This commit is contained in:
parent 4c5c3c86e2
commit edac2e90d2
@@ -511,6 +511,15 @@ Bug fixes
   ArrayIndexOutOfBoundsException (selckin, Robert Muir, Mike
   McCandless)
 
+API Changes
+
+* LUCENE-3208: Renamed protected IndexSearcher.createWeight() to expert
+  public method IndexSearcher.createNormalizedWeight() as this better describes
+  what this method does. The old method is still there for backwards
+  compatibility. Query.weight() was deprecated and simply delegates to
+  IndexSearcher. Both deprecated methods will be removed in Lucene 4.0.
+  (Uwe Schindler, Robert Muir, Yonik Seeley)
+
 New Features
 
 * LUCENE-3140: Added experimental FST implementation to Lucene.
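For callers, the migration is mechanical: ask the searcher for the weight instead of asking the query. A minimal before/after sketch against the trunk API at this commit (the class and method names here are illustrative; the searcher, query, and atomic leaf context are assumed to be supplied by the caller):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    class CreateWeightMigrationSketch {
      Scorer scorerFor(IndexSearcher searcher, Query query, AtomicReaderContext leaf) throws IOException {
        // Before (deprecated, removed in 4.0): Weight w = query.weight(searcher);
        // After: one call that rewrites the query, creates the Weight, and normalizes it.
        Weight w = searcher.createNormalizedWeight(query);
        return w.scorer(leaf, Weight.ScorerContext.def());
      }
    }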
@@ -28,10 +28,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.QueryWrapperFilter;
 
 /* Tracks the stream of {@link BufferedDeletes}.
  * When DocumentsWriterPerThread flushes, its buffered
@@ -434,18 +434,16 @@ class BufferedDeletesStream {
   // Delete by query
   private synchronized long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, SegmentReader reader) throws IOException {
     long delCount = 0;
-    IndexSearcher searcher = new IndexSearcher(reader);
-    assert searcher.getTopReaderContext().isAtomic;
-    final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
-    try {
-      for (QueryAndLimit ent : queriesIter) {
-        Query query = ent.query;
-        int limit = ent.limit;
-        Weight weight = query.weight(searcher);
-        Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
-        if (scorer != null) {
+    final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
+    for (QueryAndLimit ent : queriesIter) {
+      Query query = ent.query;
+      int limit = ent.limit;
+      final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext);
+      if (docs != null) {
+        final DocIdSetIterator it = docs.iterator();
+        if (it != null) {
           while(true) {
-            int doc = scorer.nextDoc();
+            int doc = it.nextDoc();
             if (doc >= limit)
               break;
@@ -459,8 +457,6 @@ class BufferedDeletesStream {
           }
         }
       }
-    } finally {
-      searcher.close();
     }
 
     return delCount;
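The rewritten delete-by-query path no longer constructs a throwaway IndexSearcher per segment (and no longer needs the try/finally to close it); a QueryWrapperFilter produces the matching docs directly. A condensed sketch of the new loop shape, reusing the names from the hunk above:

    // query, limit and readerContext as in applyQueryDeletes() above.
    final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext);
    if (docs != null) {
      final DocIdSetIterator it = docs.iterator();  // may be null if the set is empty
      if (it != null) {
        int doc;
        // NO_MORE_DOCS is Integer.MAX_VALUE, so exhaustion also satisfies doc >= limit.
        while ((doc = it.nextDoc()) < limit) {
          // ... mark doc as deleted ...
        }
      }
    }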
@@ -289,7 +289,7 @@ public class IndexSearcher {
    */
   public TopDocs search(Query query, Filter filter, int n)
     throws IOException {
-    return search(createWeight(query), filter, n);
+    return search(createNormalizedWeight(query), filter, n);
   }
 
   /** Lower-level search API.
@@ -310,7 +310,7 @@ public class IndexSearcher {
    */
   public void search(Query query, Filter filter, Collector results)
     throws IOException {
-    search(leafContexts, createWeight(query), filter, results);
+    search(leafContexts, createNormalizedWeight(query), filter, results);
   }
 
   /** Lower-level search API.
@@ -328,7 +328,7 @@ public class IndexSearcher {
    */
   public void search(Query query, Collector results)
     throws IOException {
-    search(leafContexts, createWeight(query), null, results);
+    search(leafContexts, createNormalizedWeight(query), null, results);
   }
 
   /** Search implementation with arbitrary sorting. Finds
@@ -344,7 +344,7 @@ public class IndexSearcher {
    */
   public TopFieldDocs search(Query query, Filter filter, int n,
                              Sort sort) throws IOException {
-    return search(createWeight(query), filter, n, sort);
+    return search(createNormalizedWeight(query), filter, n, sort);
   }
 
   /**
@@ -357,7 +357,7 @@ public class IndexSearcher {
    */
   public TopFieldDocs search(Query query, int n,
                              Sort sort) throws IOException {
-    return search(createWeight(query), null, n, sort);
+    return search(createNormalizedWeight(query), null, n, sort);
   }
 
   /** Expert: Low-level search implementation. Finds the top <code>n</code>
@@ -623,7 +623,7 @@ public class IndexSearcher {
    * entire index.
    */
   public Explanation explain(Query query, int doc) throws IOException {
-    return explain(createWeight(query), doc);
+    return explain(createNormalizedWeight(query), doc);
   }
 
   /** Expert: low-level implementation method
@@ -665,13 +665,23 @@ public class IndexSearcher {
   }
 
   /**
-   * creates a weight for <code>query</code>
-   * @return new weight
+   * Creates a normalized weight for a top-level {@link Query}.
+   * The query is rewritten by this method and {@link Query#createWeight} called,
+   * afterwards the {@link Weight} is normalized. The returned {@code Weight}
+   * can then directly be used to get a {@link Scorer}.
+   * @lucene.internal
    */
-  protected Weight createWeight(Query query) throws IOException {
-    return query.weight(this);
+  public Weight createNormalizedWeight(Query query) throws IOException {
+    query = rewrite(query);
+    Weight weight = query.createWeight(this);
+    float sum = weight.sumOfSquaredWeights();
+    float norm = getSimilarityProvider().queryNorm(sum);
+    if (Float.isInfinite(norm) || Float.isNaN(norm))
+      norm = 1.0f;
+    weight.normalize(norm);
+    return weight;
   }
 
   /**
    * Returns this searchers the top-level {@link ReaderContext}.
    * @see IndexReader#getTopReaderContext()
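The Infinite/NaN guard in createNormalizedWeight() exists because the norm blows up for degenerate sums. Assuming DefaultSimilarity semantics, where queryNorm(sum) is 1/sqrt(sum), a zero sumOfSquaredWeights yields an infinite norm, and normalizing with it would turn every score into Infinity or NaN. A small worked check under that assumption:

    // Assumption: DefaultSimilarity, i.e. queryNorm(sum) == 1 / sqrt(sum).
    float sum = 0.0f;                               // e.g. a query with no scoring terms
    float norm = (float) (1.0 / Math.sqrt(sum));    // 1/sqrt(0) == Infinity
    if (Float.isInfinite(norm) || Float.isNaN(norm)) {
      norm = 1.0f;                                  // leave the weights unscaled instead
    }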
@@ -91,21 +91,6 @@ public abstract class Query implements Cloneable {
     throw new UnsupportedOperationException();
   }
 
-  /**
-   * Expert: Constructs and initializes a Weight for a top-level query.
-   */
-  public Weight weight(IndexSearcher searcher) throws IOException {
-    Query query = searcher.rewrite(this);
-    Weight weight = query.createWeight(searcher);
-    float sum = weight.sumOfSquaredWeights();
-    float norm = searcher.getSimilarityProvider().queryNorm(sum);
-    if (Float.isInfinite(norm) || Float.isNaN(norm))
-      norm = 1.0f;
-    weight.normalize(norm);
-    return weight;
-  }
-
 
   /** Expert: called to re-write queries into primitive queries. For example,
    * a PrefixQuery will be rewritten into a BooleanQuery that consists
    * of TermQuerys.
@@ -52,7 +52,7 @@ public class QueryWrapperFilter extends Filter {
     // get a private context that is used to rewrite, createWeight and score eventually
     assert context.reader.getTopReaderContext().isAtomic;
     final AtomicReaderContext privateContext = (AtomicReaderContext) context.reader.getTopReaderContext();
-    final Weight weight = query.weight(new IndexSearcher(privateContext));
+    final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
     return new DocIdSet() {
       @Override
       public DocIdSetIterator iterator() throws IOException {
@@ -187,7 +187,7 @@ public class CustomScoreQuery extends Query {
     boolean qStrict;
 
     public CustomWeight(IndexSearcher searcher) throws IOException {
-      this.subQueryWeight = subQuery.weight(searcher);
+      this.subQueryWeight = subQuery.createWeight(searcher);
       this.valSrcWeights = new Weight[valSrcQueries.length];
       for(int i = 0; i < valSrcQueries.length; i++) {
         this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher);
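Note that the sub-query moves to createWeight(), not createNormalizedWeight(): normalization is meant to happen exactly once, driven by the top-level call, while composite weights collect raw sub-weights. A sketch of that split (mainQuery is a hypothetical top-level query, not from this commit):

    // Inside a composite Weight constructor: a raw, un-normalized sub-weight.
    Weight subWeight = subQuery.createWeight(searcher);

    // At the top level only: rewrite + createWeight + a single normalize pass.
    Weight topWeight = searcher.createNormalizedWeight(mainQuery);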
@@ -0,0 +1,90 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.ExecutorService;
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
+
+/**
+ * Helper class that adds some extra checks to ensure correct
+ * usage of {@code IndexSearcher} and {@code Weight}.
+ * TODO: Extend this by more checks, that's just a start.
+ */
+public class AssertingIndexSearcher extends IndexSearcher {
+  public AssertingIndexSearcher(IndexReader r) {
+    super(r);
+  }
+
+  public AssertingIndexSearcher(ReaderContext context) {
+    super(context);
+  }
+
+  public AssertingIndexSearcher(IndexReader r, ExecutorService ex) {
+    super(r, ex);
+  }
+
+  public AssertingIndexSearcher(ReaderContext context, ExecutorService ex) {
+    super(context, ex);
+  }
+
+  /** Ensures, that the returned {@code Weight} is not normalized again, which may produce wrong scores. */
+  @Override
+  public Weight createNormalizedWeight(Query query) throws IOException {
+    final Weight w = super.createNormalizedWeight(query);
+    return new Weight() {
+      @Override
+      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+        return w.explain(context, doc);
+      }
+
+      @Override
+      public Query getQuery() {
+        return w.getQuery();
+      }
+
+      @Override
+      public float getValue() {
+        return w.getValue();
+      }
+
+      @Override
+      public void normalize(float norm) {
+        throw new IllegalStateException("Weight already normalized.");
+      }
+
+      @Override
+      public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
+        return w.scorer(context, scorerContext);
+      }
+
+      @Override
+      public float sumOfSquaredWeights() throws IOException {
+        throw new IllegalStateException("Weight already normalized.");
+      }
+
+      @Override
+      public boolean scoresDocsOutOfOrder() {
+        return w.scoresDocsOutOfOrder();
+      }
+    };
+  }
+}
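The wrapper makes a second normalization fail fast instead of silently skewing scores. A hypothetical test sketch (reader and query assumed to exist):

    IndexSearcher s = new AssertingIndexSearcher(reader);
    Weight w = s.createNormalizedWeight(query);
    try {
      w.normalize(1.0f);  // a second normalization attempt
      // should be unreachable
    } catch (IllegalStateException expected) {
      // the wrapper threw "Weight already normalized."
    }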
@@ -198,7 +198,7 @@ public class QueryUtils {
   public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
     //System.out.println("Checking "+q);
     final AtomicReaderContext[] readerContextArray = ReaderUtil.leaves(s.getTopReaderContext());
-    if (q.weight(s).scoresDocsOutOfOrder()) return;  // in this case order of skipTo() might differ from that of next().
+    if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return;  // in this case order of skipTo() might differ from that of next().
 
     final int skip_op = 0;
     final int next_op = 1;
@@ -241,7 +241,7 @@ public class QueryUtils {
           lastDoc[0] = doc;
           try {
             if (scorer == null) {
-              Weight w = q.weight(s);
+              Weight w = s.createNormalizedWeight(q);
               scorer = w.scorer(readerContextArray[leafPtr], ScorerContext.def());
             }
 
@@ -286,7 +286,7 @@ public class QueryUtils {
       if (lastReader[0] != null) {
         final IndexReader previousReader = lastReader[0];
         IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
-        Weight w = q.weight(indexSearcher);
+        Weight w = indexSearcher.createNormalizedWeight(q);
         Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
         if (scorer != null) {
           boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -312,7 +312,7 @@ public class QueryUtils {
       // previous reader, hits NO_MORE_DOCS
       final IndexReader previousReader = lastReader[0];
       IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
-      Weight w = q.weight(indexSearcher);
+      Weight w = indexSearcher.createNormalizedWeight(q);
       Scorer scorer = w.scorer((AtomicReaderContext)previousReader.getTopReaderContext(), ScorerContext.def());
       if (scorer != null) {
         boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -343,7 +343,7 @@ public class QueryUtils {
         try {
           long startMS = System.currentTimeMillis();
           for (int i=lastDoc[0]+1; i<=doc; i++) {
-            Weight w = q.weight(s);
+            Weight w = s.createNormalizedWeight(q);
             Scorer scorer = w.scorer(context[leafPtr], ScorerContext.def());
             Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
             Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
@@ -370,7 +370,7 @@ public class QueryUtils {
         if (lastReader[0] != null) {
           final IndexReader previousReader = lastReader[0];
           IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
-          Weight w = q.weight(indexSearcher);
+          Weight w = indexSearcher.createNormalizedWeight(q);
           Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
           if (scorer != null) {
             boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -394,7 +394,7 @@ public class QueryUtils {
       // previous reader, hits NO_MORE_DOCS
       final IndexReader previousReader = lastReader[0];
       IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
-      Weight w = q.weight(indexSearcher);
+      Weight w = indexSearcher.createNormalizedWeight(q);
       Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
       if (scorer != null) {
         boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -55,6 +55,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.AssertingIndexSearcher;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
@@ -1231,13 +1232,11 @@ public abstract class LuceneTestCase extends Assert {
    * with one that returns null for getSequentialSubReaders.
    */
   public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
 
     if (random.nextBoolean()) {
       if (maybeWrap && rarely()) {
-        return new IndexSearcher(new SlowMultiReaderWrapper(r));
-      } else {
-        return new IndexSearcher(r);
+        r = new SlowMultiReaderWrapper(r);
       }
+      return random.nextBoolean() ? new AssertingIndexSearcher(r) : new AssertingIndexSearcher(r.getTopReaderContext());
     } else {
       int threads = 0;
       final ExecutorService ex = (random.nextBoolean()) ? null
@@ -1246,20 +1245,31 @@ public abstract class LuceneTestCase extends Assert {
       if (ex != null && VERBOSE) {
         System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
       }
-      return new IndexSearcher(r.getTopReaderContext(), ex) {
-        @Override
-        public void close() throws IOException {
-          super.close();
-          if (ex != null) {
-            ex.shutdown();
-            try {
-              ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
-            } catch (InterruptedException e) {
-              e.printStackTrace();
-            }
-          }
-        }
-      };
+      return random.nextBoolean() ?
+        new AssertingIndexSearcher(r, ex) {
+          @Override
+          public void close() throws IOException {
+            super.close();
+            shutdownExecutorService(ex);
+          }
+        } : new AssertingIndexSearcher(r.getTopReaderContext(), ex) {
+          @Override
+          public void close() throws IOException {
+            super.close();
+            shutdownExecutorService(ex);
+          }
+        };
     }
   }
 
+  static void shutdownExecutorService(ExecutorService ex) {
+    if (ex != null) {
+      ex.shutdown();
+      try {
+        ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+    }
+  }
@@ -173,7 +173,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
 
     QueryUtils.check(random, dq, s);
     assertTrue(s.getTopReaderContext().isAtomic);
-    final Weight dw = dq.weight(s);
+    final Weight dw = s.createNormalizedWeight(dq);
     final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
     final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
     if (skipOk) {
@@ -188,7 +188,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
     dq.add(tq("dek", "DOES_NOT_EXIST"));
     assertTrue(s.getTopReaderContext().isAtomic);
     QueryUtils.check(random, dq, s);
-    final Weight dw = dq.weight(s);
+    final Weight dw = s.createNormalizedWeight(dq);
     final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
     assertTrue("firsttime skipTo found no match",
                ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
@@ -73,7 +73,7 @@ public class TestTermScorer extends LuceneTestCase {
     Term allTerm = new Term(FIELD, "all");
     TermQuery termQuery = new TermQuery(allTerm);
 
-    Weight weight = termQuery.weight(indexSearcher);
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
     Scorer ts = weight.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
     // we have 2 documents with the term all in them, one document for all the
@@ -134,7 +134,7 @@ public class TestTermScorer extends LuceneTestCase {
     Term allTerm = new Term(FIELD, "all");
     TermQuery termQuery = new TermQuery(allTerm);
 
-    Weight weight = termQuery.weight(indexSearcher);
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
     Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
     assertTrue("next did not return a doc",
@@ -152,7 +152,7 @@ public class TestTermScorer extends LuceneTestCase {
     Term allTerm = new Term(FIELD, "all");
     TermQuery termQuery = new TermQuery(allTerm);
 
-    Weight weight = termQuery.weight(indexSearcher);
+    Weight weight = indexSearcher.createNormalizedWeight(termQuery);
     assertTrue(indexSearcher.getTopReaderContext().isAtomic);
 
     Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
@@ -197,7 +197,7 @@ public class TestTopDocsMerge extends LuceneTestCase {
     }
 
     // ... then all shards:
-    final Weight w = query.weight(searcher);
+    final Weight w = searcher.createNormalizedWeight(query);
 
     final TopDocs[] shardHits = new TopDocs[subSearchers.length];
     for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
@@ -170,22 +170,20 @@ public class TestNearSpansOrdered extends LuceneTestCase {
    */
   public void testSpanNearScorerSkipTo1() throws Exception {
     SpanNearQuery q = makeQuery();
-    Weight w = q.weight(searcher);
+    Weight w = searcher.createNormalizedWeight(q);
     ReaderContext topReaderContext = searcher.getTopReaderContext();
     AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
     Scorer s = w.scorer(leaves[0], ScorerContext.def());
     assertEquals(1, s.advance(1));
   }
 
   /**
    * not a direct test of NearSpans, but a demonstration of how/when
    * this causes problems
    */
   public void testSpanNearScorerExplain() throws Exception {
     SpanNearQuery q = makeQuery();
-    ReaderContext topReaderContext = searcher.getTopReaderContext();
-    AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+    Explanation e = searcher.explain(q, 1);
 
-    Explanation e = q.weight(searcher).explain(leaves[0], 1);
     assertTrue("Scorer explanation value for doc#1 isn't positive: "
                + e.toString(),
                0.0f < e.getValue());
@@ -434,7 +434,7 @@ public class TestSpans extends LuceneTestCase {
                                 slop,
                                 ordered);
 
-        spanScorer = snq.weight(searcher).scorer(leaves[i], ScorerContext.def());
+        spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], ScorerContext.def());
       } finally {
         searcher.setSimilarityProvider(oldSim);
       }
@@ -67,7 +67,7 @@ public class BoostedQuery extends Query {
 
     public BoostedWeight(IndexSearcher searcher) throws IOException {
       this.searcher = searcher;
-      this.qWeight = q.weight(searcher);
+      this.qWeight = q.createWeight(searcher);
       this.fcontext = boostVal.newContext(searcher);
       boostVal.createWeight(fcontext,searcher);
     }
@@ -68,7 +68,7 @@ public class QueryValueSource extends ValueSource {
 
   @Override
   public void createWeight(Map context, IndexSearcher searcher) throws IOException {
-    Weight w = q.weight(searcher);
+    Weight w = searcher.createNormalizedWeight(q);
     context.put(this, w);
   }
 }
@@ -98,7 +98,7 @@ class QueryDocValues extends FloatDocValues {
     this.q = vs.q;
     this.fcontext = fcontext;
 
-    Weight w = fcontext==null ? null : (Weight)fcontext.get(q);
+    Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
     if (w == null) {
       IndexSearcher weightSearcher;
       if(fcontext == null) {
@@ -109,7 +109,8 @@ class QueryDocValues extends FloatDocValues {
         weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
       }
     }
-      w = q.weight(weightSearcher);
+      vs.createWeight(fcontext, weightSearcher);
+      w = (Weight)fcontext.get(vs);
     }
     weight = w;
   }
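The QueryDocValues change is a cache-key fix as much as an API migration: createWeight(Map, IndexSearcher) stores the Weight in the context map keyed by the ValueSource (context.put(this, w) above), so the read side must look it up under vs rather than under the raw query q, or the cached Weight is never found. A sketch of the now-consistent pair, using the names from the hunks above:

    // Write side: QueryValueSource.createWeight() puts the Weight under the ValueSource key.
    vs.createWeight(fcontext, weightSearcher);      // internally: fcontext.put(vs, w)
    // Read side: must use the same key.
    Weight w = (Weight) fcontext.get(vs);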