mirror of https://github.com/apache/lucene.git
LUCENE-2882: Cut over SpanQuery#getSpans to AtomicReaderContext
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1062775 13f79535-47bb-0310-9956-ffa450edef68
parent 32e156ffdb
commit ce53019db5
@@ -127,6 +127,9 @@ Changes in backwards compatibility policy
* LUCENE-2865: Weight#scorer(AtomicReaderContext, boolean, boolean) now accepts
  a ScorerContext struct instead of booleans. (Simon Willnauer)

+ * LUCENE-2882: Cut over SpanQuery#getSpans to AtomicReaderContext to enforce
+   per segment semantics on SpanQuery & Spans. (Simon Willnauer)

Changes in Runtime Behavior

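Not part of the commit, but for context: a minimal caller-side sketch of what the LUCENE-2882 cut-over means. getSpans is now evaluated per segment, so code that used to pass a top-level IndexReader iterates the atomic leaf contexts (ReaderUtil.leaves, as the converted classes below do) and rebases leaf-relative doc IDs with docBase. The helper name visitAllSpans is illustrative only.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.ReaderUtil;

class SpanCutoverSketch {
  // Assumed helper: walks every leaf, the pattern this commit introduces.
  static void visitAllSpans(IndexReader topReader, SpanQuery query) throws IOException {
    AtomicReaderContext[] leaves = ReaderUtil.leaves(topReader.getTopReaderContext());
    for (AtomicReaderContext leaf : leaves) {
      Spans spans = query.getSpans(leaf);            // per segment; doc IDs are leaf-relative
      while (spans.next()) {
        int globalDoc = leaf.docBase + spans.doc();  // rebase into the top-level doc ID space
        System.out.println("match in doc " + globalDoc);
      }
    }
  }
}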
@@ -30,6 +30,7 @@ import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.*;
import org.apache.lucene.search.spans.FieldMaskingSpanQuery;

@@ -50,7 +51,7 @@ public class WeightedSpanTermExtractor {
private String fieldName;
private TokenStream tokenStream;
- private Map<String,IndexReader> readers = new HashMap<String,IndexReader>(10);
+ private Map<String,AtomicReaderContext> readers = new HashMap<String,AtomicReaderContext>(10);
private String defaultField;
private boolean expandMultiTermQuery;
private boolean cachedTokenStream;

@@ -66,11 +67,11 @@ public class WeightedSpanTermExtractor {
}

private void closeReaders() {
- Collection<IndexReader> readerSet = readers.values();
+ Collection<AtomicReaderContext> ctxSet = readers.values();

- for (final IndexReader reader : readerSet) {
+ for (final AtomicReaderContext ctx : ctxSet) {
try {
- reader.close();
+ ctx.reader.close();
} catch (IOException e) {
// alert?
}

@@ -149,7 +150,7 @@ public class WeightedSpanTermExtractor {
query = mtq;
}
if (mtq.getField() != null) {
- IndexReader ir = getReaderForField(mtq.getField());
+ IndexReader ir = getLeafContextForField(mtq.getField()).reader;
extract(query.rewrite(ir), terms);
}
} else if (query instanceof MultiPhraseQuery) {

@@ -234,7 +235,7 @@ public class WeightedSpanTermExtractor {
final boolean mustRewriteQuery = mustRewriteQuery(spanQuery);
if (mustRewriteQuery) {
for (final String field : fieldNames) {
- final SpanQuery rewrittenQuery = (SpanQuery) spanQuery.rewrite(getReaderForField(field));
+ final SpanQuery rewrittenQuery = (SpanQuery) spanQuery.rewrite(getLeafContextForField(field).reader);
queries.put(field, rewrittenQuery);
rewrittenQuery.extractTerms(nonWeightedTerms);
}

@@ -246,12 +247,12 @@ public class WeightedSpanTermExtractor {

for (final String field : fieldNames) {

- IndexReader reader = getReaderForField(field);
+ AtomicReaderContext context = getLeafContextForField(field);
final Spans spans;
if (mustRewriteQuery) {
- spans = queries.get(field).getSpans(reader);
+ spans = queries.get(field).getSpans(context);
} else {
- spans = spanQuery.getSpans(reader);
+ spans = spanQuery.getSpans(context);
}

@@ -317,22 +318,23 @@ public class WeightedSpanTermExtractor {
return rv;
}

- private IndexReader getReaderForField(String field) throws IOException {
+ private AtomicReaderContext getLeafContextForField(String field) throws IOException {
if(wrapToCaching && !cachedTokenStream && !(tokenStream instanceof CachingTokenFilter)) {
tokenStream = new CachingTokenFilter(tokenStream);
cachedTokenStream = true;
}
- IndexReader reader = readers.get(field);
- if (reader == null) {
+ AtomicReaderContext context = readers.get(field);
+ if (context == null) {
MemoryIndex indexer = new MemoryIndex();
indexer.addField(field, tokenStream);
tokenStream.reset();
IndexSearcher searcher = indexer.createSearcher();
- reader = searcher.getIndexReader();
- readers.put(field, reader);
+ // MEM index has only atomic ctx
+ context = (AtomicReaderContext) searcher.getTopReaderContext();
+ readers.put(field, context);
}

- return reader;
+ return context;
}

/**

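Illustrative sketch (not the commit's code) of the assumption behind getLeafContextForField above: a MemoryIndex holds a single segment, so the top reader context of its searcher is already atomic and the downcast mirrors the "MEM index has only atomic ctx" comment. Field, token stream and query are assumed to be supplied by the caller.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans;

class HighlighterLeafSketch {
  // Build a one-segment MemoryIndex for a field and get per-leaf spans from it.
  static Spans spansForField(String field, TokenStream tokenStream, SpanQuery query) throws IOException {
    MemoryIndex indexer = new MemoryIndex();
    indexer.addField(field, tokenStream);
    IndexSearcher searcher = indexer.createSearcher();
    AtomicReaderContext leaf = (AtomicReaderContext) searcher.getTopReaderContext();
    return query.getSpans(leaf);
  }
}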
@@ -62,14 +62,15 @@ public class CachingSpanFilter extends SpanFilter {

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- SpanFilterResult result = getCachedResult(context.reader);
+ SpanFilterResult result = getCachedResult(context);
return result != null ? result.getDocIdSet() : null;
}

// for testing
int hitCount, missCount;

- private SpanFilterResult getCachedResult(IndexReader reader) throws IOException {
+ private SpanFilterResult getCachedResult(AtomicReaderContext context) throws IOException {
+ final IndexReader reader = context.reader;

final Object coreKey = reader.getCoreCacheKey();
final Object delCoreKey = reader.hasDeletions() ? reader.getDeletedDocs() : coreKey;

@@ -81,7 +82,7 @@ public class CachingSpanFilter extends SpanFilter {
}

missCount++;
- result = filter.bitSpans(reader);
+ result = filter.bitSpans(context);

cache.put(coreKey, delCoreKey, result);
return result;

@@ -89,8 +90,8 @@ public class CachingSpanFilter extends SpanFilter {

@Override
- public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
- return getCachedResult(reader);
+ public SpanFilterResult bitSpans(AtomicReaderContext context) throws IOException {
+ return getCachedResult(context);
}

@Override

@@ -15,7 +15,7 @@ package org.apache.lucene.search;
* limitations under the License.
*/

- import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;

import java.io.IOException;

@@ -30,9 +30,9 @@ import java.io.IOException;
public abstract class SpanFilter extends Filter{
/** Returns a SpanFilterResult with true for documents which should be permitted in
search results, and false for those that should not and Spans for where the true docs match.
- * @param reader The {@link org.apache.lucene.index.IndexReader} to load position and DocIdSet information from
+ * @param context The {@link AtomicReaderContext} to load position and DocIdSet information from
* @return A {@link SpanFilterResult}
* @throws java.io.IOException if there was an issue accessing the necessary information
* */
- public abstract SpanFilterResult bitSpans(IndexReader reader) throws IOException;
+ public abstract SpanFilterResult bitSpans(AtomicReaderContext context) throws IOException;
}

@@ -16,7 +16,6 @@ package org.apache.lucene.search;
*/


- import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans;

@@ -54,15 +53,15 @@ public class SpanQueryFilter extends SpanFilter {

@Override
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
- SpanFilterResult result = bitSpans(context.reader);
+ SpanFilterResult result = bitSpans(context);
return result.getDocIdSet();
}

@Override
- public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
+ public SpanFilterResult bitSpans(AtomicReaderContext context) throws IOException {

- final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
- Spans spans = query.getSpans(reader);
+ final OpenBitSet bits = new OpenBitSet(context.reader.maxDoc());
+ Spans spans = query.getSpans(context);
List<SpanFilterResult.PositionInfo> tmp = new ArrayList<SpanFilterResult.PositionInfo>(20);
int currentDoc = -1;
SpanFilterResult.PositionInfo currentInfo = null;

@@ -144,7 +144,7 @@ public class PayloadNearQuery extends SpanNearQuery {

@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new PayloadNearSpanScorer(query.getSpans(context.reader), this,
+ return new PayloadNearSpanScorer(query.getSpans(context), this,
similarity, context.reader.norms(query.getField()));
}
}

@@ -24,6 +24,8 @@ import java.util.Iterator;
import java.util.List;

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+ import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;

@@ -38,6 +40,7 @@ import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;
+ import org.apache.lucene.util.ReaderUtil;

/**
* Experimental class to get set of payloads for most standard Lucene queries.

@@ -48,14 +51,16 @@ import org.apache.lucene.search.spans.Spans;
*
*/
public class PayloadSpanUtil {
- private IndexReader reader;
+ private ReaderContext context;

/**
- * @param reader
+ * @param context
*        that contains doc with payloads to extract
*
+ * @see IndexReader#getTopReaderContext()
*/
- public PayloadSpanUtil(IndexReader reader) {
- this.reader = reader;
+ public PayloadSpanUtil(ReaderContext context) {
+ this.context = context;
}

/**

@@ -169,15 +174,16 @@ public class PayloadSpanUtil {

private void getPayloads(Collection<byte []> payloads, SpanQuery query)
throws IOException {
- Spans spans = query.getSpans(reader);
-
- while (spans.next() == true) {
- if (spans.isPayloadAvailable()) {
- Collection<byte[]> payload = spans.getPayload();
- for (byte [] bytes : payload) {
- payloads.add(bytes);
+ final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
+ for (AtomicReaderContext atomicReaderContext : leaves) {
+ final Spans spans = query.getSpans(atomicReaderContext);
+ while (spans.next() == true) {
+ if (spans.isPayloadAvailable()) {
+ Collection<byte[]> payload = spans.getPayload();
+ for (byte [] bytes : payload) {
+ payloads.add(bytes);
}
}

}
}
}

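Hedged usage sketch for the reworked PayloadSpanUtil API (mirrors how the converted tests below call it; reader and term are assumed inputs): the constructor now takes the top-level ReaderContext instead of an IndexReader, and getPayloads iterates the leaves internally as shown above.

import java.io.IOException;
import java.util.Collection;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.payloads.PayloadSpanUtil;
import org.apache.lucene.search.spans.SpanTermQuery;

class PayloadSpanUtilSketch {
  static Collection<byte[]> payloadsFor(IndexReader reader, Term term) throws IOException {
    PayloadSpanUtil psu = new PayloadSpanUtil(reader.getTopReaderContext());
    return psu.getPayloadsForQuery(new SpanTermQuery(term));
  }
}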
@@ -75,7 +75,7 @@ public class PayloadTermQuery extends SpanTermQuery {

@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new PayloadTermSpanScorer((TermSpans) query.getSpans(context.reader),
+ return new PayloadTermSpanScorer((TermSpans) query.getSpans(context),
this, similarity, context.reader.norms(query.getField()));
}

@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;

@@ -91,8 +92,8 @@ public class FieldMaskingSpanQuery extends SpanQuery {
// ...this is done to be more consistent with things like SpanFirstQuery

@Override
- public Spans getSpans(IndexReader reader) throws IOException {
- return maskedQuery.getSpans(reader);
+ public Spans getSpans(AtomicReaderContext context) throws IOException {
+ return maskedQuery.getSpans(context);
}

@Override

@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/

- import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.ArrayUtil;

import java.io.IOException;

@@ -77,11 +77,11 @@ public class NearSpansOrdered extends Spans {
private SpanNearQuery query;
private boolean collectPayloads = true;

- public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader) throws IOException {
- this(spanNearQuery, reader, true);
+ public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context) throws IOException {
+ this(spanNearQuery, context, true);
}

- public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader, boolean collectPayloads)
+ public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, boolean collectPayloads)
throws IOException {
if (spanNearQuery.getClauses().length < 2) {
throw new IllegalArgumentException("Less than 2 clauses: "

@@ -94,7 +94,7 @@ public class NearSpansOrdered extends Spans {
matchPayload = new LinkedList<byte[]>();
subSpansByDoc = new Spans[clauses.length];
for (int i = 0; i < clauses.length; i++) {
- subSpans[i] = clauses[i].getSpans(reader);
+ subSpans[i] = clauses[i].getSpans(context);
subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
}
query = spanNearQuery; // kept for toString() only.

@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/

- import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.PriorityQueue;

import java.io.IOException;

@@ -131,7 +131,7 @@ public class NearSpansUnordered extends Spans {
}


- public NearSpansUnordered(SpanNearQuery query, IndexReader reader)
+ public NearSpansUnordered(SpanNearQuery query, AtomicReaderContext context)
throws IOException {
this.query = query;
this.slop = query.getSlop();

@@ -141,7 +141,7 @@ public class NearSpansUnordered extends Spans {
subSpans = new Spans[clauses.length];
for (int i = 0; i < clauses.length; i++) {
SpansCell cell =
- new SpansCell(clauses[i].getSpans(reader), i);
+ new SpansCell(clauses[i].getSpans(context), i);
ordered.add(cell);
subSpans[i] = cell.spans;
}

@@ -38,7 +38,7 @@ public class SpanFirstQuery extends SpanPositionRangeQuery {

@Override
protected AcceptStatus acceptPosition(Spans spans) throws IOException {
- assert spans.start() != spans.end();
+ assert spans.start() != spans.end() : "start equals end: " + spans.start();
if (spans.start() >= end)
return AcceptStatus.NO_AND_ADVANCE;
else if (spans.end() <= end)

@@ -20,6 +20,7 @@ package org.apache.lucene.search.spans;
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;

@@ -88,7 +89,7 @@ public class SpanMultiTermQueryWrapper<Q extends MultiTermQuery> extends SpanQue
}

@Override
- public Spans getSpans(IndexReader reader) throws IOException {
+ public Spans getSpans(AtomicReaderContext context) throws IOException {
throw new UnsupportedOperationException("Query should have been rewritten");
}

@@ -27,6 +27,7 @@ import java.util.Set;


import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;

@@ -116,16 +117,16 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
}

@Override
- public Spans getSpans(final IndexReader reader) throws IOException {
+ public Spans getSpans(final AtomicReaderContext context) throws IOException {
if (clauses.size() == 0) // optimize 0-clause case
- return new SpanOrQuery(getClauses()).getSpans(reader);
+ return new SpanOrQuery(getClauses()).getSpans(context);

if (clauses.size() == 1) // optimize 1-clause case
- return clauses.get(0).getSpans(reader);
+ return clauses.get(0).getSpans(context);

return inOrder
- ? (Spans) new NearSpansOrdered(this, reader, collectPayloads)
- : (Spans) new NearSpansUnordered(this, reader);
+ ? (Spans) new NearSpansOrdered(this, context, collectPayloads)
+ : (Spans) new NearSpansUnordered(this, context);
}

@Override

@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;
*/

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;

@@ -74,12 +75,12 @@ public class SpanNotQuery extends SpanQuery implements Cloneable {
}

@Override
- public Spans getSpans(final IndexReader reader) throws IOException {
+ public Spans getSpans(final AtomicReaderContext context) throws IOException {
return new Spans() {
- private Spans includeSpans = include.getSpans(reader);
+ private Spans includeSpans = include.getSpans(context);
private boolean moreInclude = true;

- private Spans excludeSpans = exclude.getSpans(reader);
+ private Spans excludeSpans = exclude.getSpans(context);
private boolean moreExclude = excludeSpans.next();

@Override

@@ -26,6 +26,7 @@ import java.util.Iterator;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.ToStringUtils;

@@ -162,9 +163,9 @@ public class SpanOrQuery extends SpanQuery implements Cloneable {
}

@Override
- public Spans getSpans(final IndexReader reader) throws IOException {
+ public Spans getSpans(final AtomicReaderContext context) throws IOException {
if (clauses.size() == 1) // optimize 1-clause case
- return (clauses.get(0)).getSpans(reader);
+ return (clauses.get(0)).getSpans(context);

return new Spans() {
private SpanQueue queue = null;

@@ -173,7 +174,7 @@ public class SpanOrQuery extends SpanQuery implements Cloneable {
queue = new SpanQueue(clauses.size());
Iterator<SpanQuery> i = clauses.iterator();
while (i.hasNext()) {
- Spans spans = i.next().getSpans(reader);
+ Spans spans = i.next().getSpans(context);
if ( ((target == -1) && spans.next())
|| ((target != -1) && spans.skipTo(target))) {
queue.add(spans);

@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;


import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;

@@ -80,8 +81,8 @@ public abstract class SpanPositionCheckQuery extends SpanQuery implements Clonea
protected abstract AcceptStatus acceptPosition(Spans spans) throws IOException;

@Override
- public Spans getSpans(final IndexReader reader) throws IOException {
- return new PositionCheckSpan(reader);
+ public Spans getSpans(final AtomicReaderContext context) throws IOException {
+ return new PositionCheckSpan(context);
}


@@ -105,8 +106,8 @@ public abstract class SpanPositionCheckQuery extends SpanQuery implements Clonea
protected class PositionCheckSpan extends Spans {
private Spans spans;

- public PositionCheckSpan(IndexReader reader) throws IOException {
- spans = match.getSpans(reader);
+ public PositionCheckSpan(AtomicReaderContext context) throws IOException {
+ spans = match.getSpans(context);
}

@Override

@@ -19,7 +19,7 @@ package org.apache.lucene.search.spans;

import java.io.IOException;

- import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight;

@@ -28,7 +28,7 @@ import org.apache.lucene.search.Weight;
public abstract class SpanQuery extends Query {
/** Expert: Returns the matches for this query in an index. Used internally
* to search for spans. */
- public abstract Spans getSpans(IndexReader reader) throws IOException;
+ public abstract Spans getSpans(AtomicReaderContext context) throws IOException;

/** Returns the name of the field matched by this query.*/
public abstract String getField();

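Since getSpans is now the only abstract span entry point and it is per-leaf, a hedged sketch of what a custom subclass looks like after this change (modelled on FieldMaskingSpanQuery above; the class and field names here are illustrative, not part of the commit):

import java.io.IOException;

import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans;

public class DelegatingSpanQuery extends SpanQuery {
  private final SpanQuery delegate;

  public DelegatingSpanQuery(SpanQuery delegate) {
    this.delegate = delegate;
  }

  @Override
  public Spans getSpans(AtomicReaderContext context) throws IOException {
    // Spans returned here enumerate documents of this leaf only, with leaf-relative doc IDs.
    return delegate.getSpans(context);
  }

  @Override
  public String getField() {
    return delegate.getField();
  }

  @Override
  public String toString(String field) {
    return "DelegatingSpanQuery(" + delegate.toString(field) + ")";
  }
}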
@@ -18,6 +18,7 @@ package org.apache.lucene.search.spans;
*/

import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.util.ToStringUtils;

@@ -80,7 +81,8 @@ public class SpanTermQuery extends SpanQuery {
}

@Override
- public Spans getSpans(final IndexReader reader) throws IOException {
+ public Spans getSpans(final AtomicReaderContext context) throws IOException {
+ final IndexReader reader = context.reader;
final DocsAndPositionsEnum postings = reader.termPositionsEnum(reader.getDeletedDocs(),
term.field(),
term.bytes());

@@ -73,7 +73,7 @@ public class SpanWeight extends Weight {

@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new SpanScorer(query.getSpans(context.reader), this, similarity, context.reader
+ return new SpanScorer(query.getSpans(context), this, similarity, context.reader
.norms(query.getField()));
}

@@ -83,4 +83,5 @@ public abstract class Spans {
* @return true if there is a payload available at this position that can be loaded
*/
public abstract boolean isPayloadAvailable();
+
}

@@ -19,7 +19,6 @@ package org.apache.lucene.search;

import java.io.IOException;

- import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.index.FieldInvertState;

@@ -278,7 +277,7 @@ final class JustCompileSearch {
static final class JustCompileSpanFilter extends SpanFilter {

@Override
- public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
+ public SpanFilterResult bitSpans(AtomicReaderContext context) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}

@@ -40,6 +40,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.search.payloads.PayloadSpanUtil;
+ import org.apache.lucene.search.spans.MultiSpansWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;

@@ -243,14 +244,15 @@ public class TestPositionIncrement extends LuceneTestCase {
"a a b c d e a f g h i j a b k k")));
writer.addDocument(doc);

- IndexReader r = new SlowMultiReaderWrapper(writer.getReader());
+ final IndexReader readerFromWriter = writer.getReader();
+ SlowMultiReaderWrapper r = new SlowMultiReaderWrapper(readerFromWriter);

DocsAndPositionsEnum tp = r.termPositionsEnum(r.getDeletedDocs(),
"content",
new BytesRef("a"));

int count = 0;
- assertTrue(tp.nextDoc() != tp.NO_MORE_DOCS);
+ assertTrue(tp.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS);
// "a" occurs 4 times
assertEquals(4, tp.freq());
int expected = 0;

@@ -260,9 +262,9 @@ public class TestPositionIncrement extends LuceneTestCase {
assertEquals(6, tp.nextPosition());

// only one doc has "a"
- assertEquals(tp.NO_MORE_DOCS, tp.nextDoc());
+ assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc());

- IndexSearcher is = new IndexSearcher(r);
+ IndexSearcher is = new IndexSearcher(readerFromWriter);

SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));

@@ -274,7 +276,7 @@ public class TestPositionIncrement extends LuceneTestCase {
if (VERBOSE) {
System.out.println("\ngetPayloadSpans test");
}
- Spans pspans = snq.getSpans(is.getIndexReader());
+ Spans pspans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq);
while (pspans.next()) {
if (VERBOSE) {
System.out.println("doc " + pspans.doc() + ": span " + pspans.start()

@@ -289,11 +291,11 @@ public class TestPositionIncrement extends LuceneTestCase {
}
}
}
- assertEquals(5, count);
assertTrue(sawZero);
+ assertEquals(5, count);

// System.out.println("\ngetSpans test");
- Spans spans = snq.getSpans(is.getIndexReader());
+ Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq);
count = 0;
sawZero = false;
while (spans.next()) {

@@ -308,7 +310,7 @@ public class TestPositionIncrement extends LuceneTestCase {
// System.out.println("\nPayloadSpanUtil test");

sawZero = false;
- PayloadSpanUtil psu = new PayloadSpanUtil(is.getIndexReader());
+ PayloadSpanUtil psu = new PayloadSpanUtil(is.getTopReaderContext());
Collection<byte[]> pls = psu.getPayloadsForQuery(snq);
count = pls.size();
for (byte[] bytes : pls) {

@@ -21,13 +21,14 @@ import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.util.ReaderUtil;

public class TestSpanQueryFilter extends LuceneTestCase {

@@ -40,15 +41,18 @@ public class TestSpanQueryFilter extends LuceneTestCase {
Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(document);
}
- IndexReader reader = writer.getReader();
+ final int number = 10;
+ IndexReader reader = writer.getReader();
writer.close();

- SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim()));
+ AtomicReaderContext[] leaves = ReaderUtil.leaves(reader.getTopReaderContext());
+ int subIndex = ReaderUtil.subIndex(number, leaves); // find the reader with this document in it

+ SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(number).trim()));
SpanQueryFilter filter = new SpanQueryFilter(query);
- SpanFilterResult result = filter.bitSpans(new SlowMultiReaderWrapper(reader));
+ SpanFilterResult result = filter.bitSpans(leaves[subIndex]);
DocIdSet docIdSet = result.getDocIdSet();
assertTrue("docIdSet is null and it shouldn't be", docIdSet != null);
- assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, 10);
+ assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, number);
List<SpanFilterResult.PositionInfo> spans = result.getPositions();
assertTrue("spans is null and it shouldn't be", spans != null);
int size = getDocIdSetSize(docIdSet);

@@ -60,6 +64,7 @@ public class TestSpanQueryFilter extends LuceneTestCase {
//There should be two positions in each
assertTrue("info.getPositions() Size: " + info.getPositions().size() + " is not: " + 2, info.getPositions().size() == 2);
}
+ reader.close();
dir.close();
}

@@ -26,7 +26,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.English;
import org.apache.lucene.index.IndexReader;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;

@@ -127,7 +126,7 @@ public class PayloadHelper {
doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- reader = new SlowMultiReaderWrapper(IndexReader.open(writer));
+ reader = IndexReader.open(writer);
writer.close();

IndexSearcher searcher = new IndexSearcher(reader);

@@ -301,7 +301,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {

@Override public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) {
//we know it is size 4 here, so ignore the offset/length
- return payload[0];
+ return payload[offset];
}
//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//Make everything else 1 so we see the effect of the payload

@@ -26,9 +26,9 @@ import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DefaultSimilarity;
+ import org.apache.lucene.search.spans.MultiSpansWrapper;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.search.spans.TermSpans;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;

@@ -39,7 +39,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Payload;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

@@ -122,7 +121,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
doc.add(newField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- reader = new SlowMultiReaderWrapper(writer.getReader());
+ reader = writer.getReader();
writer.close();

searcher = new IndexSearcher(reader);

@@ -153,9 +152,8 @@ public class TestPayloadTermQuery extends LuceneTestCase {
assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
}
CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
- Spans spans = query.getSpans(searcher.getIndexReader());
+ Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), query);
assertTrue("spans is null and it shouldn't be", spans != null);
assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
/*float score = hits.score(0);
for (int i =1; i < hits.length(); i++)
{

@@ -205,9 +203,8 @@ public class TestPayloadTermQuery extends LuceneTestCase {
}
assertTrue(numTens + " does not equal: " + 10, numTens == 10);
CheckHits.checkExplanations(query, "field", searcher, true);
- Spans spans = query.getSpans(searcher.getIndexReader());
+ Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), query);
assertTrue("spans is null and it shouldn't be", spans != null);
assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
//should be two matches per document
int count = 0;
//100 hits times 2 matches per hit, we should have 200 in count

@@ -247,9 +244,8 @@ public class TestPayloadTermQuery extends LuceneTestCase {
}
assertTrue(numTens + " does not equal: " + 10, numTens == 10);
CheckHits.checkExplanations(query, "field", searcher, true);
- Spans spans = query.getSpans(searcher.getIndexReader());
+ Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), query);
assertTrue("spans is null and it shouldn't be", spans != null);
assertTrue("spans is not an instanceof " + TermSpans.class, spans instanceof TermSpans);
//should be two matches per document
int count = 0;
//100 hits times 2 matches per hit, we should have 200 in count

@@ -293,7 +289,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
@Override
public float scorePayload(int docId, String fieldName, int start, int end, byte[] payload, int offset, int length) {
//we know it is size 4 here, so ignore the offset/length
- return payload[0];
+ return payload[offset];
}

//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

@@ -333,7 +329,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
static class FullSimilarity extends DefaultSimilarity{
public float scorePayload(int docId, String fieldName, byte[] payload, int offset, int length) {
//we know it is size 4 here, so ignore the offset/length
- return payload[0];
+ return payload[offset];
}
}

@@ -20,7 +20,7 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
import java.util.Collection;

- import org.apache.lucene.index.IndexReader;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Similarity;

@@ -82,7 +82,7 @@ final class JustCompileSearchSpans {
}

@Override
- public Spans getSpans(IndexReader reader) throws IOException {
+ public Spans getSpans(AtomicReaderContext context) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}

@@ -0,0 +1,148 @@
+ package org.apache.lucene.search.spans;
+
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one or more
+  * contributor license agreements. See the NOTICE file distributed with
+  * this work for additional information regarding copyright ownership.
+  * The ASF licenses this file to You under the Apache License, Version 2.0
+  * (the "License"); you may not use this file except in compliance with
+  * the License. You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ import java.io.IOException;
+ import java.util.Collection;
+ import java.util.Collections;
+
+ import org.apache.lucene.index.DocsEnum;
+ import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+ import org.apache.lucene.index.IndexReader.ReaderContext;
+ import org.apache.lucene.util.ReaderUtil;
+
+ /**
+  * A wrapper to perform span operations on a non-leaf reader context
+  * <p>
+  * NOTE: This should be used for testing purposes only
+  * @lucene.internal
+  */
+ public class MultiSpansWrapper extends Spans { // can't be package private due to payloads
+
+   private SpanQuery query;
+   private AtomicReaderContext[] leaves;
+   private int leafOrd = 0;
+   private Spans current;
+
+   private MultiSpansWrapper(AtomicReaderContext[] leaves, SpanQuery query) {
+     this.query = query;
+     this.leaves = leaves;
+   }
+
+   public static Spans wrap(ReaderContext topLevelReaderContext, SpanQuery query) throws IOException {
+     AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
+     if(leaves.length == 1) {
+       return query.getSpans(leaves[0]);
+     }
+     return new MultiSpansWrapper(leaves, query);
+   }
+
+   @Override
+   public boolean next() throws IOException {
+     if (leafOrd >= leaves.length) {
+       return false;
+     }
+     if (current == null) {
+       current = query.getSpans(leaves[leafOrd]);
+     }
+     while(true) {
+       if (current.next()) {
+         return true;
+       }
+       if (++leafOrd < leaves.length) {
+         current = query.getSpans(leaves[leafOrd]);
+       } else {
+         current = null;
+         break;
+       }
+     }
+     return false;
+   }
+
+   @Override
+   public boolean skipTo(int target) throws IOException {
+     if (leafOrd >= leaves.length) {
+       return false;
+     }
+
+     int subIndex = ReaderUtil.subIndex(target, leaves);
+     assert subIndex >= leafOrd;
+     if (subIndex != leafOrd) {
+       current = query.getSpans(leaves[subIndex]);
+       leafOrd = subIndex;
+     } else if (current == null) {
+       current = query.getSpans(leaves[leafOrd]);
+     }
+     while (true) {
+       if (current.skipTo(target - leaves[leafOrd].docBase)) {
+         return true;
+       }
+       if (++leafOrd < leaves.length) {
+         current = query.getSpans(leaves[leafOrd]);
+       } else {
+         current = null;
+         break;
+       }
+     }
+     return false;
+   }
+
+   @Override
+   public int doc() {
+     if (current == null) {
+       return DocsEnum.NO_MORE_DOCS;
+     }
+     return current.doc() + leaves[leafOrd].docBase;
+   }
+
+   @Override
+   public int start() {
+     if (current == null) {
+       return DocsEnum.NO_MORE_DOCS;
+     }
+     return current.start();
+   }
+
+   @Override
+   public int end() {
+     if (current == null) {
+       return DocsEnum.NO_MORE_DOCS;
+     }
+     return current.end();
+   }
+
+   @Override
+   public Collection<byte[]> getPayload() throws IOException {
+     if (current == null) {
+       return Collections.emptyList();
+     }
+     return current.getPayload();
+   }
+
+   @Override
+   public boolean isPayloadAvailable() {
+     if (current == null) {
+       return false;
+     }
+     return current.isPayloadAvailable();
+   }
+
+ }

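Hedged test-side usage sketch for the new MultiSpansWrapper (it mirrors how the converted tests below call it; the helper name countMatches is illustrative): wrap() hides the per-leaf iteration and reports doc IDs already rebased by docBase.

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.spans.MultiSpansWrapper;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;

class MultiSpansWrapperSketch {
  static int countMatches(IndexSearcher searcher, SpanTermQuery query) throws IOException {
    Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), query);
    int count = 0;
    while (spans.next()) {
      count++;  // spans.doc() here is a top-level doc ID
    }
    return count;
  }
}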
@@ -30,7 +30,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Payload;
import org.apache.lucene.index.RandomIndexWriter;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;

@@ -176,6 +175,27 @@ public class TestBasics extends LuceneTestCase {
QueryUtils.check(term2);
QueryUtils.checkUnequal(term1,term2);
}

+ public void testSpanTermQuery() throws Exception {
+ SpanTermQuery term1 = new SpanTermQuery(new Term("field", "seventy"));
+ checkHits(term1, new int[]
+ { 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 170,
+ 171, 172, 173, 174, 175, 176, 177, 178, 179, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 370, 371, 372, 373, 374, 375, 376, 377, 378,
+ 379, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 570, 571, 572,
+ 573, 574, 575, 576, 577, 578, 579, 670, 671, 672, 673, 674, 675, 676,
+ 677, 678, 679, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 870,
+ 871, 872, 873, 874, 875, 876, 877, 878, 879, 970, 971, 972, 973, 974,
+ 975, 976, 977, 978, 979, 1070, 1071, 1072, 1073, 1074, 1075, 1076,
+ 1077, 1078, 1079, 1170, 1270, 1370, 1470, 1570, 1670, 1770, 1870, 1970,
+ 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1271, 1272, 1273,
+ 1274, 1275, 1276, 1277, 1278, 1279, 1371, 1372, 1373, 1374, 1375, 1376,
+ 1377, 1378, 1379, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479,
+ 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1671, 1672, 1673,
+ 1674, 1675, 1676, 1677, 1678, 1679, 1771, 1772, 1773, 1774, 1775, 1776,
+ 1777, 1778, 1779, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879,
+ 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979 });
+ }
+
@Test
public void testSpanNearUnordered() throws Exception {

@@ -522,8 +542,8 @@ public class TestBasics extends LuceneTestCase {
public void testSpansSkipTo() throws Exception {
SpanTermQuery t1 = new SpanTermQuery(new Term("field", "seventy"));
SpanTermQuery t2 = new SpanTermQuery(new Term("field", "seventy"));
- Spans s1 = t1.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
- Spans s2 = t2.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+ Spans s1 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t1);
+ Spans s2 = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), t2);

assertTrue(s1.next());
assertTrue(s2.next());

@@ -24,7 +24,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.IndexSearcher;

@@ -254,7 +253,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
SpanQuery q = new SpanOrQuery(q1, new FieldMaskingSpanQuery(q2, "gender"));
check(q, new int[] { 0, 1, 2, 3, 4 });

- Spans span = q.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);

assertEquals(true, span.next());
assertEquals(s(0,0,1), s(span));

@@ -295,8 +294,8 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
check(qA, new int[] { 0, 1, 2, 4 });
check(qB, new int[] { 0, 1, 2, 4 });

- Spans spanA = qA.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
- Spans spanB = qB.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+ Spans spanA = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), qA);
+ Spans spanB = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), qB);

while (spanA.next()) {
assertTrue("spanB not still going", spanB.next());

@@ -316,7 +315,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase {
new FieldMaskingSpanQuery(qB, "id") }, -1, false );
check(q, new int[] { 0, 1, 2, 3 });

- Spans span = q.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);

assertEquals(true, span.next());
assertEquals(s(0,0,1), s(span));

@@ -22,7 +22,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
- import org.apache.lucene.index.SlowMultiReaderWrapper;
+ import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;

@@ -34,6 +34,7 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
+ import org.apache.lucene.util.ReaderUtil;

public class TestNearSpansOrdered extends LuceneTestCase {
protected IndexSearcher searcher;

@@ -62,7 +63,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
doc.add(newField(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- reader = new SlowMultiReaderWrapper(writer.getReader());
+ reader = writer.getReader();
writer.close();
searcher = new IndexSearcher(reader);
}

@@ -102,7 +103,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {

public void testNearSpansNext() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.next());
assertEquals(s(0,0,3), s(span));
assertEquals(true, span.next());

@@ -117,7 +118,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
*/
public void testNearSpansSkipToLikeNext() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.skipTo(0));
assertEquals(s(0,0,3), s(span));
assertEquals(true, span.skipTo(1));

@@ -127,7 +128,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {

public void testNearSpansNextThenSkipTo() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.next());
assertEquals(s(0,0,3), s(span));
assertEquals(true, span.skipTo(1));

@@ -137,7 +138,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {

public void testNearSpansNextThenSkipPast() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.next());
assertEquals(s(0,0,3), s(span));
assertEquals(false, span.skipTo(2));

@@ -145,20 +146,20 @@ public class TestNearSpansOrdered extends LuceneTestCase {

public void testNearSpansSkipPast() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(false, span.skipTo(2));
}

public void testNearSpansSkipTo0() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.skipTo(0));
assertEquals(s(0,0,3), s(span));
}

public void testNearSpansSkipTo1() throws Exception {
SpanNearQuery q = makeQuery();
- Spans span = q.getSpans(searcher.getIndexReader());
+ Spans span = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), q);
assertEquals(true, span.skipTo(1));
assertEquals(s(1,0,4), s(span));
}

@@ -170,8 +171,9 @@ public class TestNearSpansOrdered extends LuceneTestCase {
public void testSpanNearScorerSkipTo1() throws Exception {
SpanNearQuery q = makeQuery();
Weight w = q.weight(searcher);
- assertTrue(searcher.getTopReaderContext().isAtomic);
- Scorer s = w.scorer((AtomicReaderContext) searcher.getTopReaderContext(), ScorerContext.def());
+ ReaderContext topReaderContext = searcher.getTopReaderContext();
+ AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+ Scorer s = w.scorer(leaves[0], ScorerContext.def());
assertEquals(1, s.advance(1));
}
/**

@@ -180,8 +182,10 @@ public class TestNearSpansOrdered extends LuceneTestCase {
*/
public void testSpanNearScorerExplain() throws Exception {
SpanNearQuery q = makeQuery();
- assertTrue(searcher.getTopReaderContext().isAtomic);
- Explanation e = q.weight(searcher).explain((AtomicReaderContext) searcher.getTopReaderContext(), 1);
+ ReaderContext topReaderContext = searcher.getTopReaderContext();
+ AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+
+ Explanation e = q.weight(searcher).explain(leaves[0], 1);
assertTrue("Scorer explanation value for doc#1 isn't positive: "
+ e.toString(),
0.0f < e.getValue());

@ -35,7 +35,6 @@ import org.apache.lucene.document.Field;
|
|||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.SlowMultiReaderWrapper;
|
||||
import org.apache.lucene.index.Payload;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.DefaultSimilarity;
|
||||
|
@ -68,12 +67,12 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
SpanTermQuery stq;
|
||||
Spans spans;
|
||||
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "seventy"));
|
||||
spans = stq.getSpans(indexReader);
|
||||
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 100, 1, 1, 1);
|
||||
|
||||
stq = new SpanTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "seventy"));
|
||||
spans = stq.getSpans(indexReader);
|
||||
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 100, 0, 0, 0);
|
||||
}
|
||||
|
@ -84,7 +83,7 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
SpanFirstQuery sfq;
|
||||
match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
|
||||
sfq = new SpanFirstQuery(match, 2);
|
||||
Spans spans = sfq.getSpans(indexReader);
|
||||
Spans spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq);
|
||||
checkSpans(spans, 109, 1, 1, 1);
|
||||
//Test more complicated subclause
|
||||
SpanQuery[] clauses = new SpanQuery[2];
|
||||
|
@ -92,11 +91,11 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
|
||||
match = new SpanNearQuery(clauses, 0, true);
|
||||
sfq = new SpanFirstQuery(match, 2);
|
||||
checkSpans(sfq.getSpans(indexReader), 100, 2, 1, 1);
|
||||
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
|
||||
|
||||
match = new SpanNearQuery(clauses, 0, false);
|
||||
sfq = new SpanFirstQuery(match, 2);
|
||||
checkSpans(sfq.getSpans(indexReader), 100, 2, 1, 1);
|
||||
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
|
||||
|
||||
}
|
||||
|
||||
|
@ -119,8 +118,9 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
writer.addDocument(doc);
|
||||
IndexReader reader = writer.getReader();
|
||||
writer.close();
|
||||
|
||||
|
||||
checkSpans(snq.getSpans(new SlowMultiReaderWrapper(reader)), 1,new int[]{2});
|
||||
checkSpans(MultiSpansWrapper.wrap(reader.getTopReaderContext(), snq), 1,new int[]{2});
|
||||
reader.close();
|
||||
directory.close();
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
Spans spans;
|
||||
IndexSearcher searcher = getSearcher();
|
||||
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "mark"));
|
||||
spans = stq.getSpans(searcher.getIndexReader());
|
||||
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), stq);
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 0, null);
|
||||
|
||||
|
@ -141,7 +141,7 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
clauses[2] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "xx"));
|
||||
SpanNearQuery spanNearQuery = new SpanNearQuery(clauses, 12, false);
|
||||
|
||||
spans = spanNearQuery.getSpans(searcher.getIndexReader());
|
||||
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), spanNearQuery);
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 2, new int[]{3,3});
|
||||
|
||||
|
@ -152,8 +152,8 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
|
||||
spanNearQuery = new SpanNearQuery(clauses, 6, true);
|
||||
|
||||
|
||||
spans = spanNearQuery.getSpans(searcher.getIndexReader());
|
||||
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), spanNearQuery);
|
||||
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 1, new int[]{3});
|
||||
|
||||
|
@ -175,7 +175,7 @@ public class TestPayloadSpans extends LuceneTestCase {
|
|||
|
||||
// yy within 6 of xx within 6 of rr
|
||||
|
||||
spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
|
||||
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery);
|
||||
assertTrue("spans is null and it shouldn't be", spans != null);
|
||||
checkSpans(spans, 2, new int[]{3,3});
|
||||
closeIndexReader.close();
|
||||
|
@@ -206,8 +206,8 @@ public class TestPayloadSpans extends LuceneTestCase {
clauses3[1] = snq;

SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery);

spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 1, new int[]{3});
closeIndexReader.close();

@@ -244,7 +244,7 @@ public class TestPayloadSpans extends LuceneTestCase {

SpanNearQuery nestedSpanNearQuery = new SpanNearQuery(clauses3, 6, false);

spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader());
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), nestedSpanNearQuery);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 2, new int[]{8, 8});
closeIndexReader.close();
@@ -261,7 +261,7 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(new Field("content", new StringReader("a b c d e f g h i j a k")));
writer.addDocument(doc);

IndexReader reader = new SlowMultiReaderWrapper(writer.getReader());
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
writer.close();
@@ -269,7 +269,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 1, true);
Spans spans = snq.getSpans(is.getIndexReader());
Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq);

TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<String>();

@@ -298,7 +298,7 @@ public class TestPayloadSpans extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("content", new StringReader("a b a d k f a h i k a k")));
writer.addDocument(doc);
IndexReader reader = new SlowMultiReaderWrapper(writer.getReader());
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
writer.close();

@@ -306,7 +306,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
Spans spans = snq.getSpans(is.getIndexReader());
Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq);

TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<String>();

@@ -334,7 +334,7 @@ public class TestPayloadSpans extends LuceneTestCase {
Document doc = new Document();
doc.add(new Field("content", new StringReader("j k a l f k k p a t a k l k t a")));
writer.addDocument(doc);
IndexReader reader = new SlowMultiReaderWrapper(writer.getReader());
IndexReader reader = writer.getReader();
IndexSearcher is = new IndexSearcher(reader);
writer.close();

@@ -342,7 +342,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
SpanQuery[] sqs = { stq1, stq2 };
SpanNearQuery snq = new SpanNearQuery(sqs, 0, true);
Spans spans = snq.getSpans(is.getIndexReader());
Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq);

TopDocs topDocs = is.search(snq, 1);
Set<String> payloadSet = new HashSet<String>();
@@ -376,11 +376,11 @@ public class TestPayloadSpans extends LuceneTestCase {
doc.add(newField(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);

IndexReader reader = new SlowMultiReaderWrapper(writer.getReader());
IndexReader reader = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);

PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getIndexReader());
PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext());

Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
if(VERBOSE)
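Note: PayloadSpanUtil is now constructed from the searcher's top-level ReaderContext instead of an IndexReader, matching the per-segment getSpans it has to drive internally. A short usage sketch built only from the calls visible in this hunk; the System.out dump at the end is illustrative, not part of the test.

PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext());
Collection<byte[]> payloads =
    psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr")));
for (byte[] payload : payloads) {
  System.out.println(new String(payload)); // each entry is one raw payload from a matching position
}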
@@ -440,7 +440,7 @@ public class TestPayloadSpans extends LuceneTestCase {
writer.addDocument(doc);
}

closeIndexReader = new SlowMultiReaderWrapper(writer.getReader());
closeIndexReader = writer.getReader();
writer.close();

IndexSearcher searcher = new IndexSearcher(closeIndexReader);
@@ -28,16 +28,18 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Weight.ScorerContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.SlowMultiReaderWrapper;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;

import java.io.IOException;

public class TestSpans extends LuceneTestCase {
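Note: the newly imported ReaderContext, AtomicReaderContext and ReaderUtil carry the per-segment plumbing used below. A small sketch of the two ReaderUtil calls the reworked testSpanScorerZeroSloppyFreq relies on, using the same searcher field as the test:

ReaderContext top = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(top);   // flatten the reader tree into its segments
int subIndex = ReaderUtil.subIndex(11, leaves);          // which leaf holds top-level doc 11
int localDoc = 11 - leaves[subIndex].docBase;            // that doc's segment-local id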
@@ -197,7 +199,7 @@ public class TestSpans extends LuceneTestCase {
makeSpanTermQuery("t3") },
slop,
ordered);
Spans spans = snq.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq);

assertTrue("first range", spans.next());
assertEquals("first doc", 11, spans.doc());
@@ -223,7 +225,7 @@ public class TestSpans extends LuceneTestCase {
makeSpanTermQuery("u2") },
0,
false);
Spans spans = snq.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
Spans spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq);
assertTrue("Does not have next and it should", spans.next());
assertEquals("doc", 4, spans.doc());
assertEquals("start", 1, spans.start());

@@ -259,7 +261,7 @@ public class TestSpans extends LuceneTestCase {
},
1,
false);
spans = snq.getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
spans = MultiSpansWrapper.wrap(searcher.getTopReaderContext(), snq);
assertTrue("Does not have next and it should", spans.next());
assertEquals("doc", 4, spans.doc());
assertEquals("start", 0, spans.start());

@@ -317,7 +319,7 @@ public class TestSpans extends LuceneTestCase {
for (int i = 0; i < terms.length; i++) {
sqa[i] = makeSpanTermQuery(terms[i]);
}
return (new SpanOrQuery(sqa)).getSpans(new SlowMultiReaderWrapper(searcher.getIndexReader()));
return MultiSpansWrapper.wrap(searcher.getTopReaderContext(), new SpanOrQuery(sqa));
}

private void tstNextSpans(Spans spans, int doc, int start, int end)
@@ -402,34 +404,43 @@ public class TestSpans extends LuceneTestCase {
public void testSpanScorerZeroSloppyFreq() throws Exception {
boolean ordered = true;
int slop = 1;

final Similarity sim = new DefaultSimilarity() {
@Override
public float sloppyFreq(int distance) {
return 0.0f;
ReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
int subIndex = ReaderUtil.subIndex(11, leaves);
for (int i = 0; i < leaves.length; i++) {

final Similarity sim = new DefaultSimilarity() {
@Override
public float sloppyFreq(int distance) {
return 0.0f;
}
};

final Similarity oldSim = searcher.getSimilarity();
Scorer spanScorer;
try {
searcher.setSimilarity(sim);
SpanNearQuery snq = new SpanNearQuery(
new SpanQuery[] {
makeSpanTermQuery("t1"),
makeSpanTermQuery("t2") },
slop,
ordered);

spanScorer = snq.weight(searcher).scorer(leaves[i], ScorerContext.def());
} finally {
searcher.setSimilarity(oldSim);
}
if (i == subIndex) {
assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("first doc number", spanScorer.docID() + leaves[i].docBase, 11);
float score = spanScorer.score();
assertTrue("first doc score should be zero, " + score, score == 0.0f);
} else {
assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
}
};

final Similarity oldSim = searcher.getSimilarity();
Scorer spanScorer;
try {
searcher.setSimilarity(sim);
SpanNearQuery snq = new SpanNearQuery(
new SpanQuery[] {
makeSpanTermQuery("t1"),
makeSpanTermQuery("t2") },
slop,
ordered);

spanScorer = snq.weight(searcher).scorer(new AtomicReaderContext(new SlowMultiReaderWrapper(searcher.getIndexReader())), ScorerContext.def());
} finally {
searcher.setSimilarity(oldSim);
}
assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("first doc number", spanScorer.docID(), 11);
float score = spanScorer.score();
assertTrue("first doc score should be zero, " + score, score == 0.0f);
assertTrue("no second doc", spanScorer.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
}

// LUCENE-1404
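Note: the reworked testSpanScorerZeroSloppyFreq above loops over the index leaves, asks the Weight for a Scorer per AtomicReaderContext, and rebases the scorer's segment-local docID() with the leaf's docBase before comparing it to the expected top-level doc. A condensed sketch of that per-leaf pattern, using only calls that appear in the hunk; the null check on the scorer is a defensive addition of this sketch, not taken from the test.

SpanNearQuery snq = new SpanNearQuery(
    new SpanQuery[] { makeSpanTermQuery("t1"), makeSpanTermQuery("t2") }, slop, ordered);
Weight weight = snq.weight(searcher);
for (AtomicReaderContext leaf : ReaderUtil.leaves(searcher.getTopReaderContext())) {
  Scorer scorer = weight.scorer(leaf, ScorerContext.def());
  if (scorer == null) {
    continue;                                        // this segment has no matching terms
  }
  while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
    int topLevelDoc = scorer.docID() + leaf.docBase; // segment-local id + segment base
    System.out.println("doc " + topLevelDoc + " score=" + scorer.score());
  }
}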