LUCENE-2831: Revise Weight#scorer & Filter#getDocIdSet API to pass Readers context

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1055636 13f79535-47bb-0310-9956-ffa450edef68
Simon Willnauer 2011-01-05 20:47:08 +00:00
parent acd9519e59
commit 36b17aab62
81 changed files with 855 additions and 486 deletions

View File

@@ -128,6 +128,10 @@ Changes in backwards compatibility policy
   ParallelMultiSearcher into IndexSearcher as an optional
   ExecutorService passed to its ctor. (Mike McCandless)
 
+* LUCENE-2831: Changed Weight#scorer, Weight#explain & Filter#getDocIdSet to
+  operate on a ReaderContext instead of directly on IndexReader to enable
+  searches to be aware of IndexSearcher's context. (Simon Willnauer)
+
 Changes in Runtime Behavior
 
 * LUCENE-2650, LUCENE-2825: The behavior of FSDirectory.open has changed. On 64-bit
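
For a sense of what the revised contract looks like from a Filter author's side, here is a minimal sketch of a custom Filter ported to the new signature. MatchNoneFilter is an invented name for illustration, not a class from this commit:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.OpenBitSet;

// Hypothetical filter that matches no documents. The only porting step is the
// signature change; the per-segment reader now hangs off the passed context.
public class MatchNoneFilter extends Filter {
  @Override
  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
    final IndexReader reader = context.reader; // was the method argument before
    return new OpenBitSet(reader.maxDoc());    // all bits clear: no matches
  }
}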

View File

@@ -31,6 +31,7 @@ import java.util.Comparator;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.BytesRef;
@@ -45,6 +46,8 @@ import org.apache.lucene.util.Bits;
 public class InstantiatedIndexReader extends IndexReader {
 
   private final InstantiatedIndex index;
+  private ReaderContext context = new AtomicReaderContext(this);
 
   public InstantiatedIndexReader(InstantiatedIndex index) {
     super();
@@ -424,6 +427,11 @@ public class InstantiatedIndexReader extends IndexReader {
       }
     };
   }
+
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return context;
+  }
 
   @Override
   public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {

View File

@@ -48,6 +48,7 @@ import org.apache.lucene.index.TermFreqVector;
 import org.apache.lucene.index.TermPositionVector;
 import org.apache.lucene.index.TermVectorMapper;
 import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
@@ -738,6 +739,7 @@ public class MemoryIndex implements Serializable {
   private final class MemoryIndexReader extends IndexReader {
 
     private IndexSearcher searcher; // needed to find searcher.getSimilarity()
+    private final ReaderContext readerInfos = new AtomicReaderContext(this);
 
     private MemoryIndexReader() {
       super(); // avoid as much superclass baggage as possible
@@ -764,6 +766,11 @@ public class MemoryIndex implements Serializable {
       if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + term + ", freq:" + freq);
       return freq;
     }
+
+    @Override
+    public ReaderContext getTopReaderContext() {
+      return readerInfos;
+    }
 
     @Override
     public Fields fields() {

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.OpenBitSetDISI;
@@ -41,10 +42,10 @@ public class BooleanFilter extends Filter
   ArrayList<Filter> notFilters = null;
   ArrayList<Filter> mustFilters = null;
 
-  private DocIdSetIterator getDISI(ArrayList<Filter> filters, int index, IndexReader reader)
+  private DocIdSetIterator getDISI(ArrayList<Filter> filters, int index, ReaderContext info)
   throws IOException
   {
-    return filters.get(index).getDocIdSet(reader).iterator();
+    return filters.get(index).getDocIdSet(info).iterator();
   }
 
   /**
@@ -52,21 +53,21 @@ public class BooleanFilter extends Filter
    * of the filters that have been added.
    */
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException
   {
     OpenBitSetDISI res = null;
+    final IndexReader reader = context.reader;
     if (shouldFilters != null) {
       for (int i = 0; i < shouldFilters.size(); i++) {
         if (res == null) {
-          res = new OpenBitSetDISI(getDISI(shouldFilters, i, reader), reader.maxDoc());
+          res = new OpenBitSetDISI(getDISI(shouldFilters, i, context), reader.maxDoc());
         } else {
-          DocIdSet dis = shouldFilters.get(i).getDocIdSet(reader);
+          DocIdSet dis = shouldFilters.get(i).getDocIdSet(context);
           if(dis instanceof OpenBitSet) {
             // optimized case for OpenBitSets
             res.or((OpenBitSet) dis);
           } else {
-            res.inPlaceOr(getDISI(shouldFilters, i, reader));
+            res.inPlaceOr(getDISI(shouldFilters, i, context));
           }
         }
       }
@@ -75,15 +76,15 @@ public class BooleanFilter extends Filter
     if (notFilters!=null) {
       for (int i = 0; i < notFilters.size(); i++) {
         if (res == null) {
-          res = new OpenBitSetDISI(getDISI(notFilters, i, reader), reader.maxDoc());
+          res = new OpenBitSetDISI(getDISI(notFilters, i, context), reader.maxDoc());
           res.flip(0, reader.maxDoc()); // NOTE: may set bits on deleted docs
         } else {
-          DocIdSet dis = notFilters.get(i).getDocIdSet(reader);
+          DocIdSet dis = notFilters.get(i).getDocIdSet(context);
           if(dis instanceof OpenBitSet) {
             // optimized case for OpenBitSets
             res.andNot((OpenBitSet) dis);
           } else {
-            res.inPlaceNot(getDISI(notFilters, i, reader));
+            res.inPlaceNot(getDISI(notFilters, i, context));
           }
         }
       }
@@ -92,14 +93,14 @@ public class BooleanFilter extends Filter
     if (mustFilters!=null) {
       for (int i = 0; i < mustFilters.size(); i++) {
         if (res == null) {
-          res = new OpenBitSetDISI(getDISI(mustFilters, i, reader), reader.maxDoc());
+          res = new OpenBitSetDISI(getDISI(mustFilters, i, context), reader.maxDoc());
         } else {
-          DocIdSet dis = mustFilters.get(i).getDocIdSet(reader);
+          DocIdSet dis = mustFilters.get(i).getDocIdSet(context);
           if(dis instanceof OpenBitSet) {
             // optimized case for OpenBitSets
             res.and((OpenBitSet) dis);
           } else {
-            res.inPlaceAnd(getDISI(mustFilters, i, reader));
+            res.inPlaceAnd(getDISI(mustFilters, i, context));
          }
        }
      }
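
As a caller-side sketch of the converted BooleanFilter (not part of the diff; the field and term values are invented for illustration), the same per-segment ReaderContext is now handed to the composed filter:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.FilterClause;

// Assumes `reader` is an open atomic IndexReader (e.g. a SegmentReader or a
// SlowMultiReaderWrapper); "status"/"published" are made-up example values.
ReaderContext context = reader.getTopReaderContext();

TermsFilter published = new TermsFilter();
published.addTerm(new Term("status", "published"));

BooleanFilter bf = new BooleanFilter();
bf.add(new FilterClause(published, Occur.MUST));
DocIdSet matches = bf.getDocIdSet(context); // the context, not the raw reader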

View File

@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
@@ -96,21 +97,21 @@ public class ChainedFilter extends Filter
    * {@link Filter#getDocIdSet}.
    */
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException
   {
     int[] index = new int[1]; // use array as reference to modifiable int;
     index[0] = 0; // an object attribute would not be thread safe.
     if (logic != -1)
-      return getDocIdSet(reader, logic, index);
+      return getDocIdSet(context, logic, index);
     else if (logicArray != null)
-      return getDocIdSet(reader, logicArray, index);
+      return getDocIdSet(context, logicArray, index);
     else
-      return getDocIdSet(reader, DEFAULT, index);
+      return getDocIdSet(context, DEFAULT, index);
   }
 
-  private DocIdSetIterator getDISI(Filter filter, IndexReader reader)
+  private DocIdSetIterator getDISI(Filter filter, ReaderContext info)
   throws IOException {
-    DocIdSet docIdSet = filter.getDocIdSet(reader);
+    DocIdSet docIdSet = filter.getDocIdSet(info);
     if (docIdSet == null) {
       return DocIdSet.EMPTY_DOCIDSET.iterator();
     } else {
@@ -123,9 +124,10 @@ public class ChainedFilter extends Filter
     }
   }
 
-  private OpenBitSetDISI initialResult(IndexReader reader, int logic, int[] index)
+  private OpenBitSetDISI initialResult(ReaderContext info, int logic, int[] index)
   throws IOException
   {
+    IndexReader reader = info.reader;
     OpenBitSetDISI result;
     /**
      * First AND operation takes place against a completely false
@@ -133,12 +135,12 @@ public class ChainedFilter extends Filter
      */
     if (logic == AND)
     {
-      result = new OpenBitSetDISI(getDISI(chain[index[0]], reader), reader.maxDoc());
+      result = new OpenBitSetDISI(getDISI(chain[index[0]], info), reader.maxDoc());
       ++index[0];
     }
     else if (logic == ANDNOT)
     {
-      result = new OpenBitSetDISI(getDISI(chain[index[0]], reader), reader.maxDoc());
+      result = new OpenBitSetDISI(getDISI(chain[index[0]], info), reader.maxDoc());
       result.flip(0,reader.maxDoc()); // NOTE: may set bits for deleted docs.
       ++index[0];
     }
@@ -155,13 +157,13 @@ public class ChainedFilter extends Filter
    * @param logic Logical operation
    * @return DocIdSet
    */
-  private DocIdSet getDocIdSet(IndexReader reader, int logic, int[] index)
+  private DocIdSet getDocIdSet(ReaderContext info, int logic, int[] index)
   throws IOException
   {
-    OpenBitSetDISI result = initialResult(reader, logic, index);
+    OpenBitSetDISI result = initialResult(info, logic, index);
     for (; index[0] < chain.length; index[0]++)
     {
-      doChain(result, logic, chain[index[0]].getDocIdSet(reader));
+      doChain(result, logic, chain[index[0]].getDocIdSet(info));
     }
     return result;
   }
@@ -172,16 +174,16 @@ public class ChainedFilter extends Filter
    * @param logic Logical operation
    * @return DocIdSet
    */
-  private DocIdSet getDocIdSet(IndexReader reader, int[] logic, int[] index)
+  private DocIdSet getDocIdSet(ReaderContext info, int[] logic, int[] index)
   throws IOException
   {
     if (logic.length != chain.length)
       throw new IllegalArgumentException("Invalid number of elements in logic array");
 
-    OpenBitSetDISI result = initialResult(reader, logic[0], index);
+    OpenBitSetDISI result = initialResult(info, logic[0], index);
     for (; index[0] < chain.length; index[0]++)
     {
-      doChain(result, logic[index[0]], chain[index[0]].getDocIdSet(reader));
+      doChain(result, logic[index[0]], chain[index[0]].getDocIdSet(info));
     }
     return result;
   }

View File

@@ -19,6 +19,7 @@ import java.io.IOException;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.TermsEnum;
@@ -27,7 +28,8 @@ import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.Bits;
 
 public class DuplicateFilter extends Filter
-{
+{ // TODO: make duplicate filter aware of ReaderContext such that we can
+  // filter duplicates across segments
 
   String fieldName;
 
@@ -70,15 +72,15 @@ public class DuplicateFilter extends Filter
   }
 
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException
   {
     if(processingMode==PM_FAST_INVALIDATION)
     {
-      return fastBits(reader);
+      return fastBits(context.reader);
     }
     else
     {
-      return correctBits(reader);
+      return correctBits(context.reader);
     }
   }
 
@@ -96,7 +98,7 @@ public class DuplicateFilter extends Filter
       } else {
         docs = termsEnum.docs(delDocs, docs);
         int doc = docs.nextDoc();
-        if (doc != docs.NO_MORE_DOCS) {
+        if (doc != DocsEnum.NO_MORE_DOCS) {
           if (keepMode == KM_USE_FIRST_OCCURRENCE) {
             bits.set(doc);
           } else {
@@ -104,7 +106,7 @@ public class DuplicateFilter extends Filter
             while (true) {
               lastDoc = doc;
              doc = docs.nextDoc();
-              if (doc == docs.NO_MORE_DOCS) {
+              if (doc == DocsEnum.NO_MORE_DOCS) {
                break;
              }
            }
@@ -136,7 +138,7 @@ public class DuplicateFilter extends Filter
         // unset potential duplicates
         docs = termsEnum.docs(delDocs, docs);
         int doc = docs.nextDoc();
-        if (doc != docs.NO_MORE_DOCS) {
+        if (doc != DocsEnum.NO_MORE_DOCS) {
          if (keepMode == KM_USE_FIRST_OCCURRENCE) {
            doc = docs.nextDoc();
          }
@@ -147,7 +149,7 @@ public class DuplicateFilter extends Filter
            lastDoc = doc;
            bits.clear(lastDoc);
            doc = docs.nextDoc();
-            if (doc == docs.NO_MORE_DOCS) {
+            if (doc == DocsEnum.NO_MORE_DOCS) {
              break;
            }
          }

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Comparator;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
@@ -108,8 +109,8 @@ public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod
      * results.
      */
    @Override
-    public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-      final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(reader, query.field);
+    public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
+      final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader, query.field);
      final OpenBitSet termSet = new OpenBitSet(fcsi.numOrd());
      TermsEnum termsEnum = query.getTermsEnum(new Terms() {
 
@@ -142,7 +143,7 @@ public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod
        return DocIdSet.EMPTY_DOCIDSET;
      }
 
-      return new FieldCacheRangeFilter.FieldCacheDocIdSet(reader, true) {
+      return new FieldCacheRangeFilter.FieldCacheDocIdSet(context.reader, true) {
        @Override
        boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException {
          return termSet.fastGet(fcsi.getOrd(doc));

View File

@@ -23,6 +23,7 @@ import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.Terms;
@@ -57,7 +58,8 @@ public class TermsFilter extends Filter
    * @see org.apache.lucene.search.Filter#getDocIdSet(org.apache.lucene.index.IndexReader)
    */
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
+    IndexReader reader = context.reader;
     OpenBitSet result=new OpenBitSet(reader.maxDoc());
     Fields fields = reader.fields();
     BytesRef br = new BytesRef();

View File

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.Term;
@@ -83,7 +84,7 @@ public class BooleanFilterTest extends LuceneTestCase {
   private void tstFilterCard(String mes, int expected, Filter filt)
     throws Throwable
   {
-    DocIdSetIterator disi = filt.getDocIdSet(reader).iterator();
+    DocIdSetIterator disi = filt.getDocIdSet(new AtomicReaderContext(reader)).iterator();
     int actual = 0;
     while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
       actual++;

View File

@@ -21,6 +21,7 @@ import java.util.HashSet;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
@@ -59,23 +60,25 @@ public class TermsFilterTest extends LuceneTestCase {
       w.addDocument(doc);
     }
     IndexReader reader = new SlowMultiReaderWrapper(w.getReader());
+    ReaderContext context = reader.getTopReaderContext();
+    assertTrue(context.isAtomic);
     w.close();
 
     TermsFilter tf=new TermsFilter();
     tf.addTerm(new Term(fieldName,"19"));
-    OpenBitSet bits = (OpenBitSet)tf.getDocIdSet(reader);
+    OpenBitSet bits = (OpenBitSet)tf.getDocIdSet(context);
     assertEquals("Must match nothing", 0, bits.cardinality());
 
     tf.addTerm(new Term(fieldName,"20"));
-    bits = (OpenBitSet)tf.getDocIdSet(reader);
+    bits = (OpenBitSet)tf.getDocIdSet(context);
     assertEquals("Must match 1", 1, bits.cardinality());
 
     tf.addTerm(new Term(fieldName,"10"));
-    bits = (OpenBitSet)tf.getDocIdSet(reader);
+    bits = (OpenBitSet)tf.getDocIdSet(context);
     assertEquals("Must match 2", 2, bits.cardinality());
 
     tf.addTerm(new Term(fieldName,"00"));
-    bits = (OpenBitSet)tf.getDocIdSet(reader);
+    bits = (OpenBitSet)tf.getDocIdSet(context);
     assertEquals("Must match 2", 2, bits.cardinality());
 
     reader.close();

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.spatial.geohash;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.FieldCache.DocTerms;
 import org.apache.lucene.search.Filter;
@@ -62,15 +62,15 @@ public class GeoHashDistanceFilter extends DistanceFilter {
   }
 
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
 
-    final DocTerms geoHashValues = FieldCache.DEFAULT.getTerms(reader, geoHashField);
+    final DocTerms geoHashValues = FieldCache.DEFAULT.getTerms(context.reader, geoHashField);
     final BytesRef br = new BytesRef();
 
     final int docBase = nextDocBase;
-    nextDocBase += reader.maxDoc();
+    nextDocBase += context.reader.maxDoc();
 
-    return new FilteredDocIdSet(startingFilter.getDocIdSet(reader)) {
+    return new FilteredDocIdSet(startingFilter.getDocIdSet(context)) {
       @Override
       public boolean match(int doc) {

View File

@@ -20,7 +20,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -45,8 +45,8 @@ public class CartesianShapeFilter extends Filter {
   }
 
   @Override
-  public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
-    final Bits delDocs = reader.getDeletedDocs();
+  public DocIdSet getDocIdSet(final ReaderContext context) throws IOException {
+    final Bits delDocs = context.reader.getDeletedDocs();
     final List<Double> area = shape.getArea();
     final int sz = area.size();
 
@@ -58,7 +58,7 @@ public class CartesianShapeFilter extends Filter {
       return new DocIdSet() {
         @Override
         public DocIdSetIterator iterator() throws IOException {
-          return reader.termDocsEnum(delDocs, fieldName, bytesRef);
+          return context.reader.termDocsEnum(delDocs, fieldName, bytesRef);
         }
 
         @Override
@@ -67,11 +67,11 @@ public class CartesianShapeFilter extends Filter {
        }
      };
    } else {
-      final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
+      final OpenBitSet bits = new OpenBitSet(context.reader.maxDoc());
      for (int i =0; i< sz; i++) {
        double boxId = area.get(i).doubleValue();
        NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(boxId), 0, bytesRef);
-        final DocsEnum docsEnum = reader.termDocsEnum(delDocs, fieldName, bytesRef);
+        final DocsEnum docsEnum = context.reader.termDocsEnum(delDocs, fieldName, bytesRef);
        if (docsEnum == null) continue;
        // iterate through all documents
        // which have this boxId

View File

@@ -18,7 +18,8 @@
 package org.apache.lucene.spatial.tier;
 
 import java.io.IOException;
-import org.apache.lucene.index.IndexReader;
+
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.FilteredDocIdSet;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.Filter;
@@ -64,15 +65,15 @@ public class LatLongDistanceFilter extends DistanceFilter {
   }
 
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
 
-    final double[] latIndex = FieldCache.DEFAULT.getDoubles(reader, latField);
-    final double[] lngIndex = FieldCache.DEFAULT.getDoubles(reader, lngField);
+    final double[] latIndex = FieldCache.DEFAULT.getDoubles(context.reader, latField);
+    final double[] lngIndex = FieldCache.DEFAULT.getDoubles(context.reader, lngField);
 
     final int docBase = nextDocBase;
-    nextDocBase += reader.maxDoc();
+    nextDocBase += context.reader.maxDoc();
 
-    return new FilteredDocIdSet(startingFilter.getDocIdSet(reader)) {
+    return new FilteredDocIdSet(startingFilter.getDocIdSet(context)) {
       @Override
       protected boolean match(int doc) {
         double x = latIndex[doc];

View File

@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexReader;
@@ -30,6 +31,7 @@ import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.store.Directory;
 
+
 public class TestDistance extends LuceneTestCase {
 
   private Directory directory;
@@ -100,9 +102,9 @@ public class TestDistance extends LuceneTestCase {
     LatLongDistanceFilter f = new LatLongDistanceFilter(new QueryWrapperFilter(new MatchAllDocsQuery()),
                                                         lat, lng, 1.0, latField, lngField);
 
-    IndexReader[] readers = r.getSequentialSubReaders();
-    for(int i=0;i<readers.length;i++) {
-      f.getDocIdSet(readers[i]);
+    AtomicReaderContext[] leaves = r.getTopReaderContext().leaves();
+    for (int i = 0; i < leaves.length; i++) {
+      f.getDocIdSet(leaves[i]);
     }
     r.close();
   }

View File

@@ -19,7 +19,7 @@ package org.apache.lucene.xmlparser.builders;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeFilter;
@@ -157,7 +157,7 @@ public class NumericRangeFilterBuilder implements FilterBuilder {
     private static final long serialVersionUID = 1L;
 
     @Override
-    public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+    public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
       return null;
     }

View File

@@ -67,7 +67,7 @@ public class TestNumericRangeFilterBuilder extends LuceneTestCase {
     IndexReader reader = IndexReader.open(ramDir, true);
     try
     {
-      assertNull(filter.getDocIdSet(reader));
+      assertNull(filter.getDocIdSet(reader.getTopReaderContext()));
     }
     finally
     {

View File

@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -364,12 +365,16 @@ class BufferedDeletes {
       // Delete by query
       if (deletes.queries.size() > 0) {
         IndexSearcher searcher = new IndexSearcher(reader);
+        final ReaderContext readerContext = searcher.getTopReaderContext();
+        assert readerContext.isAtomic;
         try {
           for (Entry<Query, Integer> entry : deletes.queries.entrySet()) {
             Query query = entry.getKey();
             int limit = entry.getValue().intValue();
             Weight weight = query.weight(searcher);
-            Scorer scorer = weight.scorer(reader, true, false);
+            Scorer scorer = weight.scorer(readerContext, true, false);
             if (scorer != null) {
               while(true) {
                 int doc = scorer.nextDoc();
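
The hunk above shows the pattern this commit applies everywhere a Scorer is pulled from a Weight: resolve the searcher's top-level context once, then pass it (rather than the IndexReader) to Weight#scorer. A condensed sketch of the same loop, with `reader` and `query` standing in for any open atomic IndexReader and Query:

// Sketch only, not from the commit.
IndexSearcher searcher = new IndexSearcher(reader);
ReaderContext context = searcher.getTopReaderContext(); // atomic in this setup
Weight weight = query.weight(searcher);
Scorer scorer = weight.scorer(context, true, false);    // null => no matches
if (scorer != null) {
  int doc;
  while ((doc = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    // doc is relative to this (atomic) context's reader
  }
}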

View File

@@ -35,7 +35,6 @@ import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.BytesRef;
 
 import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
@@ -60,8 +59,8 @@ class DirectoryReader extends IndexReader implements Cloneable {
   private boolean rollbackHasChanges;
 
   private SegmentReader[] subReaders;
+  private ReaderContext topLevelReaderContext;
   private int[] starts;                           // 1st docno for each segment
-  private final Map<SegmentReader,ReaderUtil.Slice> subReaderToSlice = new HashMap<SegmentReader,ReaderUtil.Slice>();
   private int maxDoc = 0;
   private int numDocs = -1;
   private boolean hasDeletions = false;
@@ -300,25 +299,22 @@ class DirectoryReader extends IndexReader implements Cloneable {
   private void initialize(SegmentReader[] subReaders) throws IOException {
     this.subReaders = subReaders;
     starts = new int[subReaders.length + 1];    // build starts array
+    final AtomicReaderContext[] subReaderCtx = new AtomicReaderContext[subReaders.length];
+    topLevelReaderContext = new CompositeReaderContext(this, subReaderCtx, subReaderCtx);
     final List<Fields> subFields = new ArrayList<Fields>();
-    final List<ReaderUtil.Slice> fieldSlices = new ArrayList<ReaderUtil.Slice>();
 
     for (int i = 0; i < subReaders.length; i++) {
       starts[i] = maxDoc;
+      subReaderCtx[i] = new AtomicReaderContext(topLevelReaderContext, subReaders[i], i, maxDoc, i, maxDoc);
       maxDoc += subReaders[i].maxDoc();      // compute maxDocs
 
       if (subReaders[i].hasDeletions()) {
         hasDeletions = true;
       }
 
-      final ReaderUtil.Slice slice = new ReaderUtil.Slice(starts[i], subReaders[i].maxDoc(), i);
-      subReaderToSlice.put(subReaders[i], slice);
-
       final Fields f = subReaders[i].fields();
       if (f != null) {
         subFields.add(f);
-        fieldSlices.add(slice);
       }
     }
     starts[subReaders.length] = maxDoc;
@@ -844,18 +840,18 @@ class DirectoryReader extends IndexReader implements Cloneable {
       fieldSet.addAll(names);
     }
     return fieldSet;
   }
+
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return topLevelReaderContext;
+  }
 
   @Override
   public IndexReader[] getSequentialSubReaders() {
     return subReaders;
   }
 
-  @Override
-  public int getSubReaderDocBase(IndexReader subReader) {
-    return subReaderToSlice.get(subReader).start;
-  }
-
   /** Returns the directory this index resides in. */
   @Override
   public Directory directory() {

View File

@@ -19,6 +19,7 @@ package org.apache.lucene.index;
 
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
@@ -417,6 +418,11 @@ public class FilterIndexReader extends IndexReader {
   public IndexReader[] getSequentialSubReaders() {
     return in.getSequentialSubReaders();
   }
+
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return in.getTopReaderContext();
+  }
 
   @Override
   public Fields fields() throws IOException {

View File

@@ -1126,7 +1126,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
     if (docs == null) return 0;
     int n = 0;
     int doc;
-    while ((doc = docs.nextDoc()) != docs.NO_MORE_DOCS) {
+    while ((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
       deleteDocument(doc);
       n++;
     }
@@ -1356,9 +1356,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   }
 
   /** Expert: returns the sequential sub readers that this
-   * reader is logically composed of. For example,
-   * IndexSearcher uses this API to drive searching by one
-   * sub reader at a time. If this reader is not composed
+   * reader is logically composed of. If this reader is not composed
    * of sequential child readers, it should return null.
    * If this method returns an empty array, that means this
    * reader is a null reader (for example a MultiReader
@@ -1373,12 +1371,33 @@ public abstract class IndexReader implements Cloneable,Closeable {
   public IndexReader[] getSequentialSubReaders() {
     return null;
   }
-
-  /** Expert: returns the docID base for this subReader. */
-  public int getSubReaderDocBase(IndexReader subReader) {
-    throw new UnsupportedOperationException();
-  }
+
+  /**
+   * Expert: Returns the root {@link ReaderContext} for this
+   * {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
+   * readers, i.e. this reader is a composite reader, this method returns a
+   * {@link CompositeReaderContext} holding the reader's direct children as well as a
+   * view of the reader tree's atomic leaf contexts. All sub-
+   * {@link ReaderContext} instances referenced from this reader's top-level
+   * context are private to this reader and are not shared with another context
+   * tree. For example, IndexSearcher uses this API to drive searching by one
+   * atomic leaf reader at a time. If this reader is not composed of child
+   * readers, this method returns an {@link AtomicReaderContext}.
+   * <p>
+   * Note: Any of the sub-{@link CompositeReaderContext} instances referenced from this
+   * top-level context holds a <code>null</code> {@link CompositeReaderContext#leaves}
+   * reference. Only the top-level context maintains the convenience leaf-view
+   * for performance reasons.
+   * <p>
+   * NOTE: You should not try using sub-readers returned by this method to make
+   * any changes (setNorm, deleteDocument, etc.). While this might succeed for
+   * one composite reader (like MultiReader), it will most likely lead to index
+   * corruption for other readers (like DirectoryReader obtained through
+   * {@link #open}). Use the top-level context's reader directly.
+   *
+   * @lucene.experimental
+   */
+  public abstract ReaderContext getTopReaderContext();
 
   /** Expert */
   public Object getCoreCacheKey() {
@@ -1431,4 +1450,137 @@ public abstract class IndexReader implements Cloneable,Closeable {
   Fields retrieveFields() {
     return fields;
   }
+
+  /**
+   * A struct-like class that represents a hierarchical relationship between
+   * {@link IndexReader} instances.
+   * @lucene.experimental
+   */
+  public static abstract class ReaderContext {
+    /** The reader context for this reader's immediate parent, or null if none */
+    public final ReaderContext parent;
+    /** The actual reader */
+    public final IndexReader reader;
+    /** <code>true</code> iff the reader is an atomic reader */
+    public final boolean isAtomic;
+    /** <code>true</code> if this context struct represents the top level reader within the hierarchical context */
+    public final boolean isTopLevel;
+    /** the doc base for this reader in the parent, <tt>0</tt> if parent is null */
+    public final int docBaseInParent;
+    /** the ord for this reader in the parent, <tt>0</tt> if parent is null */
+    public final int ordInParent;
+
+    ReaderContext(ReaderContext parent, IndexReader reader,
+        boolean isAtomic, boolean isTopLevel, int ordInParent, int docBaseInParent) {
+      this.parent = parent;
+      this.reader = reader;
+      this.isAtomic = isAtomic;
+      this.docBaseInParent = docBaseInParent;
+      this.ordInParent = ordInParent;
+      this.isTopLevel = isTopLevel;
+    }
+
+    /**
+     * Returns the context's leaves if this context is a top-level context,
+     * otherwise <code>null</code>.
+     * <p>
+     * Note: this is a convenience method since leaves can always be obtained by
+     * walking the context tree.
+     */
+    public AtomicReaderContext[] leaves() {
+      return null;
+    }
+
+    /**
+     * Returns the context's children iff this context is a composite context,
+     * otherwise <code>null</code>.
+     * <p>
+     * Note: this method is a convenience method to prevent
+     * <code>instanceof</code> checks and type-casts to
+     * {@link CompositeReaderContext}.
+     */
+    public ReaderContext[] children() {
+      return null;
+    }
+  }
+
+  /**
+   * {@link ReaderContext} for composite {@link IndexReader} instances.
+   * @lucene.experimental
+   */
+  public static final class CompositeReaderContext extends ReaderContext {
+    /** the composite reader's immediate children */
+    public final ReaderContext[] children;
+    /** the composite reader's leaf reader contexts if this is the top-level reader in this context */
+    public final AtomicReaderContext[] leaves;
+
+    /**
+     * Creates a {@link CompositeReaderContext} for intermediate readers that aren't
+     * top-level readers in the current context
+     */
+    public CompositeReaderContext(ReaderContext parent, IndexReader reader,
+        int ordInParent, int docbaseInParent, ReaderContext[] children) {
+      this(parent, reader, ordInParent, docbaseInParent, children, null);
+    }
+
+    /**
+     * Creates a {@link CompositeReaderContext} for top-level readers with parent set to <code>null</code>
+     */
+    public CompositeReaderContext(IndexReader reader, ReaderContext[] children, AtomicReaderContext[] leaves) {
+      this(null, reader, 0, 0, children, leaves);
+    }
+
+    private CompositeReaderContext(ReaderContext parent, IndexReader reader,
+        int ordInParent, int docbaseInParent, ReaderContext[] children,
+        AtomicReaderContext[] leaves) {
+      super(parent, reader, false, leaves != null, ordInParent, docbaseInParent);
+      this.children = children;
+      this.leaves = leaves;
+    }
+
+    @Override
+    public AtomicReaderContext[] leaves() {
+      return leaves;
+    }
+
+    @Override
+    public ReaderContext[] children() {
+      return children;
+    }
+  }
+
+  /**
+   * {@link ReaderContext} for atomic {@link IndexReader} instances
+   * @lucene.experimental
+   */
+  public static final class AtomicReaderContext extends ReaderContext {
+    /** The reader's ord in the top-level's leaves array */
+    public final int ord;
+    /** The reader's absolute doc base */
+    public final int docBase;
+
+    /**
+     * Creates a new {@link AtomicReaderContext}
+     */
+    public AtomicReaderContext(ReaderContext parent, IndexReader reader,
+        int ord, int docBase, int leafOrd, int leafDocBase) {
+      this(parent, reader, ord, docBase, leafOrd, leafDocBase, false);
+    }
+
+    private AtomicReaderContext(ReaderContext parent, IndexReader reader,
+        int ord, int docBase, int leafOrd, int leafDocBase, boolean topLevel) {
+      super(parent, reader, true, topLevel, ord, docBase);
+      assert reader.getSequentialSubReaders() == null : "Atomic readers must not have subreaders";
+      this.ord = leafOrd;
+      this.docBase = leafDocBase;
+    }
+
+    /**
+     * Creates a new {@link AtomicReaderContext} for an atomic reader without an immediate
+     * parent.
+     */
+    public AtomicReaderContext(IndexReader atomicReader) {
+      this(null, atomicReader, 0, 0, 0, 0, true); // toplevel!!
+    }
+  }
 }
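
To make the new hierarchy concrete, here is a small sketch (not from the commit; sumMaxDoc is an invented helper name) that walks a reader's context tree the way the leaves-view is meant to be consumed:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;

public final class ReaderContextWalk {
  // Sums maxDoc() across the atomic leaves of a reader's context tree.
  public static int sumMaxDoc(IndexReader reader) {
    ReaderContext top = reader.getTopReaderContext();
    if (top.isAtomic) {
      return top.reader.maxDoc(); // the reader itself is the only leaf
    }
    int sum = 0;
    // leaves() is non-null only on the top-level composite context.
    for (AtomicReaderContext leaf : top.leaves()) {
      sum += leaf.reader.maxDoc(); // leaf.docBase is the leaf's global doc offset
    }
    return sum;
  }
}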

View File

@@ -33,8 +33,8 @@ import org.apache.lucene.util.ReaderUtil;
  * their content. */
 public class MultiReader extends IndexReader implements Cloneable {
   protected IndexReader[] subReaders;
+  private final ReaderContext topLevelContext;
   private int[] starts;                           // 1st docno for each segment
-  private final Map<IndexReader,ReaderUtil.Slice> subReaderToSlice = new HashMap<IndexReader,ReaderUtil.Slice>();
   private boolean[] decrefOnClose;                // remember which subreaders to decRef on close
   private int maxDoc = 0;
   private int numDocs = -1;
@@ -48,7 +48,7 @@ public class MultiReader extends IndexReader implements Cloneable {
    * @param subReaders set of (sub)readers
    */
   public MultiReader(IndexReader... subReaders) throws IOException {
-    initialize(subReaders, true);
+    topLevelContext = initialize(subReaders, true);
   }
 
   /**
@@ -60,14 +60,13 @@ public class MultiReader extends IndexReader implements Cloneable {
    * @param subReaders set of (sub)readers
    */
   public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
-    initialize(subReaders, closeSubReaders);
+    topLevelContext = initialize(subReaders, closeSubReaders);
   }
 
-  private void initialize(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
+  private ReaderContext initialize(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
     this.subReaders = subReaders.clone();
     starts = new int[subReaders.length + 1];    // build starts array
     decrefOnClose = new boolean[subReaders.length];
-
     for (int i = 0; i < subReaders.length; i++) {
       starts[i] = maxDoc;
       maxDoc += subReaders[i].maxDoc();      // compute maxDocs
@@ -82,14 +81,9 @@ public class MultiReader extends IndexReader implements Cloneable {
       if (subReaders[i].hasDeletions()) {
         hasDeletions = true;
       }
-
-      final ReaderUtil.Slice slice = new ReaderUtil.Slice(starts[i],
-                                                          subReaders[i].maxDoc(),
-                                                          i);
-      subReaderToSlice.put(subReaders[i], slice);
-
     }
     starts[subReaders.length] = maxDoc;
+    return ReaderUtil.buildReaderContext(this);
   }
 
   @Override
@@ -97,11 +91,6 @@ public class MultiReader extends IndexReader implements Cloneable {
     throw new UnsupportedOperationException("");
   }
 
-  @Override
-  public int getSubReaderDocBase(IndexReader subReader) {
-    return subReaderToSlice.get(subReader).start;
-  }
-
   @Override
   public Fields fields() throws IOException {
     throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
@@ -403,4 +392,8 @@ public class MultiReader extends IndexReader implements Cloneable {
   public IndexReader[] getSequentialSubReaders() {
     return subReaders;
   }
+
+  public ReaderContext getTopReaderContext() {
+    return topLevelContext;
+  }
 }

View File

@@ -21,7 +21,9 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.document.FieldSelectorResult;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.util.BytesRef;
@@ -55,7 +57,7 @@ public class ParallelReader extends IndexReader {
   private Map<IndexReader,Collection<String>> readerToFields = new HashMap<IndexReader,Collection<String>>();
   private List<IndexReader> storedFieldReaders = new ArrayList<IndexReader>();
   private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
+  private final ReaderContext topLevelReaderContext = new AtomicReaderContext(this);
   private int maxDoc;
   private int numDocs;
   private boolean hasDeletions;
@@ -90,7 +92,7 @@ public class ParallelReader extends IndexReader {
     buffer.append(')');
     return buffer.toString();
   }
 
   /** Add an IndexReader.
    * @throws IOException if there is a low-level IO error
    */
@@ -559,6 +561,11 @@ public class ParallelReader extends IndexReader {
     }
     return fieldSet;
   }
+
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return topLevelReaderContext;
+  }
 }

View File

@@ -51,7 +51,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
   private SegmentInfo si;
   private int readBufferSize;
+  private final ReaderContext readerContext = new AtomicReaderContext(this);
 
   CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
   CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
@@ -1183,6 +1183,11 @@ public class SegmentReader extends IndexReader implements Cloneable {
     buffer.append(si.toString(core.dir, pendingDeleteCount));
     return buffer.toString();
   }
+
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return readerContext;
+  }
 
   /**
    * Return the name of the segment this reader is reading.

View File

@@ -30,6 +30,7 @@ import org.apache.lucene.util.ReaderUtil; // javadoc
 
 import org.apache.lucene.index.DirectoryReader; // javadoc
 import org.apache.lucene.index.MultiReader; // javadoc
+import org.apache.lucene.index.IndexReader.ReaderContext;
 
 /**
  * This class forces a composite reader (eg a {@link
@@ -55,10 +56,12 @@ import org.apache.lucene.index.MultiReader; // javadoc
 public final class SlowMultiReaderWrapper extends FilterIndexReader {
 
+  private final ReaderContext readerContext;
   private final Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
 
   public SlowMultiReaderWrapper(IndexReader other) {
     super(other);
+    readerContext = new AtomicReaderContext(this); // emulate atomic reader!
   }
 
   @Override
@@ -103,6 +106,11 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
     }
   }
 
+  @Override
+  public ReaderContext getTopReaderContext() {
+    return readerContext;
+  }
+
   @Override
   protected void doSetNorm(int n, String field, byte value)
       throws CorruptIndexException, IOException {

View File

@@ -18,6 +18,7 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -223,7 +224,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
     }
 
     @Override
-    public Explanation explain(IndexReader reader, int doc)
+    public Explanation explain(ReaderContext context, int doc)
       throws IOException {
       final int minShouldMatch =
         BooleanQuery.this.getMinimumNumberShouldMatch();
@@ -237,7 +238,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
       for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
         Weight w = wIter.next();
         BooleanClause c = cIter.next();
-        if (w.scorer(reader, true, true) == null) {
+        if (w.scorer(context, true, true) == null) {
           if (c.isRequired()) {
             fail = true;
             Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
@@ -245,7 +246,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
           }
           continue;
         }
-        Explanation e = w.explain(reader, doc);
+        Explanation e = w.explain(context, doc);
         if (e.isMatch()) {
           if (!c.isProhibited()) {
             sumExpl.addDetail(e);
@@ -299,7 +300,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
     }
 
     @Override
-    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
+    public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer)
         throws IOException {
       List<Scorer> required = new ArrayList<Scorer>();
       List<Scorer> prohibited = new ArrayList<Scorer>();
@@ -307,7 +308,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
       Iterator<BooleanClause> cIter = clauses.iterator();
       for (Weight w : weights) {
         BooleanClause c = cIter.next();
-        Scorer subScorer = w.scorer(reader, true, false);
+        Scorer subScorer = w.scorer(context, true, false);
         if (subScorer == null) {
           if (c.isRequired()) {
             return null;

View File

@@ -17,6 +17,7 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.Bits;
 
 import java.io.IOException;
@@ -60,8 +61,8 @@ public class CachingSpanFilter extends SpanFilter {
   }
 
   @Override
-  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-    SpanFilterResult result = getCachedResult(reader);
+  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
+    SpanFilterResult result = getCachedResult(context.reader);
     return result != null ? result.getDocIdSet() : null;
   }

View File

@ -23,6 +23,7 @@ import java.util.Map;
import java.util.WeakHashMap; import java.util.WeakHashMap;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.util.OpenBitSetDISI; import org.apache.lucene.util.OpenBitSetDISI;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
@ -37,6 +38,9 @@ import org.apache.lucene.util.Bits;
* {@link DeletesMode#DYNAMIC}). * {@link DeletesMode#DYNAMIC}).
*/ */
public class CachingWrapperFilter extends Filter { public class CachingWrapperFilter extends Filter {
// TODO: make this filter aware of ReaderContext. A cached filter could
// specify the actual reader's key or something similar to indicate on which
// level of the reader hierarchy it should be cached.
Filter filter; Filter filter;
/** /**
@ -191,8 +195,8 @@ public class CachingWrapperFilter extends Filter {
int hitCount, missCount; int hitCount, missCount;
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final IndexReader reader = context.reader;
final Object coreKey = reader.getCoreCacheKey(); final Object coreKey = reader.getCoreCacheKey();
final Object delCoreKey = reader.hasDeletions() ? reader.getDeletedDocs() : coreKey; final Object delCoreKey = reader.hasDeletions() ? reader.getDeletedDocs() : coreKey;
@ -205,7 +209,7 @@ public class CachingWrapperFilter extends Filter {
missCount++; missCount++;
// cache miss // cache miss
docIdSet = docIdSetToCache(filter.getDocIdSet(reader), reader); docIdSet = docIdSetToCache(filter.getDocIdSet(context), reader);
if (docIdSet != null) { if (docIdSet != null) {
cache.put(coreKey, delCoreKey, docIdSet); cache.put(coreKey, delCoreKey, docIdSet);
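Since getDocIdSet now receives one segment's context at a time, the cache above naturally holds one DocIdSet per segment core. A minimal usage sketch; the wrapped query and field are hypothetical:

Filter slow = new QueryWrapperFilter(new TermQuery(new Term("state", "published")));
Filter cached = new CachingWrapperFilter(slow);            // caches per segment core key
TopDocs hits = searcher.search(new MatchAllDocsQuery(), cached, 10);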

View File

@ -18,6 +18,7 @@ package org.apache.lucene.search;
*/ */
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
@ -132,18 +133,18 @@ public class ConstantScoreQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
final DocIdSetIterator disi; final DocIdSetIterator disi;
if (filter != null) { if (filter != null) {
assert query == null; assert query == null;
final DocIdSet dis = filter.getDocIdSet(reader); final DocIdSet dis = filter.getDocIdSet(context);
if (dis == null) if (dis == null)
return null; return null;
disi = dis.iterator(); disi = dis.iterator();
} else { } else {
assert query != null && innerWeight != null; assert query != null && innerWeight != null;
disi = disi =
innerWeight.scorer(reader, scoreDocsInOrder, topScorer); innerWeight.scorer(context, scoreDocsInOrder, topScorer);
} }
if (disi == null) if (disi == null)
return null; return null;
@ -156,8 +157,8 @@ public class ConstantScoreQuery extends Query {
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext context, int doc) throws IOException {
final Scorer cs = scorer(reader, true, false); final Scorer cs = scorer(context, true, false);
final boolean exists = (cs != null && cs.advance(doc) == doc); final boolean exists = (cs != null && cs.advance(doc) == doc);
final ComplexExplanation result = new ComplexExplanation(); final ComplexExplanation result = new ComplexExplanation();
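A short usage sketch for the reworked weight: wrapping a filter so that every match scores the same value. Field and boost are illustrative only.

Filter typeFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "pdf")));
ConstantScoreQuery csq = new ConstantScoreQuery(typeFilter);
csq.setBoost(2.0f);                                        // every hit scores 2.0
TopDocs hits = searcher.search(csq, 10);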

View File

@ -23,6 +23,7 @@ import java.util.Iterator;
import java.util.Set; import java.util.Set;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
/** /**
@ -141,12 +142,12 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
/* Create the scorer used to score our associated DisjunctionMaxQuery */ /* Create the scorer used to score our associated DisjunctionMaxQuery */
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder,
boolean topScorer) throws IOException { boolean topScorer) throws IOException {
Scorer[] scorers = new Scorer[weights.size()]; Scorer[] scorers = new Scorer[weights.size()];
int idx = 0; int idx = 0;
for (Weight w : weights) { for (Weight w : weights) {
Scorer subScorer = w.scorer(reader, true, false); Scorer subScorer = w.scorer(context, true, false);
if (subScorer != null && subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { if (subScorer != null && subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
scorers[idx++] = subScorer; scorers[idx++] = subScorer;
} }
@ -158,13 +159,13 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
/* Explain the score we computed for doc */ /* Explain the score we computed for doc */
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext context, int doc) throws IOException {
if (disjuncts.size() == 1) return weights.get(0).explain(reader,doc); if (disjuncts.size() == 1) return weights.get(0).explain(context,doc);
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
float max = 0.0f, sum = 0.0f; float max = 0.0f, sum = 0.0f;
result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:"); result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:");
for (Weight wt : weights) { for (Weight wt : weights) {
Explanation e = wt.explain(reader, doc); Explanation e = wt.explain(context, doc);
if (e.isMatch()) { if (e.isMatch()) {
result.setMatch(Boolean.TRUE); result.setMatch(Boolean.TRUE);
result.addDetail(e); result.addDetail(e);

View File

@ -19,6 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
@ -73,7 +74,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
/** This method is implemented for each data type */ /** This method is implemented for each data type */
@Override @Override
public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException; public abstract DocIdSet getDocIdSet(ReaderContext context) throws IOException;
/** /**
* Creates a string range filter using {@link FieldCache#getTermsIndex}. This works with all * Creates a string range filter using {@link FieldCache#getTermsIndex}. This works with all
@ -83,8 +84,8 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(reader, field); final FieldCache.DocTermsIndex fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader, field);
final BytesRef spare = new BytesRef(); final BytesRef spare = new BytesRef();
final int lowerPoint = fcsi.binarySearchLookup(lowerVal == null ? null : new BytesRef(lowerVal), spare); final int lowerPoint = fcsi.binarySearchLookup(lowerVal == null ? null : new BytesRef(lowerVal), spare);
final int upperPoint = fcsi.binarySearchLookup(upperVal == null ? null : new BytesRef(upperVal), spare); final int upperPoint = fcsi.binarySearchLookup(upperVal == null ? null : new BytesRef(upperVal), spare);
@ -124,7 +125,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
// for this DocIdSet, we can ignore deleted docs // for this DocIdSet, we can ignore deleted docs
// because deleted docs have an order of 0 (null entry in StringIndex) // because deleted docs have an order of 0 (null entry in StringIndex)
return new FieldCacheDocIdSet(reader, true) { return new FieldCacheDocIdSet(context.reader, true) {
@Override @Override
final boolean matchDoc(int doc) { final boolean matchDoc(int doc) {
final int docOrd = fcsi.getOrd(doc); final int docOrd = fcsi.getOrd(doc);
@ -152,7 +153,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final byte inclusiveLowerPoint, inclusiveUpperPoint; final byte inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) { if (lowerVal != null) {
final byte i = lowerVal.byteValue(); final byte i = lowerVal.byteValue();
@ -174,9 +175,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final byte[] values = FieldCache.DEFAULT.getBytes(reader, field, (FieldCache.ByteParser) parser); final byte[] values = FieldCache.DEFAULT.getBytes(context.reader, field, (FieldCache.ByteParser) parser);
// we only respect deleted docs if the range contains 0 // we only respect deleted docs if the range contains 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@ -203,7 +204,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final short inclusiveLowerPoint, inclusiveUpperPoint; final short inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) { if (lowerVal != null) {
short i = lowerVal.shortValue(); short i = lowerVal.shortValue();
@ -225,9 +226,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final short[] values = FieldCache.DEFAULT.getShorts(reader, field, (FieldCache.ShortParser) parser); final short[] values = FieldCache.DEFAULT.getShorts(context.reader, field, (FieldCache.ShortParser) parser);
// ignore deleted docs if range doesn't contain 0 // ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@ -254,7 +255,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final int inclusiveLowerPoint, inclusiveUpperPoint; final int inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) { if (lowerVal != null) {
int i = lowerVal.intValue(); int i = lowerVal.intValue();
@ -276,9 +277,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final int[] values = FieldCache.DEFAULT.getInts(reader, field, (FieldCache.IntParser) parser); final int[] values = FieldCache.DEFAULT.getInts(context.reader, field, (FieldCache.IntParser) parser);
// ignore deleted docs if range doesn't contain 0 // ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@ -305,7 +306,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final long inclusiveLowerPoint, inclusiveUpperPoint; final long inclusiveLowerPoint, inclusiveUpperPoint;
if (lowerVal != null) { if (lowerVal != null) {
long i = lowerVal.longValue(); long i = lowerVal.longValue();
@ -327,9 +328,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final long[] values = FieldCache.DEFAULT.getLongs(reader, field, (FieldCache.LongParser) parser); final long[] values = FieldCache.DEFAULT.getLongs(context.reader, field, (FieldCache.LongParser) parser);
// ignore deleted docs if range doesn't contain 0 // ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@ -356,7 +357,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
// we transform the floating point numbers to sortable integers // we transform the floating point numbers to sortable integers
// using NumericUtils to easier find the next bigger/lower value // using NumericUtils to easier find the next bigger/lower value
final float inclusiveLowerPoint, inclusiveUpperPoint; final float inclusiveLowerPoint, inclusiveUpperPoint;
@ -382,9 +383,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final float[] values = FieldCache.DEFAULT.getFloats(reader, field, (FieldCache.FloatParser) parser); final float[] values = FieldCache.DEFAULT.getFloats(context.reader, field, (FieldCache.FloatParser) parser);
// ignore deleted docs if range doesn't contain 0 // ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
@ -411,7 +412,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) { public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) { return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
// we transform the floating point numbers to sortable integers // we transform the floating point numbers to sortable integers
// using NumericUtils to easier find the next bigger/lower value // using NumericUtils to easier find the next bigger/lower value
final double inclusiveLowerPoint, inclusiveUpperPoint; final double inclusiveLowerPoint, inclusiveUpperPoint;
@ -437,9 +438,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (inclusiveLowerPoint > inclusiveUpperPoint) if (inclusiveLowerPoint > inclusiveUpperPoint)
return DocIdSet.EMPTY_DOCIDSET; return DocIdSet.EMPTY_DOCIDSET;
final double[] values = FieldCache.DEFAULT.getDoubles(reader, field, (FieldCache.DoubleParser) parser); final double[] values = FieldCache.DEFAULT.getDoubles(context.reader, field, (FieldCache.DoubleParser) parser);
// ignore deleted docs if range doesn't contain 0 // ignore deleted docs if range doesn't contain 0
return new FieldCacheDocIdSet(reader, !(inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0)) { return new FieldCacheDocIdSet(context.reader, !(inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0)) {
@Override @Override
boolean matchDoc(int doc) { boolean matchDoc(int doc) {
return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint; return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
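All of the factories above now resolve their FieldCache entry from context.reader, i.e. from a single segment. A usage sketch with hypothetical fields; the parser-less overloads are assumed to exist as in previous releases:

Filter prices = FieldCacheRangeFilter.newIntRange("price", 10, 100, true, true);
Filter years = FieldCacheRangeFilter.newStringRange("year", "2009", "2010", true, false);
TopDocs hits = searcher.search(new MatchAllDocsQuery(), prices, 20);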

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.lucene.index.DocsEnum; // javadoc @link import org.apache.lucene.index.DocsEnum; // javadoc @link
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
@ -115,8 +116,8 @@ public class FieldCacheTermsFilter extends Filter {
} }
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
return new FieldCacheTermsFilterDocIdSet(getFieldCache().getTermsIndex(reader, field)); return new FieldCacheTermsFilterDocIdSet(getFieldCache().getTermsIndex(context.reader, field));
} }
protected class FieldCacheTermsFilterDocIdSet extends DocIdSet { protected class FieldCacheTermsFilterDocIdSet extends DocIdSet {

View File

@ -19,7 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.util.DocIdBitSet; import org.apache.lucene.util.DocIdBitSet;
/** /**
@ -38,10 +38,13 @@ public abstract class Filter implements java.io.Serializable {
* must refer to document IDs for that segment, not for * must refer to document IDs for that segment, not for
* the top-level reader. * the top-level reader.
* *
* @param reader a {@link IndexReader} instance opened on the index currently * @param context a {@link ReaderContext} instance opened on the index currently
* searched on. The provided reader is always an * searched on. Note that the provided context will likely not represent
* atomic reader, so you can call reader.fields() * the whole underlying index, i.e. if the index has more than one
* or reader.getDeletedDocs(), for example. * segment the given context only represents a single segment.
* The provided context is always an atomic context, so you can call
* {@link IndexReader#fields()} or {@link IndexReader#getDeletedDocs()}
* on the context's reader, for example.
* *
* @return a DocIdSet that provides the documents which should be permitted or * @return a DocIdSet that provides the documents which should be permitted or
* prohibited in search results. <b>NOTE:</b> null can be returned if * prohibited in search results. <b>NOTE:</b> null can be returned if
@ -49,5 +52,6 @@ public abstract class Filter implements java.io.Serializable {
* *
* @see DocIdBitSet * @see DocIdBitSet
*/ */
public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException; // TODO: make this context an AtomicReaderContext
public abstract DocIdSet getDocIdSet(ReaderContext context) throws IOException;
} }
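To make the revised contract concrete, here is a minimal custom Filter against the new API. It is only a sketch: the class name and match logic are hypothetical, and it simply admits every non-deleted document of the segment behind the given context.

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.OpenBitSet;

public class AllLiveDocsFilter extends Filter {
  @Override
  public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
    final IndexReader reader = context.reader;      // always an atomic (per-segment) reader
    final OpenBitSet bits = new OpenBitSet(reader.maxDoc());
    final Bits delDocs = reader.getDeletedDocs();
    for (int doc = 0; doc < reader.maxDoc(); doc++) {
      if (delDocs == null || !delDocs.get(doc)) {
        bits.set(doc);                              // ids are segment-relative, per the javadoc above
      }
    }
    return bits;                                    // OpenBitSet is itself a DocIdSet
  }
}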

View File

@ -18,6 +18,7 @@ package org.apache.lucene.search;
*/ */
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
@ -81,7 +82,7 @@ extends Query {
} }
@Override @Override
public Explanation explain (IndexReader ir, int i) throws IOException { public Explanation explain (ReaderContext ir, int i) throws IOException {
Explanation inner = weight.explain (ir, i); Explanation inner = weight.explain (ir, i);
if (getBoost()!=1) { if (getBoost()!=1) {
Explanation preBoost = inner; Explanation preBoost = inner;
@ -111,7 +112,7 @@ extends Query {
// return a filtering scorer // return a filtering scorer
@Override @Override
public Scorer scorer(IndexReader indexReader, boolean scoreDocsInOrder, boolean topScorer) public Scorer scorer(ReaderContext indexReader, boolean scoreDocsInOrder, boolean topScorer)
throws IOException { throws IOException {
final Scorer scorer = weight.scorer(indexReader, true, false); final Scorer scorer = weight.scorer(indexReader, true, false);
if (scorer == null) { if (scorer == null) {

View File

@ -18,9 +18,7 @@ package org.apache.lucene.search;
*/ */
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException; import java.util.NoSuchElementException;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService; import java.util.concurrent.CompletionService;
@ -35,6 +33,8 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory; // javadoc import org.apache.lucene.store.NIOFSDirectory; // javadoc
@ -57,14 +57,15 @@ import org.apache.lucene.util.ThreadInterruptedException;
* use your own (non-Lucene) objects instead.</p> * use your own (non-Lucene) objects instead.</p>
*/ */
public class IndexSearcher { public class IndexSearcher {
IndexReader reader; final IndexReader reader; // package private for testing!
private boolean closeReader; private boolean closeReader;
// NOTE: these members might change in incompatible ways // NOTE: these members might change in incompatible ways
// in the next release // in the next release
protected final IndexReader[] subReaders; protected final ReaderContext readerContext;
protected final AtomicReaderContext[] leafContexts;
protected final IndexSearcher[] subSearchers; protected final IndexSearcher[] subSearchers;
protected final int[] docStarts; // protected final int[] docStarts;
private final ExecutorService executor; private final ExecutorService executor;
/** The Similarity implementation used by this searcher. */ /** The Similarity implementation used by this searcher. */
@ -115,83 +116,73 @@ public class IndexSearcher {
this(r, false, executor); this(r, false, executor);
} }
/** Expert: directly specify the reader, subReaders and /**
* their docID starts. * Creates a searcher searching the provided top-level {@link ReaderContext}.
* <p>
* Given a non-<code>null</code> {@link ExecutorService} this method runs
* searches for each segment separately, using the provided ExecutorService.
* IndexSearcher will not shutdown/awaitTermination this ExecutorService on
* close; you must do so, eventually, on your own. NOTE: if you are using
* {@link NIOFSDirectory}, do not use the shutdownNow method of
* ExecutorService as this uses Thread.interrupt under-the-hood which can
* silently close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
* *
* @lucene.experimental */ * @see ReaderContext
public IndexSearcher(IndexReader reader, IndexReader[] subReaders, int[] docStarts) { * @see IndexReader#getTopReaderContext()
this.reader = reader; * @lucene.experimental
this.subReaders = subReaders; */
this.docStarts = docStarts; public IndexSearcher(ReaderContext context, ExecutorService executor) {
subSearchers = new IndexSearcher[subReaders.length]; this(context, false, executor);
for(int i=0;i<subReaders.length;i++) { }
subSearchers[i] = new IndexSearcher(subReaders[i]);
} /**
closeReader = false; * Creates a searcher searching the provided top-level {@link ReaderContext}.
executor = null; *
* @see ReaderContext
* @see IndexReader#getTopReaderContext()
* @lucene.experimental
*/
public IndexSearcher(ReaderContext context) {
this(context, null);
} }
/** Expert: directly specify the reader, subReaders and // convenience ctor for other IndexReader-based ctors
* their docID starts, and an ExecutorService. In this private IndexSearcher(IndexReader reader, boolean closeReader, ExecutorService executor) {
* case, each segment will be separately searched using the this(reader.getTopReaderContext(), closeReader, executor);
* ExecutorService. IndexSearcher will not
* shutdown/awaitTermination this ExecutorService on
* close; you must do so, eventually, on your own. NOTE:
* if you are using {@link NIOFSDirectory}, do not use
* the shutdownNow method of ExecutorService as this uses
* Thread.interrupt under-the-hood which can silently
* close file descriptors (see <a
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @lucene.experimental */
public IndexSearcher(IndexReader reader, IndexReader[] subReaders, int[] docStarts, ExecutorService executor) {
this.reader = reader;
this.subReaders = subReaders;
this.docStarts = docStarts;
subSearchers = new IndexSearcher[subReaders.length];
for(int i=0;i<subReaders.length;i++) {
subSearchers[i] = new IndexSearcher(subReaders[i]);
}
closeReader = false;
this.executor = executor;
} }
private IndexSearcher(IndexReader r, boolean closeReader, ExecutorService executor) { private IndexSearcher(ReaderContext context, boolean closeReader, ExecutorService executor) {
reader = r; // TODO: enable this assert once SolrIndexReader and friends are refactored to use ReaderContext
// We can't assert this here since SolrIndexReader will fail in some contexts; once Solr is consistent we should be fine here
// Lucene itself passes all tests even with this assert!
// assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader;
reader = context.reader;
this.executor = executor; this.executor = executor;
this.closeReader = closeReader; this.closeReader = closeReader;
this.readerContext = context;
List<IndexReader> subReadersList = new ArrayList<IndexReader>(); if (context.isAtomic) {
gatherSubReaders(subReadersList, reader); assert context.leaves() == null : "AtomicReaderContext must not have any leaves";
subReaders = subReadersList.toArray(new IndexReader[subReadersList.size()]); this.leafContexts = new AtomicReaderContext[] { (AtomicReaderContext) context };
docStarts = new int[subReaders.length]; } else {
subSearchers = new IndexSearcher[subReaders.length]; assert context.leaves() != null : "non-atomic top-level context must have leaves";
int maxDoc = 0; this.leafContexts = context.leaves();
for (int i = 0; i < subReaders.length; i++) { }
docStarts[i] = maxDoc; subSearchers = new IndexSearcher[this.leafContexts.length];
maxDoc += subReaders[i].maxDoc(); for (int i = 0; i < subSearchers.length; i++) { // TODO: do we need these sub-searchers if executor is null?
if (subReaders[i] == r) { if (leafContexts[i].reader == context.reader) {
subSearchers[i] = this; subSearchers[i] = this;
} else { } else {
subSearchers[i] = new IndexSearcher(subReaders[i]); subSearchers[i] = new IndexSearcher(leafContexts[i].reader.getTopReaderContext()); // we need a top-level context for the sub-searchers!
} }
} }
} }
protected void gatherSubReaders(List<IndexReader> allSubReaders, IndexReader r) {
ReaderUtil.gatherSubReaders(allSubReaders, r);
}
/** Return the {@link IndexReader} this searches. */ /** Return the {@link IndexReader} this searches. */
public IndexReader getIndexReader() { public IndexReader getIndexReader() {
return reader; return reader;
} }
/** Returns the atomic subReaders used by this searcher. */
public IndexReader[] getSubReaders() {
return subReaders;
}
/** Expert: Returns one greater than the largest possible document number. /** Expert: Returns one greater than the largest possible document number.
* *
* @see org.apache.lucene.index.IndexReader#maxDoc() * @see org.apache.lucene.index.IndexReader#maxDoc()
@ -206,7 +197,7 @@ public class IndexSearcher {
return reader.docFreq(term); return reader.docFreq(term);
} else { } else {
final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor); final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor);
for(int i = 0; i < subReaders.length; i++) { for(int i = 0; i < subSearchers.length; i++) {
final IndexSearcher searchable = subSearchers[i]; final IndexSearcher searchable = subSearchers[i];
runner.submit(new Callable<Integer>() { runner.submit(new Callable<Integer>() {
public Integer call() throws IOException { public Integer call() throws IOException {
@ -369,9 +360,9 @@ public class IndexSearcher {
final Lock lock = new ReentrantLock(); final Lock lock = new ReentrantLock();
final ExecutionHelper<TopDocs> runner = new ExecutionHelper<TopDocs>(executor); final ExecutionHelper<TopDocs> runner = new ExecutionHelper<TopDocs>(executor);
for (int i = 0; i < subReaders.length; i++) { // search each sub for (int i = 0; i < subSearchers.length; i++) { // search each sub
runner.submit( runner.submit(
new MultiSearcherCallableNoSort(lock, subSearchers[i], weight, filter, nDocs, hq, docStarts[i])); new MultiSearcherCallableNoSort(lock, subSearchers[i], weight, filter, nDocs, hq, leafContexts[i].docBase));
} }
int totalHits = 0; int totalHits = 0;
@ -438,9 +429,9 @@ public class IndexSearcher {
final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs); final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
final Lock lock = new ReentrantLock(); final Lock lock = new ReentrantLock();
final ExecutionHelper<TopFieldDocs> runner = new ExecutionHelper<TopFieldDocs>(executor); final ExecutionHelper<TopFieldDocs> runner = new ExecutionHelper<TopFieldDocs>(executor);
for (int i = 0; i < subReaders.length; i++) { // search each sub for (int i = 0; i < subSearchers.length; i++) { // search each sub
runner.submit( runner.submit(
new MultiSearcherCallableWithSort(lock, subSearchers[i], weight, filter, nDocs, hq, sort, docStarts[i])); new MultiSearcherCallableWithSort(lock, subSearchers[i], weight, filter, nDocs, hq, sort, leafContexts[i].docBase));
} }
int totalHits = 0; int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY; float maxScore = Float.NEGATIVE_INFINITY;
@ -484,27 +475,27 @@ public class IndexSearcher {
// always use single thread: // always use single thread:
if (filter == null) { if (filter == null) {
for (int i = 0; i < subReaders.length; i++) { // search each subreader for (int i = 0; i < leafContexts.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]); collector.setNextReader(leafContexts[i].reader, leafContexts[i].docBase);
Scorer scorer = weight.scorer(subReaders[i], !collector.acceptsDocsOutOfOrder(), true); Scorer scorer = weight.scorer(leafContexts[i], !collector.acceptsDocsOutOfOrder(), true);
if (scorer != null) { if (scorer != null) {
scorer.score(collector); scorer.score(collector);
} }
} }
} else { } else {
for (int i = 0; i < subReaders.length; i++) { // search each subreader for (int i = 0; i < leafContexts.length; i++) { // search each subreader
collector.setNextReader(subReaders[i], docStarts[i]); collector.setNextReader(leafContexts[i].reader, leafContexts[i].docBase);
searchWithFilter(subReaders[i], weight, filter, collector); searchWithFilter(leafContexts[i], weight, filter, collector);
} }
} }
} }
private void searchWithFilter(IndexReader reader, Weight weight, private void searchWithFilter(ReaderContext context, Weight weight,
final Filter filter, final Collector collector) throws IOException { final Filter filter, final Collector collector) throws IOException {
assert filter != null; assert filter != null;
Scorer scorer = weight.scorer(reader, true, false); Scorer scorer = weight.scorer(context, true, false);
if (scorer == null) { if (scorer == null) {
return; return;
} }
@ -513,7 +504,7 @@ public class IndexSearcher {
assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS; assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS;
// CHECKME: use ConjunctionScorer here? // CHECKME: use ConjunctionScorer here?
DocIdSet filterDocIdSet = filter.getDocIdSet(reader); DocIdSet filterDocIdSet = filter.getDocIdSet(context);
if (filterDocIdSet == null) { if (filterDocIdSet == null) {
// this means the filter does not accept any documents. // this means the filter does not accept any documents.
return; return;
@ -581,10 +572,10 @@ public class IndexSearcher {
* @throws BooleanQuery.TooManyClauses * @throws BooleanQuery.TooManyClauses
*/ */
protected Explanation explain(Weight weight, int doc) throws IOException { protected Explanation explain(Weight weight, int doc) throws IOException {
int n = ReaderUtil.subIndex(doc, docStarts); int n = ReaderUtil.subIndex(doc, leafContexts);
int deBasedDoc = doc - docStarts[n]; int deBasedDoc = doc - leafContexts[n].docBase;
return weight.explain(subReaders[n], deBasedDoc); return weight.explain(leafContexts[n], deBasedDoc);
} }
private boolean fieldSortDoTrackScores; private boolean fieldSortDoTrackScores;
@ -615,6 +606,14 @@ public class IndexSearcher {
return query.weight(this); return query.weight(this);
} }
/**
* Returns this searcher's top-level {@link ReaderContext}.
* @see IndexReader#getTopReaderContext()
*/
/* Sugar for .getIndexReader().getTopReaderContext() */
public ReaderContext getTopReaderContext() {
return readerContext;
}
/** /**
* A thread subclass for searching a single searchable * A thread subclass for searching a single searchable
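Putting the new constructors together, a minimal construction sketch; directory setup is elided, the pool size is arbitrary, and per the javadoc the searcher never shuts the pool down itself:

IndexReader reader = IndexReader.open(dir);
ReaderContext top = reader.getTopReaderContext();
IndexSearcher searcher = new IndexSearcher(top);           // single-threaded
ExecutorService pool = Executors.newFixedThreadPool(4);
IndexSearcher parallel = new IndexSearcher(top, pool);     // one task per segment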

View File

@ -18,6 +18,7 @@ package org.apache.lucene.search;
*/ */
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
@ -126,13 +127,13 @@ public class MatchAllDocsQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new MatchAllScorer(reader, similarity, this, return new MatchAllScorer(context.reader, similarity, this,
normsField != null ? reader.norms(normsField) : null); normsField != null ? context.reader.norms(normsField) : null);
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) { public Explanation explain(ReaderContext context, int doc) {
// explain query weight // explain query weight
Explanation queryExpl = new ComplexExplanation Explanation queryExpl = new ComplexExplanation
(true, getValue(), "MatchAllDocsQuery, product of:"); (true, getValue(), "MatchAllDocsQuery, product of:");

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.*; import java.util.*;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsAndPositionsEnum;
@ -167,10 +168,10 @@ public class MultiPhraseQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
if (termArrays.size() == 0) // optimize zero-term case if (termArrays.size() == 0) // optimize zero-term case
return null; return null;
final IndexReader reader = context.reader;
final Bits delDocs = reader.getDeletedDocs(); final Bits delDocs = reader.getDeletedDocs();
PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()]; PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()];
@ -219,7 +220,7 @@ public class MultiPhraseQuery extends Query {
if (slop == 0) { if (slop == 0) {
ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
reader.norms(field)); reader.norms(field));
if (s.noDocs) { if (s.noDocs) {
return null; return null;
} else { } else {
@ -232,7 +233,7 @@ public class MultiPhraseQuery extends Query {
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) public Explanation explain(ReaderContext context, int doc)
throws IOException { throws IOException {
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:"); result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
@ -263,7 +264,7 @@ public class MultiPhraseQuery extends Query {
fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+ fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+
"), product of:"); "), product of:");
Scorer scorer = scorer(reader, true, false); Scorer scorer = scorer(context, true, false);
if (scorer == null) { if (scorer == null) {
return new Explanation(0.0f, "no matching docs"); return new Explanation(0.0f, "no matching docs");
} }
@ -283,7 +284,7 @@ public class MultiPhraseQuery extends Query {
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);
Explanation fieldNormExpl = new Explanation(); Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field); byte[] fieldNorms = context.reader.norms(field);
float fieldNorm = float fieldNorm =
fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f; fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm); fieldNormExpl.setValue(fieldNorm);
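For reference, building such a query is unchanged; only its weight now scores one segment at a time. A sketch with hypothetical terms:

MultiPhraseQuery mpq = new MultiPhraseQuery();
mpq.add(new Term("body", "quick"));
mpq.add(new Term[] { new Term("body", "fox"), new Term("body", "foxes") }); // either term at this position
TopDocs hits = searcher.search(mpq, 10);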

View File

@ -19,8 +19,9 @@ package org.apache.lucene.search;
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Fields; import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Terms; import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.DocsEnum;
@ -104,7 +105,8 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
* results. * results.
*/ */
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
final IndexReader reader = context.reader;
final Fields fields = reader.fields(); final Fields fields = reader.fields();
if (fields == null) { if (fields == null) {
// reader has no fields // reader has no fields
@ -121,7 +123,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
assert termsEnum != null; assert termsEnum != null;
if (termsEnum.next() != null) { if (termsEnum.next() != null) {
// fill into a OpenBitSet // fill into a OpenBitSet
final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc()); final OpenBitSet bitSet = new OpenBitSet(context.reader.maxDoc());
int termCount = 0; int termCount = 0;
final Bits delDocs = reader.getDeletedDocs(); final Bits delDocs = reader.getDeletedDocs();
DocsEnum docsEnum = null; DocsEnum docsEnum = null;

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.Set; import java.util.Set;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
@ -174,10 +175,10 @@ public class PhraseQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
if (terms.size() == 0) // optimize zero-term case if (terms.size() == 0) // optimize zero-term case
return null; return null;
final IndexReader reader = context.reader;
PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()]; PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];
final Bits delDocs = reader.getDeletedDocs(); final Bits delDocs = reader.getDeletedDocs();
for (int i = 0; i < terms.size(); i++) { for (int i = 0; i < terms.size(); i++) {
@ -206,7 +207,7 @@ public class PhraseQuery extends Query {
if (slop == 0) { // optimize exact case if (slop == 0) { // optimize exact case
ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
reader.norms(field)); reader.norms(field));
if (s.noDocs) { if (s.noDocs) {
return null; return null;
} else { } else {
@ -215,12 +216,12 @@ public class PhraseQuery extends Query {
} else { } else {
return return
new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, new SloppyPhraseScorer(this, postingsFreqs, similarity, slop,
reader.norms(field)); reader.norms(field));
} }
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) public Explanation explain(ReaderContext context, int doc)
throws IOException { throws IOException {
Explanation result = new Explanation(); Explanation result = new Explanation();
@ -267,7 +268,7 @@ public class PhraseQuery extends Query {
fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+ fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+
"), product of:"); "), product of:");
Scorer scorer = scorer(reader, true, false); Scorer scorer = scorer(context, true, false);
if (scorer == null) { if (scorer == null) {
return new Explanation(0.0f, "no matching docs"); return new Explanation(0.0f, "no matching docs");
} }
@ -287,7 +288,7 @@ public class PhraseQuery extends Query {
fieldExpl.addDetail(idfExpl); fieldExpl.addDetail(idfExpl);
Explanation fieldNormExpl = new Explanation(); Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field); byte[] fieldNorms = context.reader.norms(field);
float fieldNorm = float fieldNorm =
fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f; fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
fieldNormExpl.setValue(fieldNorm); fieldNormExpl.setValue(fieldNorm);

View File

@ -18,9 +18,7 @@ package org.apache.lucene.search;
*/ */
import java.io.IOException; import java.io.IOException;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexReader;
/** /**
* Constrains search results to only match those which also match a provided * Constrains search results to only match those which also match a provided
@ -48,12 +46,14 @@ public class QueryWrapperFilter extends Filter {
} }
@Override @Override
public DocIdSet getDocIdSet(final IndexReader reader) throws IOException { public DocIdSet getDocIdSet(final ReaderContext context) throws IOException {
final Weight weight = query.weight(new IndexSearcher(reader)); // get a private context used to rewrite, create the Weight and eventually score
final ReaderContext privateContext = context.reader.getTopReaderContext();
final Weight weight = query.weight(new IndexSearcher(privateContext));
return new DocIdSet() { return new DocIdSet() {
@Override @Override
public DocIdSetIterator iterator() throws IOException { public DocIdSetIterator iterator() throws IOException {
return weight.scorer(reader, true, false); return weight.scorer(privateContext, true, false);
} }
@Override @Override
public boolean isCacheable() { return false; } public boolean isCacheable() { return false; }
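A sketch of pulling per-segment matches straight out of such a filter. It assumes a composite (multi-segment) top-level context, and the query and field are hypothetical; note that the iterator may be null since, as the hunk above shows, the underlying scorer can be:

Filter f = new QueryWrapperFilter(new TermQuery(new Term("lang", "en")));
for (AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {
  DocIdSet set = f.getDocIdSet(leaf);
  if (set == null) continue;                               // nothing matches in this segment
  DocIdSetIterator it = set.iterator();
  if (it == null) continue;                                // iterator may be null as well
  int doc;
  while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    System.out.println("top-level doc " + (leaf.docBase + doc));
  }
}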

View File

@ -17,6 +17,7 @@ package org.apache.lucene.search;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.Spans; import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSet;
@ -52,8 +53,8 @@ public class SpanQueryFilter extends SpanFilter {
} }
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
SpanFilterResult result = bitSpans(reader); SpanFilterResult result = bitSpans(context.reader);
return result.getDocIdSet(); return result.getDocIdSet();
} }

View File

@ -21,8 +21,10 @@ import java.io.IOException;
import java.util.Set; import java.util.Set;
import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation.IDFExplanation; import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
@ -75,7 +77,8 @@ public class TermQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
final IndexReader reader = context.reader;
DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(),
term.field(), term.field(),
term.bytes()); term.bytes());
@ -88,8 +91,9 @@ public class TermQuery extends Query {
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) public Explanation explain(ReaderContext context, int doc)
throws IOException { throws IOException {
final IndexReader reader = context.reader;
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:"); result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
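A quick sketch exercising the new explain path end to end, assuming IndexSearcher still exposes explain(Query, int); field and term are hypothetical. The searcher maps the top-level doc id onto the right leaf context internally, as shown in the IndexSearcher hunk earlier.

TermQuery tq = new TermQuery(new Term("title", "lucene"));
TopDocs td = searcher.search(tq, 1);
if (td.totalHits > 0) {
  Explanation exp = searcher.explain(tq, td.scoreDocs[0].doc); // top-level doc id
  System.out.println(exp);
}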

View File

@ -21,16 +21,26 @@ import java.io.IOException;
import java.io.Serializable; import java.io.Serializable;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
/** /**
* Expert: Calculate query weights and build query scorers. * Expert: Calculate query weights and build query scorers.
* <p> * <p>
* The purpose of {@link Weight} is to ensure searching does not * The purpose of {@link Weight} is to ensure searching does not modify a
* modify a {@link Query}, so that a {@link Query} instance can be reused. <br> * {@link Query}, so that a {@link Query} instance can be reused. <br>
* {@link IndexSearcher} dependent state of the query should reside in the * {@link IndexSearcher} dependent state of the query should reside in the
* {@link Weight}. <br> * {@link Weight}. <br>
* {@link IndexReader} dependent state should reside in the {@link Scorer}. * {@link IndexReader} dependent state should reside in the {@link Scorer}.
* <p> * <p>
* Since {@link Weight} creates {@link Scorer} instances for a given
* {@link ReaderContext} ({@link #scorer(ReaderContext, boolean, boolean)})
* callers must maintain the relationship between the searcher's top-level
* {@link ReaderContext} and the context used to create a {@link Scorer}. A
* {@link ReaderContext} used to create a {@link Scorer} should be a leaf
* context ({@link AtomicReaderContext}) of the searcher's top-level context,
* otherwise the scorer's state will be undefined.
* <p>
* A <code>Weight</code> is used in the following way: * A <code>Weight</code> is used in the following way:
* <ol> * <ol>
* <li>A <code>Weight</code> is constructed by a top-level query, given a * <li>A <code>Weight</code> is constructed by a top-level query, given a
@ -41,9 +51,11 @@ import org.apache.lucene.index.IndexReader;
* query. * query.
* <li>The query normalization factor is passed to {@link #normalize(float)}. At * <li>The query normalization factor is passed to {@link #normalize(float)}. At
* this point the weighting is complete. * this point the weighting is complete.
* <li>A <code>Scorer</code> is constructed by {@link #scorer(IndexReader,boolean,boolean)}. * <li>A <code>Scorer</code> is constructed by
* {@link #scorer(ReaderContext,boolean,boolean)}.
* </ol> * </ol>
* *
*
* @since 2.9 * @since 2.9
*/ */
public abstract class Weight implements Serializable { public abstract class Weight implements Serializable {
@ -51,12 +63,12 @@ public abstract class Weight implements Serializable {
/** /**
* An explanation of the score computation for the named document. * An explanation of the score computation for the named document.
* *
* @param reader sub-reader containing the give doc * @param context the readers context to create the {@link Explanation} for.
* @param doc * @param doc the document's id relative to the given context's reader
* @return an Explanation for the score * @return an Explanation for the score
* @throws IOException * @throws IOException if an {@link IOException} occurs
*/ */
public abstract Explanation explain(IndexReader reader, int doc) throws IOException; public abstract Explanation explain(ReaderContext context, int doc) throws IOException;
/** The query that this concerns. */ /** The query that this concerns. */
public abstract Query getQuery(); public abstract Query getQuery();
@ -78,9 +90,12 @@ public abstract class Weight implements Serializable {
* in-order.<br> * in-order.<br>
* <b>NOTE:</b> null can be returned if no documents will be scored by this * <b>NOTE:</b> null can be returned if no documents will be scored by this
* query. * query.
* <b>NOTE:</b> Calling this method with a {@link ReaderContext} that is not a
* leaf context ({@link AtomicReaderContext}) of the searcher's top-level context
* used to create this {@link Weight} instance can cause undefined behavior.
* *
* @param reader * @param context
* the {@link IndexReader} for which to return the {@link Scorer}. * the {@link ReaderContext} for which to return the {@link Scorer}.
* @param scoreDocsInOrder * @param scoreDocsInOrder
* specifies whether in-order scoring of documents is required. Note * specifies whether in-order scoring of documents is required. Note
* that if set to false (i.e., out-of-order scoring is required), * that if set to false (i.e., out-of-order scoring is required),
@ -96,7 +111,8 @@ public abstract class Weight implements Serializable {
* @return a {@link Scorer} which scores documents in/out-of order. * @return a {@link Scorer} which scores documents in/out-of order.
* @throws IOException * @throws IOException
*/ */
public abstract Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, // TODO make this context an AtomicContext if possible
public abstract Scorer scorer(ReaderContext context, boolean scoreDocsInOrder,
boolean topScorer) throws IOException; boolean topScorer) throws IOException;
/** The sum of squared weights of contained query clauses. */ /** The sum of squared weights of contained query clauses. */
@ -106,7 +122,7 @@ public abstract class Weight implements Serializable {
* Returns true iff this implementation scores docs only out of order. This * Returns true iff this implementation scores docs only out of order. This
* method is used in conjunction with {@link Collector}'s * method is used in conjunction with {@link Collector}'s
* {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
* {@link #scorer(org.apache.lucene.index.IndexReader, boolean, boolean)} to * {@link #scorer(ReaderContext, boolean, boolean)} to
* create a matching {@link Scorer} instance for a given {@link Collector}, or * create a matching {@link Scorer} instance for a given {@link Collector}, or
* vice versa. * vice versa.
* <p> * <p>
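The leaf-context contract above is easiest to see from the caller's side. Below is a minimal sketch (mine, not part of this commit; class and method names are invented) that uses only the API introduced here: the Weight holds the IndexSearcher-dependent state, and one Scorer is created per atomic leaf of the searcher's top-level context.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader.ReaderContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    public class PerLeafScoring {
      public static void scoreAllLeaves(Query query, IndexSearcher searcher) throws IOException {
        Weight weight = query.weight(searcher);             // IndexSearcher-dependent state
        ReaderContext top = searcher.getTopReaderContext();
        AtomicReaderContext[] leaves = top.isAtomic
            ? new AtomicReaderContext[] { (AtomicReaderContext) top }
            : top.leaves();
        for (AtomicReaderContext leaf : leaves) {           // IndexReader-dependent state
          Scorer scorer = weight.scorer(leaf, true, false); // a leaf context, never a composite
          if (scorer == null) {
            continue;                                       // no matching docs in this segment
          }
          // iterate scorer.nextDoc(); leaf.docBase turns segment-local ids into global ids
        }
      }
    }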
@@ -22,6 +22,7 @@ import java.util.Set;
 import java.util.Arrays;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
@@ -239,40 +240,40 @@ public class CustomScoreQuery extends Query {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
      // Pass true for "scoresDocsInOrder", because we
      // require in-order scoring, even if caller does not,
      // since we call advance on the valSrcScorers.  Pass
      // false for "topScorer" because we will not invoke
      // score(Collector) on these scorers:
-     Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false);
+     Scorer subQueryScorer = subQueryWeight.scorer(context, true, false);
      if (subQueryScorer == null) {
        return null;
      }
      Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
      for(int i = 0; i < valSrcScorers.length; i++) {
-       valSrcScorers[i] = valSrcWeights[i].scorer(reader, true, topScorer);
+       valSrcScorers[i] = valSrcWeights[i].scorer(context, true, topScorer);
      }
-     return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers);
+     return new CustomScorer(similarity, context.reader, this, subQueryScorer, valSrcScorers);
    }
    @Override
-   public Explanation explain(IndexReader reader, int doc) throws IOException {
-     Explanation explain = doExplain(reader, doc);
+   public Explanation explain(ReaderContext context, int doc) throws IOException {
+     Explanation explain = doExplain(context, doc);
      return explain == null ? new Explanation(0.0f, "no matching docs") : explain;
    }
-   private Explanation doExplain(IndexReader reader, int doc) throws IOException {
-     Explanation subQueryExpl = subQueryWeight.explain(reader, doc);
+   private Explanation doExplain(ReaderContext info, int doc) throws IOException {
+     Explanation subQueryExpl = subQueryWeight.explain(info, doc);
      if (!subQueryExpl.isMatch()) {
        return subQueryExpl;
      }
      // match
      Explanation[] valSrcExpls = new Explanation[valSrcWeights.length];
      for(int i = 0; i < valSrcWeights.length; i++) {
-       valSrcExpls[i] = valSrcWeights[i].explain(reader, doc);
+       valSrcExpls[i] = valSrcWeights[i].explain(info, doc);
      }
-     Explanation customExp = CustomScoreQuery.this.getCustomScoreProvider(reader).customExplain(doc,subQueryExpl,valSrcExpls);
+     Explanation customExp = CustomScoreQuery.this.getCustomScoreProvider(info.reader).customExplain(doc,subQueryExpl,valSrcExpls);
      float sc = getValue() * customExp.getValue();
      Explanation res = new ComplexExplanation(
        true, sc, CustomScoreQuery.this.toString() + ", product of:");
@@ -19,6 +19,7 @@ package org.apache.lucene.search.function;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.*;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.Bits;
@@ -98,14 +99,14 @@ public class ValueSourceQuery extends Query {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
-     return new ValueSourceScorer(similarity, reader, this);
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
+     return new ValueSourceScorer(similarity, context.reader, this);
    }
    /*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */
    @Override
-   public Explanation explain(IndexReader reader, int doc) throws IOException {
-     DocValues vals = valSrc.getValues(reader);
+   public Explanation explain(ReaderContext context, int doc) throws IOException {
+     DocValues vals = valSrc.getValues(context.reader);
      float sc = queryWeight * vals.floatVal(doc);
      Explanation result = new ComplexExplanation(
@@ -17,7 +17,7 @@ package org.apache.lucene.search.payloads;
  * limitations under the License.
  */
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.IndexSearcher;
@@ -143,10 +143,10 @@ public class PayloadNearQuery extends SpanNearQuery {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder,
        boolean topScorer) throws IOException {
-     return new PayloadNearSpanScorer(query.getSpans(reader), this,
-         similarity, reader.norms(query.getField()));
+     return new PayloadNearSpanScorer(query.getSpans(context.reader), this,
+         similarity, context.reader.norms(query.getField()));
    }
 }
@@ -17,9 +17,9 @@ package org.apache.lucene.search.payloads;
  * limitations under the License.
  */
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
@@ -74,10 +74,10 @@ public class PayloadTermQuery extends SpanTermQuery {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder,
        boolean topScorer) throws IOException {
-     return new PayloadTermSpanScorer((TermSpans) query.getSpans(reader),
-         this, similarity, reader.norms(query.getField()));
+     return new PayloadTermSpanScorer((TermSpans) query.getSpans(context.reader),
+         this, similarity, context.reader.norms(query.getField()));
    }
    protected class PayloadTermSpanScorer extends SpanScorer {
@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
  * limitations under the License.
  */
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.Explanation.IDFExplanation;
@@ -72,13 +72,13 @@ public class SpanWeight extends Weight {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
-     return new SpanScorer(query.getSpans(reader), this, similarity, reader
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
+     return new SpanScorer(query.getSpans(context.reader), this, similarity, context.reader
        .norms(query.getField()));
    }
    @Override
-   public Explanation explain(IndexReader reader, int doc)
+   public Explanation explain(ReaderContext context, int doc)
      throws IOException {
      ComplexExplanation result = new ComplexExplanation();
@@ -111,12 +111,12 @@ public class SpanWeight extends Weight {
      fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+
        " in "+doc+"), product of:");
-     Explanation tfExpl = ((SpanScorer)scorer(reader, true, false)).explain(doc);
+     Explanation tfExpl = ((SpanScorer)scorer(context, true, false)).explain(doc);
      fieldExpl.addDetail(tfExpl);
      fieldExpl.addDetail(idfExpl);
      Explanation fieldNormExpl = new Explanation();
-     byte[] fieldNorms = reader.norms(field);
+     byte[] fieldNorms = context.reader.norms(field);
      float fieldNorm =
        fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
      fieldNormExpl.setValue(fieldNorm);
@@ -19,9 +19,13 @@ package org.apache.lucene.util;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.io.IOException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.CompositeReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 /**
  * Common util methods for dealing with {@link IndexReader}s.
@@ -148,6 +152,67 @@ public final class ReaderUtil {
        .toArray(new IndexReader[subReadersList.size()]);
    return subReaders[subIndex];
  }
+
+  public static ReaderContext buildReaderContext(IndexReader reader) {
+    return new ReaderContextBuilder(reader).build();
+  }
+
+  public static class ReaderContextBuilder {
+    private final IndexReader reader;
+    private final AtomicReaderContext[] leaves;
+    private int leafOrd = 0;
+    private int leafDocBase = 0;
+
+    public ReaderContextBuilder(IndexReader reader) {
+      this.reader = reader;
+      leaves = new AtomicReaderContext[numLeaves(reader)];
+    }
+
+    public ReaderContext build() {
+      return build(null, reader, 0, 0);
+    }
+
+    private ReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
+      IndexReader[] sequentialSubReaders = reader.getSequentialSubReaders();
+      if (sequentialSubReaders == null) {
+        AtomicReaderContext atomic = new AtomicReaderContext(parent, reader, ord, docBase, leafOrd, leafDocBase);
+        leaves[leafOrd++] = atomic;
+        leafDocBase += reader.maxDoc();
+        return atomic;
+      } else {
+        ReaderContext[] children = new ReaderContext[sequentialSubReaders.length];
+        final CompositeReaderContext newParent;
+        if (parent == null) {
+          newParent = new CompositeReaderContext(reader, children, leaves);
+        } else {
+          newParent = new CompositeReaderContext(parent, reader, ord, docBase, children);
+        }
+        int newDocBase = 0;
+        for (int i = 0; i < sequentialSubReaders.length; i++) {
+          // record each child so the composite context can be traversed later
+          children[i] = build(newParent, sequentialSubReaders[i], i, newDocBase);
+          newDocBase += sequentialSubReaders[i].maxDoc();
+        }
+        return newParent;
+      }
+    }
+
+    private int numLeaves(IndexReader reader) {
+      final AtomicInteger numLeaves = new AtomicInteger();
+      try {
+        new Gather(reader) {
+          @Override
+          protected void add(int base, IndexReader r) {
+            numLeaves.incrementAndGet();
+          }
+        }.run();
+      } catch (IOException ioe) {
+        // won't happen
+        throw new RuntimeException(ioe);
+      }
+      return numLeaves.get();
+    }
+  }
 /**
@@ -175,4 +240,30 @@ public final class ReaderUtil {
    }
    return hi;
  }
+
+  /**
+   * Returns index of the searcher/reader for document <code>n</code> in the
+   * array used to construct this searcher/reader.
+   */
+  public static int subIndex(int n, AtomicReaderContext[] leaves) { // find
+    // searcher/reader for doc n:
+    int size = leaves.length;
+    int lo = 0; // search starts array
+    int hi = size - 1; // for first element less than n, return its index
+    while (hi >= lo) {
+      int mid = (lo + hi) >>> 1;
+      int midValue = leaves[mid].docBase;
+      if (n < midValue)
+        hi = mid - 1;
+      else if (n > midValue)
+        lo = mid + 1;
+      else { // found a match
+        while (mid + 1 < size && leaves[mid + 1].docBase == midValue) {
+          mid++; // scan to last match
+        }
+        return mid;
+      }
+    }
+    return hi;
+  }
 }
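A hedged usage sketch of the two helpers added above (the wrapper class is mine, and it assumes a context tree built by this code): build the context once, then use subIndex to find the leaf that owns a top-level doc id and docBase to translate it into a segment-local id.

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader.ReaderContext;
    import org.apache.lucene.util.ReaderUtil;

    public class LeafLookup {
      public static int toSegmentLocal(IndexReader topReader, int globalDoc) {
        ReaderContext top = ReaderUtil.buildReaderContext(topReader);
        AtomicReaderContext[] leaves = top.isAtomic
            ? new AtomicReaderContext[] { (AtomicReaderContext) top }
            : top.leaves();
        int i = ReaderUtil.subIndex(globalDoc, leaves); // index of the owning leaf
        return globalDoc - leaves[i].docBase;           // doc id relative to that segment
      }
    }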
@@ -20,7 +20,8 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import junit.framework.Assert;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 /**
  * A unit test helper class to test when the filter is getting cached and when it is not.
@@ -41,10 +42,10 @@ public class CachingWrapperFilterHelper extends CachingWrapperFilter {
    }
    @Override
-   public synchronized DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+   public synchronized DocIdSet getDocIdSet(ReaderContext context) throws IOException {
      final int saveMissCount = missCount;
-     DocIdSet docIdSet = super.getDocIdSet(reader);
+     DocIdSet docIdSet = super.getDocIdSet(context);
      if (shouldHaveCache) {
        Assert.assertEquals("Cache should have data ", saveMissCount, missCount);
@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import java.io.IOException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.PriorityQueue;
@@ -152,7 +153,7 @@ final class JustCompileSearch {
    // still added here in case someone will add abstract methods in the future.
    @Override
-   public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+   public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
      return null;
    }
  }
@@ -281,7 +282,7 @@ final class JustCompileSearch {
    }
    @Override
-   public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+   public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
      return null;
    }
  }
@@ -333,7 +334,7 @@ final class JustCompileSearch {
  static final class JustCompileWeight extends Weight {
    @Override
-   public Explanation explain(IndexReader reader, int doc) throws IOException {
+   public Explanation explain(ReaderContext context, int doc) throws IOException {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }
@@ -358,7 +359,7 @@ final class JustCompileSearch {
    }
    @Override
-   public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
+   public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }
@@ -17,7 +17,7 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.DocIdBitSet;
 import java.util.BitSet;
@@ -25,7 +25,7 @@ public class MockFilter extends Filter {
  private boolean wasCalled;
  @Override
- public DocIdSet getDocIdSet(IndexReader reader) {
+ public DocIdSet getDocIdSet(ReaderContext context) {
    wasCalled = true;
    return new DocIdBitSet(new BitSet());
  }
@@ -12,6 +12,8 @@ import junit.framework.Assert;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
@@ -210,14 +212,22 @@ public class QueryUtils {
        throw e2;
      }
    }
+
+  private static AtomicReaderContext[] getLeaves(IndexSearcher searcher) {
+    ReaderContext topLevelReaderContext = searcher.getTopReaderContext();
+    if (topLevelReaderContext.isAtomic) {
+      return new AtomicReaderContext[] {(AtomicReaderContext) topLevelReaderContext};
+    } else {
+      return topLevelReaderContext.leaves();
+    }
+  }
  /** alternate scorer skipTo(),skipTo(),next(),next(),skipTo(),skipTo(), etc
   * and ensure a hitcollector receives same docs and scores
   */
  public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
    //System.out.println("Checking "+q);
+   final AtomicReaderContext[] context = getLeaves(s);
    if (q.weight(s).scoresDocsOutOfOrder()) return;  // in this case order of skipTo() might differ from that of next().
    final int skip_op = 0;
@@ -247,8 +257,8 @@ public class QueryUtils {
    s.search(q, new Collector() {
      private Scorer sc;
-     private IndexReader reader;
      private Scorer scorer;
+     private int leafPtr;
      @Override
      public void setScorer(Scorer scorer) throws IOException {
@@ -262,7 +272,7 @@ public class QueryUtils {
        try {
          if (scorer == null) {
            Weight w = q.weight(s);
-           scorer = w.scorer(reader, true, false);
+           scorer = w.scorer(context[leafPtr], true, false);
          }
          int op = order[(opidx[0]++) % order.length];
@@ -305,14 +315,17 @@ public class QueryUtils {
        // previous reader, hits NO_MORE_DOCS
        if (lastReader[0] != null) {
          final IndexReader previousReader = lastReader[0];
-         Weight w = q.weight(new IndexSearcher(previousReader));
-         Scorer scorer = w.scorer(previousReader, true, false);
+         IndexSearcher indexSearcher = new IndexSearcher(previousReader);
+         Weight w = q.weight(indexSearcher);
+         Scorer scorer = w.scorer(indexSearcher.getTopReaderContext(), true, false);
          if (scorer != null) {
            boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
          }
+         leafPtr++;
        }
-       this.reader = lastReader[0] = reader;
+       lastReader[0] = reader;
+       assert context[leafPtr].reader == reader;
        this.scorer = null;
        lastDoc[0] = -1;
      }
@@ -327,8 +340,9 @@ public class QueryUtils {
      // confirm that skipping beyond the last doc, on the
      // previous reader, hits NO_MORE_DOCS
      final IndexReader previousReader = lastReader[0];
-     Weight w = q.weight(new IndexSearcher(previousReader));
-     Scorer scorer = w.scorer(previousReader, true, false);
+     IndexSearcher indexSearcher = new IndexSearcher(previousReader);
+     Weight w = q.weight(indexSearcher);
+     Scorer scorer = w.scorer(previousReader.getTopReaderContext(), true, false);
      if (scorer != null) {
        boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
        Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -343,10 +357,10 @@ public class QueryUtils {
    final float maxDiff = 1e-3f;
    final int lastDoc[] = {-1};
    final IndexReader lastReader[] = {null};
+   final ReaderContext[] context = getLeaves(s);
    s.search(q,new Collector() {
      private Scorer scorer;
-     private IndexReader reader;
+     private int leafPtr;
      @Override
      public void setScorer(Scorer scorer) throws IOException {
        this.scorer = scorer;
@@ -358,7 +372,7 @@ public class QueryUtils {
        long startMS = System.currentTimeMillis();
        for (int i=lastDoc[0]+1; i<=doc; i++) {
          Weight w = q.weight(s);
-         Scorer scorer = w.scorer(reader, true, false);
+         Scorer scorer = w.scorer(context[leafPtr], true, false);
          Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
          Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
          float skipToScore = scorer.score();
@@ -383,15 +397,17 @@ public class QueryUtils {
        // previous reader, hits NO_MORE_DOCS
        if (lastReader[0] != null) {
          final IndexReader previousReader = lastReader[0];
-         Weight w = q.weight(new IndexSearcher(previousReader));
-         Scorer scorer = w.scorer(previousReader, true, false);
+         IndexSearcher indexSearcher = new IndexSearcher(previousReader);
+         Weight w = q.weight(indexSearcher);
+         Scorer scorer = w.scorer(indexSearcher.getTopReaderContext(), true, false);
          if (scorer != null) {
            boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
            Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
          }
+         leafPtr++;
        }
-       this.reader = lastReader[0] = reader;
+       lastReader[0] = reader;
        lastDoc[0] = -1;
      }
      @Override
@@ -404,8 +420,9 @@ public class QueryUtils {
      // confirm that skipping beyond the last doc, on the
      // previous reader, hits NO_MORE_DOCS
      final IndexReader previousReader = lastReader[0];
-     Weight w = q.weight(new IndexSearcher(previousReader));
-     Scorer scorer = w.scorer(previousReader, true, false);
+     IndexSearcher indexSearcher = new IndexSearcher(previousReader);
+     Weight w = q.weight(indexSearcher);
+     Scorer scorer = w.scorer(indexSearcher.getTopReaderContext(), true, false);
      if (scorer != null) {
        boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
        Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -17,7 +17,7 @@ package org.apache.lucene.search;
  * limitations under the License.
  */
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.DocIdBitSet;
 import java.util.BitSet;
@@ -31,8 +31,8 @@ public class SingleDocTestFilter extends Filter {
  }
  @Override
- public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-   BitSet bits = new BitSet(reader.maxDoc());
+ public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
+   BitSet bits = new BitSet(context.reader.maxDoc());
    bits.set(doc);
    return new DocIdBitSet(bits);
  }
@@ -23,6 +23,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
@@ -40,20 +42,20 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
+   ReaderContext context = reader.getTopReaderContext();
    MockFilter filter = new MockFilter();
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    // first time, nested filter is called
-   cacher.getDocIdSet(reader);
+   cacher.getDocIdSet(context);
    assertTrue("first time", filter.wasCalled());
    // make sure no exception if cache is holding the wrong docIdSet
-   cacher.getDocIdSet(reader);
+   cacher.getDocIdSet(context);
    // second time, nested filter should not be called
    filter.clear();
-   cacher.getDocIdSet(reader);
+   cacher.getDocIdSet(context);
    assertFalse("second time", filter.wasCalled());
    reader.close();
@@ -66,17 +68,18 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
+   ReaderContext context = reader.getTopReaderContext();
    final Filter filter = new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) {
+     public DocIdSet getDocIdSet(ReaderContext context) {
        return null;
      }
    };
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    // the caching filter should return the empty set constant
-   assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
+   assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(context));
    reader.close();
    dir.close();
@@ -88,10 +91,11 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
    writer.close();
    IndexReader reader = IndexReader.open(dir, true);
+   ReaderContext context = reader.getTopReaderContext();
    final Filter filter = new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) {
+     public DocIdSet getDocIdSet(ReaderContext context) {
        return new DocIdSet() {
          @Override
          public DocIdSetIterator iterator() {
@@ -103,16 +107,17 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    // the caching filter should return the empty set constant
-   assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(reader));
+   assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(context));
    reader.close();
    dir.close();
  }
  private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
+   ReaderContext context = reader.getTopReaderContext();
    final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
-   final DocIdSet originalSet = filter.getDocIdSet(reader);
-   final DocIdSet cachedSet = cacher.getDocIdSet(reader);
+   final DocIdSet originalSet = filter.getDocIdSet(context);
+   final DocIdSet cachedSet = cacher.getDocIdSet(context);
    assertTrue(cachedSet.isCacheable());
    assertEquals(shouldCacheable, originalSet.isCacheable());
    //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
@@ -140,7 +145,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
    // a openbitset filter is always cacheable
    assertDocIdSetCacheable(reader, new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) {
+     public DocIdSet getDocIdSet(ReaderContext context) {
        return new OpenBitSet();
      }
    }, true);
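A hedged sketch of the caching behavior these tests exercise (class and method names are mine): the first getDocIdSet call per reader delegates to the wrapped filter and caches the result, later calls with the same context are cache hits, and a null or empty inner set comes back as the shared DocIdSet.EMPTY_DOCIDSET constant.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexReader.ReaderContext;
    import org.apache.lucene.search.CachingWrapperFilter;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.Filter;

    public class CachingFilterDemo {
      public static DocIdSet cachedSet(IndexReader reader, Filter wrapped) throws IOException {
        ReaderContext context = reader.getTopReaderContext();
        CachingWrapperFilter cacher = new CachingWrapperFilter(wrapped);
        DocIdSet first = cacher.getDocIdSet(context);   // first call: delegates to 'wrapped' and caches
        DocIdSet second = cacher.getDocIdSet(context);  // second call: cache hit, 'wrapped' not invoked
        return second;
      }
    }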
@@ -25,6 +25,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.store.Directory;
 import java.text.DecimalFormat;
@@ -163,9 +164,9 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
    dq.add(tq("dek", "DOES_NOT_EXIST"));
    QueryUtils.check(random, dq, s);
+   assertTrue(s.getTopReaderContext().isAtomic);
    final Weight dw = dq.weight(s);
-   final Scorer ds = dw.scorer(s.getIndexReader(), true, false);
+   final Scorer ds = dw.scorer(s.getTopReaderContext(), true, false);
    final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
    if (skipOk) {
      fail("firsttime skipTo found a match? ... "
@@ -177,11 +178,10 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
    final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
    dq.add(tq("dek", "albino"));
    dq.add(tq("dek", "DOES_NOT_EXIST"));
+   assertTrue(s.getTopReaderContext().isAtomic);
    QueryUtils.check(random, dq, s);
    final Weight dw = dq.weight(s);
-   final Scorer ds = dw.scorer(s.getIndexReader(), true, false);
+   final Scorer ds = dw.scorer(s.getTopReaderContext(), true, false);
    assertTrue("firsttime skipTo found no match",
        ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
@@ -28,6 +28,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -114,7 +115,7 @@ public class TestDocIdSet extends LuceneTestCase {
    // Now search w/ a Filter which returns a null DocIdSet
    Filter f = new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
+     public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
        return null;
      }
    };
@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -87,7 +88,7 @@ public class TestFilteredQuery extends LuceneTestCase {
  private static Filter newStaticFilterB() {
    return new Filter() {
      @Override
-     public DocIdSet getDocIdSet (IndexReader reader) {
+     public DocIdSet getDocIdSet (ReaderContext context) {
        BitSet bitset = new BitSet(5);
        bitset.set (1);
        bitset.set (3);
@@ -158,7 +159,7 @@ public class TestFilteredQuery extends LuceneTestCase {
  private static Filter newStaticFilterA() {
    return new Filter() {
      @Override
-     public DocIdSet getDocIdSet (IndexReader reader) {
+     public DocIdSet getDocIdSet (ReaderContext context) {
        BitSet bitset = new BitSet(5);
        bitset.set(0, 5);
        return new DocIdBitSet(bitset);
@@ -216,7 +217,7 @@ public class TestFilteredQuery extends LuceneTestCase {
    bq.add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.SHOULD);
    ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
    assertEquals(1, hits.length);
-   QueryUtils.check(random, query,searcher);
+   QueryUtils.check(random, query, searcher);
  }
 }
@@ -25,6 +25,8 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -33,6 +35,7 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.OpenBitSet;
 /**
  *
  */
@@ -59,7 +62,7 @@ public class TestFilteredSearch extends LuceneTestCase {
    directory.close();
  }
- public void searchFiltered(IndexWriter writer, Directory directory, SimpleDocIdSetFilter filter, boolean optimize) {
+ public void searchFiltered(IndexWriter writer, Directory directory, Filter filter, boolean optimize) {
    try {
      for (int i = 0; i < 60; i++) {//Simple docs
        Document doc = new Document();
@@ -75,7 +78,6 @@ public class TestFilteredSearch extends LuceneTestCase {
      IndexSearcher indexSearcher = new IndexSearcher(directory, true);
-     filter.setTopReader(indexSearcher.getIndexReader());
      ScoreDoc[] hits = indexSearcher.search(booleanQuery, filter, 1000).scoreDocs;
      assertEquals("Number of matched documents", 1, hits.length);
      indexSearcher.close();
@@ -89,20 +91,17 @@ public class TestFilteredSearch extends LuceneTestCase {
  public static final class SimpleDocIdSetFilter extends Filter {
    private final int[] docs;
    private int index;
-   private IndexReader topReader;
    public SimpleDocIdSetFilter(int[] docs) {
      this.docs = docs;
    }
-   public void setTopReader(IndexReader r) {
-     topReader = r;
-   }
    @Override
-   public DocIdSet getDocIdSet(IndexReader reader) {
+   public DocIdSet getDocIdSet(ReaderContext context) {
+     assert context.isAtomic;
      final OpenBitSet set = new OpenBitSet();
-     int docBase = topReader.getSubReaderDocBase(reader);
-     final int limit = docBase+reader.maxDoc();
+     int docBase = ((AtomicReaderContext)context).docBase;
+     final int limit = docBase+context.reader.maxDoc();
      for (;index < docs.length; index++) {
        final int docId = docs[index];
        if(docId > limit)
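The docBase arithmetic in SimpleDocIdSetFilter generalizes to any filter built from top-level doc ids. A hedged, self-contained variant (the class is hypothetical, not part of this commit): each per-segment getDocIdSet call sees segment-local ids, so a filter that stores global ids must subtract the leaf's docBase before setting bits.

    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader.ReaderContext;
    import org.apache.lucene.search.DocIdSet;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.util.OpenBitSet;

    public class GlobalDocsFilter extends Filter {
      private final int[] globalDocs; // top-level doc ids

      public GlobalDocsFilter(int[] globalDocs) {
        this.globalDocs = globalDocs;
      }

      @Override
      public DocIdSet getDocIdSet(ReaderContext context) {
        assert context.isAtomic;
        final int docBase = ((AtomicReaderContext) context).docBase;
        final int maxDoc = context.reader.maxDoc();
        final OpenBitSet set = new OpenBitSet(maxDoc); // OpenBitSet is itself a DocIdSet
        for (int globalId : globalDocs) {
          final int local = globalId - docBase;        // map global id into this segment
          if (local >= 0 && local < maxDoc) {
            set.set(local);
          }
        }
        return set;
      }
    }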
@@ -177,13 +177,13 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
  @Test
  public void testInverseRange() throws Exception {
    NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
-   assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+   assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
    f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
    assertSame("A exclusive range starting with Integer.MAX_VALUE should return the EMPTY_DOCIDSET instance",
-     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
    f = NumericRangeFilter.newIntRange("field8", 8, null, Integer.MIN_VALUE, false, false);
    assertSame("A exclusive range ending with Integer.MIN_VALUE should return the EMPTY_DOCIDSET instance",
-     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
  }
  @Test
@@ -182,13 +182,14 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
  @Test
  public void testInverseRange() throws Exception {
    NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
-   assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+   assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET,
+     f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
    f = NumericRangeFilter.newLongRange("field8", 8, Long.MAX_VALUE, null, false, false);
    assertSame("A exclusive range starting with Long.MAX_VALUE should return the EMPTY_DOCIDSET instance",
-     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
    f = NumericRangeFilter.newLongRange("field8", 8, null, Long.MIN_VALUE, false, false);
    assertSame("A exclusive range ending with Long.MIN_VALUE should return the EMPTY_DOCIDSET instance",
-     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader())));
+     DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(new SlowMultiReaderWrapper(searcher.getIndexReader()).getTopReaderContext()));
  }
  @Test
@@ -7,6 +7,7 @@ import java.util.BitSet;
 import java.io.IOException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -141,7 +142,7 @@ public class TestScorerPerf extends LuceneTestCase {
    final BitSet rnd = sets[random.nextInt(sets.length)];
    Query q = new ConstantScoreQuery(new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) {
+     public DocIdSet getDocIdSet(ReaderContext context) {
        return new DocIdBitSet(rnd);
      }
    });
@@ -34,6 +34,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
@@ -687,9 +688,9 @@ public class TestSort extends LuceneTestCase implements Serializable {
    // a filter that only allows through the first hit
    Filter filt = new Filter() {
      @Override
-     public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
-       BitSet bs = new BitSet(reader.maxDoc());
-       bs.set(0, reader.maxDoc());
+     public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
+       BitSet bs = new BitSet(context.reader.maxDoc());
+       bs.set(0, context.reader.maxDoc());
        bs.set(docs1.scoreDocs[0].doc);
        return new DocIdBitSet(bs);
      }
@@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.store.Directory;
 public class TestTermScorer extends LuceneTestCase {
@@ -71,7 +72,7 @@ public class TestTermScorer extends LuceneTestCase {
    Weight weight = termQuery.weight(indexSearcher);
-   Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+   Scorer ts = weight.scorer(indexSearcher.getTopReaderContext(), true, true);
    // we have 2 documents with the term all in them, one document for all the
    // other values
    final List<TestHit> docs = new ArrayList<TestHit>();
@@ -132,7 +133,7 @@ public class TestTermScorer extends LuceneTestCase {
    Weight weight = termQuery.weight(indexSearcher);
-   Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+   Scorer ts = weight.scorer(indexSearcher.getTopReaderContext(), true, true);
    assertTrue("next did not return a doc",
        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -150,7 +151,7 @@ public class TestTermScorer extends LuceneTestCase {
    Weight weight = termQuery.weight(indexSearcher);
-   Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+   Scorer ts = weight.scorer(indexSearcher.getTopReaderContext(), true, true);
    assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
    // The next doc should be doc 5
    assertTrue("doc should be number 5", ts.docID() == 5);
@@ -168,7 +168,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
  public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = q.weight(searcher);
-   Scorer s = w.scorer(searcher.getIndexReader(), true, false);
+   Scorer s = w.scorer(searcher.getTopReaderContext(), true, false);
    assertEquals(1, s.advance(1));
  }
  /**
@@ -177,7 +177,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
   */
  public void testSpanNearScorerExplain() throws Exception {
    SpanNearQuery q = makeQuery();
-   Explanation e = q.weight(searcher).explain(searcher.getIndexReader(), 1);
+   Explanation e = q.weight(searcher).explain(searcher.getTopReaderContext(), 1);
    assertTrue("Scorer explanation value for doc#1 isn't positive: "
        + e.toString(),
        0.0f < e.getValue());
@@ -29,6 +29,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.SlowMultiReaderWrapper;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
@@ -420,7 +421,7 @@ public class TestSpans extends LuceneTestCase {
      }
    };
-   Scorer spanScorer = snq.weight(searcher).scorer(new SlowMultiReaderWrapper(searcher.getIndexReader()), true, false);
+   Scorer spanScorer = snq.weight(searcher).scorer(new AtomicReaderContext(new SlowMultiReaderWrapper(searcher.getIndexReader())), true, false);
    assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals("first doc number", spanScorer.docID(), 11);
@ -1,5 +1,8 @@
package org.apache.solr.request; package org.apache.solr.request;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
@ -67,10 +70,7 @@ class PerSegmentSingleValuedFaceting {
// reuse the translation logic to go from top level set to per-segment set // reuse the translation logic to go from top level set to per-segment set
baseSet = docs.getTopFilter(); baseSet = docs.getTopFilter();
SolrIndexReader topReader = searcher.getReader(); final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves();
final SolrIndexReader[] leafReaders = topReader.getLeafReaders();
int[] offsets = topReader.getLeafOffsets();
// The list of pending tasks that aren't immediately submitted // The list of pending tasks that aren't immediately submitted
// TODO: Is there a completion service, or a delegating executor that can // TODO: Is there a completion service, or a delegating executor that can
// limit the number of concurrent tasks submitted to a bigger executor? // limit the number of concurrent tasks submitted to a bigger executor?
@ -78,8 +78,8 @@ class PerSegmentSingleValuedFaceting {
int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads; int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
for (int i=0; i<leafReaders.length; i++) { for (int i=0; i<leaves.length; i++) {
final SegFacet segFacet = new SegFacet(leafReaders[i], offsets[i]); final SegFacet segFacet = new SegFacet(leaves[i]);
Callable<SegFacet> task = new Callable<SegFacet>() { Callable<SegFacet> task = new Callable<SegFacet>() {
public SegFacet call() throws Exception { public SegFacet call() throws Exception {
@ -101,7 +101,7 @@ class PerSegmentSingleValuedFaceting {
// now merge the per-segment results // now merge the per-segment results
PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>() { PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>() {
{ {
initialize(leafReaders.length); initialize(leaves.length);
} }
@Override @Override
protected boolean lessThan(SegFacet a, SegFacet b) { protected boolean lessThan(SegFacet a, SegFacet b) {
@ -112,7 +112,7 @@ class PerSegmentSingleValuedFaceting {
boolean hasMissingCount=false; boolean hasMissingCount=false;
int missingCount=0; int missingCount=0;
for (int i=0; i<leafReaders.length; i++) { for (int i=0; i<leaves.length; i++) {
SegFacet seg = null; SegFacet seg = null;
try { try {
@ -209,12 +209,9 @@ class PerSegmentSingleValuedFaceting {
} }
class SegFacet { class SegFacet {
SolrIndexReader reader; ReaderContext info;
int readerOffset; SegFacet(ReaderContext info) {
this.info = info;
SegFacet(SolrIndexReader reader, int readerOffset) {
this.reader = reader;
this.readerOffset = readerOffset;
} }
FieldCache.DocTermsIndex si; FieldCache.DocTermsIndex si;
@ -228,7 +225,7 @@ class PerSegmentSingleValuedFaceting {
BytesRef tempBR = new BytesRef(); BytesRef tempBR = new BytesRef();
void countTerms() throws IOException { void countTerms() throws IOException {
si = FieldCache.DEFAULT.getTermsIndex(reader, fieldName); si = FieldCache.DEFAULT.getTermsIndex(info.reader, fieldName);
// SolrCore.log.info("reader= " + reader + " FC=" + System.identityHashCode(si)); // SolrCore.log.info("reader= " + reader + " FC=" + System.identityHashCode(si));
if (prefix!=null) { if (prefix!=null) {
@ -250,7 +247,7 @@ class PerSegmentSingleValuedFaceting {
// count collection array only needs to be as big as the number of terms we are // count collection array only needs to be as big as the number of terms we are
// going to collect counts for. // going to collect counts for.
final int[] counts = this.counts = new int[nTerms]; final int[] counts = this.counts = new int[nTerms];
DocIdSet idSet = baseSet.getDocIdSet(reader); DocIdSet idSet = baseSet.getDocIdSet(info);
DocIdSetIterator iter = idSet.iterator(); DocIdSetIterator iter = idSet.iterator();
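The per-segment loop that replaces the old leafReaders/offsets bookkeeping reduces to the following shape; a sketch assuming the leaves() array from getTopReaderContext() as introduced above, with the per-term counting body elided:

    // Sketch of the new per-segment iteration (names taken from the hunk above).
    final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves();
    for (AtomicReaderContext leaf : leaves) {
      DocIdSet idSet = baseSet.getDocIdSet(leaf);      // segment-local doc ids
      DocIdSetIterator iter = idSet.iterator();
      int doc;
      while ((doc = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        // doc is relative to leaf.reader; no manual offset arithmetic required
      }
    }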

View File

@ -19,6 +19,7 @@ package org.apache.solr.schema;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable; import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
import org.apache.lucene.spatial.DistanceUtils; import org.apache.lucene.spatial.DistanceUtils;
@ -27,7 +28,6 @@ import org.apache.lucene.util.Bits;
import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter; import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser; import org.apache.solr.search.QParser;
import org.apache.solr.search.SolrIndexReader;
import org.apache.solr.search.SpatialOptions; import org.apache.solr.search.SpatialOptions;
import org.apache.solr.search.function.DocValues; import org.apache.solr.search.function.DocValues;
import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.function.ValueSource;
@ -371,18 +371,13 @@ class SpatialDistanceQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new SpatialScorer(getSimilarity(searcher), reader, this); return new SpatialScorer(getSimilarity(searcher), context.reader, this);
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext context, int doc) throws IOException {
SolrIndexReader topReader = (SolrIndexReader)reader; return ((SpatialScorer)scorer(context, true, true)).explain(doc);
SolrIndexReader[] subReaders = topReader.getLeafReaders();
int[] offsets = topReader.getLeafOffsets();
int readerPos = SolrIndexReader.readerIndex(doc, offsets);
int readerBase = offsets[readerPos];
return ((SpatialScorer)scorer(subReaders[readerPos], true, true)).explain(doc-readerBase);
} }
} }

View File

@ -23,6 +23,7 @@ import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter; import org.apache.lucene.search.Filter;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import java.io.IOException; import java.io.IOException;
@ -246,8 +247,9 @@ abstract class DocSetBase implements DocSet {
return new Filter() { return new Filter() {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext ctx) throws IOException {
int offset = 0; int offset = 0;
IndexReader reader = ctx.reader;
SolrIndexReader r = (SolrIndexReader)reader; SolrIndexReader r = (SolrIndexReader)reader;
while (r.getParent() != null) { while (r.getParent() != null) {
offset += r.getBase(); offset += r.getBase();
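Because the top-level DocSet holds absolute docids, the filter still has to recover the segment's base by walking up the reader tree. The pattern from the hunk above, isolated as a sketch; the loop-advance line is an assumed continuation of the truncated hunk:

    // Sketch: recover this sub-reader's docid offset within the top-level reader.
    int offset = 0;
    SolrIndexReader r = (SolrIndexReader) ctx.reader;
    while (r.getParent() != null) {
      offset += r.getBase();   // accumulate the base at each level
      r = r.getParent();       // assumed: step up to the parent reader
    }
    // offset now translates leaf-relative docids into top-level DocSet positions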

View File

@ -2,6 +2,7 @@ package org.apache.solr.search;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.function.ValueSource;
import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException;
@ -89,14 +90,14 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new ConstantScorer(similarity, reader, this); return new ConstantScorer(similarity, context, this);
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext context, int doc) throws IOException {
ConstantScorer cs = new ConstantScorer(similarity, reader, this); ConstantScorer cs = new ConstantScorer(similarity, context, this);
boolean exists = cs.docIdSetIterator.advance(doc) == doc; boolean exists = cs.docIdSetIterator.advance(doc) == doc;
ComplexExplanation result = new ComplexExplanation(); ComplexExplanation result = new ComplexExplanation();
@ -123,10 +124,10 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery {
final float theScore; final float theScore;
int doc = -1; int doc = -1;
public ConstantScorer(Similarity similarity, IndexReader reader, ConstantWeight w) throws IOException { public ConstantScorer(Similarity similarity, ReaderContext info, ConstantWeight w) throws IOException {
super(similarity); super(similarity);
theScore = w.getValue(); theScore = w.getValue();
DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(w.context, reader) : filter.getDocIdSet(reader); DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(w.context, info) : filter.getDocIdSet(info);
if (docIdSet == null) { if (docIdSet == null) {
docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator(); docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
} else { } else {
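The ConstantScorer constructor above dispatches on the filter type so SolrFilters receive the weight's context map alongside the ReaderContext; the idiom in isolation, with names assumed from the hunk:

    // Sketch of the filter dispatch now that both overloads take a ReaderContext.
    DocIdSet docIdSet = (filter instanceof SolrFilter)
        ? ((SolrFilter) filter).getDocIdSet(w.context, info)   // context-aware Solr filter
        : filter.getDocIdSet(info);                            // plain Lucene filter
    docIdSetIterator = (docIdSet == null)
        ? DocIdSet.EMPTY_DOCIDSET.iterator()
        : docIdSet.iterator();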

View File

@ -21,6 +21,7 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import java.util.Map; import java.util.Map;
import java.io.IOException; import java.io.IOException;
@ -37,10 +38,10 @@ public abstract class SolrFilter extends Filter {
* The context object will be passed to getDocIdSet() where this info can be retrieved. */ * The context object will be passed to getDocIdSet() where this info can be retrieved. */
public abstract void createWeight(Map context, IndexSearcher searcher) throws IOException; public abstract void createWeight(Map context, IndexSearcher searcher) throws IOException;
public abstract DocIdSet getDocIdSet(Map context, IndexReader reader) throws IOException; public abstract DocIdSet getDocIdSet(Map context, ReaderContext readerContext) throws IOException;
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
return getDocIdSet(null, reader); return getDocIdSet(null, context);
} }
} }
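A hypothetical SolrFilter implementation against the revised two-argument signature. The class name and its trivial match-all behavior are illustrative only; the method signatures come from the hunk above, and OpenBitSet (used as a DocIdSet) appears elsewhere in this commit:

    // Illustrative only: a filter that accepts every doc in the given (sub-)reader.
    public class MatchAllSolrFilter extends SolrFilter {
      @Override
      public void createWeight(Map context, IndexSearcher searcher) throws IOException {
        // nothing to precompute in this sketch
      }
      @Override
      public DocIdSet getDocIdSet(Map context, ReaderContext readerContext) throws IOException {
        int maxDoc = readerContext.reader.maxDoc();
        OpenBitSet bits = new OpenBitSet(maxDoc);  // OpenBitSet is itself a DocIdSet
        bits.set(0, maxDoc);
        return bits;
      }
    }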

View File

@ -19,12 +19,14 @@ package org.apache.solr.search;
import org.apache.lucene.index.*; import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
@ -40,6 +42,7 @@ public class SolrIndexReader extends FilterIndexReader {
private int[] leafOffsets; private int[] leafOffsets;
private final SolrIndexReader parent; private final SolrIndexReader parent;
private final int base; // docid offset of this reader within parent private final int base; // docid offset of this reader within parent
private final ReaderContext topLevelContext;
private static int[] zeroIntArray = new int[]{0}; private static int[] zeroIntArray = new int[]{0};
@ -79,7 +82,7 @@ public class SolrIndexReader extends FilterIndexReader {
leafReaders = new SolrIndexReader[]{this}; leafReaders = new SolrIndexReader[]{this};
leafOffsets = zeroIntArray; leafOffsets = zeroIntArray;
} }
topLevelContext = ReaderUtil.buildReaderContext(this);
} }
private SolrIndexReader[] getLeaves(int numLeaves) { private SolrIndexReader[] getLeaves(int numLeaves) {
@ -363,11 +366,6 @@ public class SolrIndexReader extends FilterIndexReader {
return subReaders; return subReaders;
} }
@Override
public int getSubReaderDocBase(IndexReader subReader) {
return in.getSubReaderDocBase(subReader);
}
@Override @Override
public int hashCode() { public int hashCode() {
return in.hashCode(); return in.hashCode();
@ -493,6 +491,11 @@ public class SolrIndexReader extends FilterIndexReader {
public int getTermInfosIndexDivisor() { public int getTermInfosIndexDivisor() {
return in.getTermInfosIndexDivisor(); return in.getTermInfosIndexDivisor();
} }
@Override
public ReaderContext getTopReaderContext() {
return topLevelContext;
}
} }
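From the consumer side, the cached context exposed by getTopReaderContext() replaces the old getLeafReaders()/getLeafOffsets() pair. A sketch using only members visible elsewhere in this commit (isAtomic, leaves()); the variable names are illustrative:

    // Sketch: per-segment work driven entirely from the cached ReaderContext.
    ReaderContext top = solrIndexReader.getTopReaderContext();
    if (top.isAtomic) {
      // single segment: the top-level context is already atomic
    } else {
      for (AtomicReaderContext leaf : top.leaves()) {
        // work against leaf.reader with leaf-relative docids; no hand-kept offsets
      }
    }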

View File

@ -904,7 +904,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
* This method is not cache-aware and no caches are checked. * This method is not cache-aware and no caches are checked.
*/ */
public DocSet convertFilter(Filter lfilter) throws IOException { public DocSet convertFilter(Filter lfilter) throws IOException {
DocIdSet docSet = lfilter.getDocIdSet(this.reader); DocIdSet docSet = lfilter.getDocIdSet(this.reader.getTopReaderContext());
OpenBitSet obs = new OpenBitSet(); OpenBitSet obs = new OpenBitSet();
DocIdSetIterator it = docSet.iterator(); DocIdSetIterator it = docSet.iterator();
int doc; int doc;

View File

@ -22,6 +22,7 @@ import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter; import org.apache.lucene.search.Filter;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import java.io.IOException; import java.io.IOException;
@ -551,8 +552,9 @@ public class SortedIntDocSet extends DocSetBase {
int lastEndIdx = 0; int lastEndIdx = 0;
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
int offset = 0; int offset = 0;
IndexReader reader = context.reader;
SolrIndexReader r = (SolrIndexReader)reader; SolrIndexReader r = (SolrIndexReader)reader;
while (r.getParent() != null) { while (r.getParent() != null) {
offset += r.getBase(); offset += r.getBase();

View File

@ -19,6 +19,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.ToStringUtils;
import org.apache.solr.search.SolrIndexReader; import org.apache.solr.search.SolrIndexReader;
@ -91,33 +92,26 @@ public class BoostedQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
Scorer subQueryScorer = qWeight.scorer(reader, true, false); Scorer subQueryScorer = qWeight.scorer(context, true, false);
if(subQueryScorer == null) { if(subQueryScorer == null) {
return null; return null;
} }
return new BoostedQuery.CustomScorer(getSimilarity(searcher), searcher, reader, this, subQueryScorer, boostVal); return new BoostedQuery.CustomScorer(getSimilarity(searcher), searcher, context.reader, this, subQueryScorer, boostVal);
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext readerContext, int doc) throws IOException {
SolrIndexReader topReader = (SolrIndexReader)reader; Explanation subQueryExpl = qWeight.explain(readerContext,doc);
SolrIndexReader[] subReaders = topReader.getLeafReaders();
int[] offsets = topReader.getLeafOffsets();
int readerPos = SolrIndexReader.readerIndex(doc, offsets);
int readerBase = offsets[readerPos];
Explanation subQueryExpl = qWeight.explain(reader,doc);
if (!subQueryExpl.isMatch()) { if (!subQueryExpl.isMatch()) {
return subQueryExpl; return subQueryExpl;
} }
DocValues vals = boostVal.getValues(context, readerContext.reader);
DocValues vals = boostVal.getValues(context, subReaders[readerPos]); float sc = subQueryExpl.getValue() * vals.floatVal(doc);
float sc = subQueryExpl.getValue() * vals.floatVal(doc-readerBase);
Explanation res = new ComplexExplanation( Explanation res = new ComplexExplanation(
true, sc, BoostedQuery.this.toString() + ", product of:"); true, sc, BoostedQuery.this.toString() + ", product of:");
res.addDetail(subQueryExpl); res.addDetail(subQueryExpl);
res.addDetail(vals.explain(doc-readerBase)); res.addDetail(vals.explain(doc));
return res; return res;
} }
} }
@ -168,7 +162,7 @@ public class BoostedQuery extends Query {
} }
public Explanation explain(int doc) throws IOException { public Explanation explain(int doc) throws IOException {
Explanation subQueryExpl = weight.qWeight.explain(reader,doc); Explanation subQueryExpl = weight.qWeight.explain(reader.getTopReaderContext(), doc);
if (!subQueryExpl.isMatch()) { if (!subQueryExpl.isMatch()) {
return subQueryExpl; return subQueryExpl;
} }

View File

@ -18,10 +18,10 @@
package org.apache.solr.search.function; package org.apache.solr.search.function;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
import org.apache.solr.search.SolrIndexReader;
import java.io.IOException; import java.io.IOException;
import java.util.Set; import java.util.Set;
@ -94,18 +94,13 @@ public class FunctionQuery extends Query {
} }
@Override @Override
public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { public Scorer scorer(ReaderContext context, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
return new AllScorer(getSimilarity(searcher), reader, this); return new AllScorer(getSimilarity(searcher), context, this);
} }
@Override @Override
public Explanation explain(IndexReader reader, int doc) throws IOException { public Explanation explain(ReaderContext context, int doc) throws IOException {
SolrIndexReader topReader = (SolrIndexReader)reader; return ((AllScorer)scorer(context, true, true)).explain(doc);
SolrIndexReader[] subReaders = topReader.getLeafReaders();
int[] offsets = topReader.getLeafOffsets();
int readerPos = SolrIndexReader.readerIndex(doc, offsets);
int readerBase = offsets[readerPos];
return ((AllScorer)scorer(subReaders[readerPos], true, true)).explain(doc-readerBase);
} }
} }
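SpatialDistanceQuery above and FunctionQuery here converge on the same simplification: explain() now receives the same (atomic) context as scorer(), with a context-relative doc, so it can delegate outright. The shared shape, as a sketch:

    // Pattern shared by the rewritten explain() implementations in this commit.
    @Override
    public Explanation explain(ReaderContext context, int doc) throws IOException {
      // doc is already relative to context.reader; no leaf lookup, no docBase math
      return ((AllScorer) scorer(context, true, true)).explain(doc);
    }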
@ -119,16 +114,18 @@ public class FunctionQuery extends Query {
final boolean hasDeletions; final boolean hasDeletions;
final Bits delDocs; final Bits delDocs;
public AllScorer(Similarity similarity, IndexReader reader, FunctionWeight w) throws IOException { public AllScorer(Similarity similarity, ReaderContext context, FunctionWeight w) throws IOException {
super(similarity); super(similarity);
this.weight = w; this.weight = w;
this.qWeight = w.getValue(); this.qWeight = w.getValue();
this.reader = reader; this.reader = context.reader;
this.maxDoc = reader.maxDoc(); this.maxDoc = reader.maxDoc();
this.hasDeletions = reader.hasDeletions(); this.hasDeletions = reader.hasDeletions();
this.delDocs = MultiFields.getDeletedDocs(reader); this.delDocs = MultiFields.getDeletedDocs(reader);
assert !hasDeletions || delDocs != null; assert !hasDeletions || delDocs != null;
vals = func.getValues(weight.context, reader); Map funcContext = weight.context;
funcContext.put(reader, context);
vals = func.getValues(funcContext, reader);
} }
@Override @Override

View File

@ -90,7 +90,7 @@ class QueryDocValues extends DocValues {
try { try {
if (doc < lastDocRequested) { if (doc < lastDocRequested) {
// out-of-order access.... reset scorer. // out-of-order access.... reset scorer.
scorer = weight.scorer(reader, true, false); scorer = weight.scorer(reader.getTopReaderContext(), true, false);
if (scorer==null) return defVal; if (scorer==null) return defVal;
scorerDoc = -1; scorerDoc = -1;
} }

View File

@ -20,7 +20,7 @@ package org.apache.solr.search.function;
import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.solr.search.SolrFilter; import org.apache.solr.search.SolrFilter;
import java.io.IOException; import java.io.IOException;
@ -49,10 +49,10 @@ public class ValueSourceRangeFilter extends SolrFilter {
this.includeUpper = upperVal != null && includeUpper; this.includeUpper = upperVal != null && includeUpper;
} }
public DocIdSet getDocIdSet(final Map context, final IndexReader reader) throws IOException { public DocIdSet getDocIdSet(final Map context, final ReaderContext readerContext) throws IOException {
return new DocIdSet() { return new DocIdSet() {
public DocIdSetIterator iterator() throws IOException { public DocIdSetIterator iterator() throws IOException {
return valueSource.getValues(context, reader).getRangeScorer(reader, lowerVal, upperVal, includeLower, includeUpper); return valueSource.getValues(context, readerContext.reader).getRangeScorer(readerContext.reader, lowerVal, upperVal, includeLower, includeUpper);
} }
}; };
} }

View File

@ -25,6 +25,7 @@ import java.io.FileReader;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.StringReader; import java.io.StringReader;
import java.net.ConnectException;
import java.net.URL; import java.net.URL;
import org.apache.commons.io.IOUtils; import org.apache.commons.io.IOUtils;
@ -65,12 +66,16 @@ public class ContentStreamTest extends LuceneTestCase
{ {
String content = null; String content = null;
URL url = new URL( "http://svn.apache.org/repos/asf/lucene/dev/trunk/" ); URL url = new URL( "http://svn.apache.org/repos/asf/lucene/dev/trunk/" );
InputStream in = url.openStream(); InputStream in = null;
try { try {
in = url.openStream();
content = IOUtils.toString( in ); content = IOUtils.toString( in );
} } catch (ConnectException ex) {
finally { assumeNoException("Unable to connect to " + url + " to run the test.", ex);
IOUtils.closeQuietly(in); } finally {
if (in != null) {
IOUtils.closeQuietly(in);
}
} }
assertTrue( content.length() > 10 ); // found something... assertTrue( content.length() > 10 ); // found something...
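The try/catch rearrangement above is a general pattern for network-dependent tests: treat an unreachable host as a skipped test rather than a failure. A condensed sketch, assuming assumeNoException from the test framework used here:

    // Sketch: skip, don't fail, when the external host is unreachable.
    InputStream in = null;
    try {
      in = url.openStream();
      content = IOUtils.toString(in);
    } catch (ConnectException ex) {
      assumeNoException("Unable to connect to " + url, ex);  // marks the test as skipped
    } finally {
      if (in != null) IOUtils.closeQuietly(in);
    }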

View File

@ -26,6 +26,7 @@ import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.OpenBitSetIterator; import org.apache.lucene.util.OpenBitSetIterator;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.FilterIndexReader; import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.Filter; import org.apache.lucene.search.Filter;
import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSet;
@ -404,6 +405,7 @@ public class TestDocSet extends LuceneTestCase {
} }
public void doFilterTest(SolrIndexReader reader) throws IOException { public void doFilterTest(SolrIndexReader reader) throws IOException {
ReaderContext topLevelContext = reader.getTopReaderContext();
OpenBitSet bs = getRandomSet(reader.maxDoc(), rand.nextInt(reader.maxDoc()+1)); OpenBitSet bs = getRandomSet(reader.maxDoc(), rand.nextInt(reader.maxDoc()+1));
DocSet a = new BitDocSet(bs); DocSet a = new BitDocSet(bs);
DocSet b = getIntDocSet(bs); DocSet b = getIntDocSet(bs);
@ -412,23 +414,23 @@ public class TestDocSet extends LuceneTestCase {
Filter fb = b.getTopFilter(); Filter fb = b.getTopFilter();
// test top-level // test top-level
DocIdSet da = fa.getDocIdSet(reader); DocIdSet da = fa.getDocIdSet(topLevelContext);
DocIdSet db = fb.getDocIdSet(reader); DocIdSet db = fb.getDocIdSet(topLevelContext);
doTestIteratorEqual(da, db); doTestIteratorEqual(da, db);
// first test in-sequence sub readers // first test in-sequence sub readers
for (SolrIndexReader sir : reader.getLeafReaders()) { for (ReaderContext readerInfo : topLevelContext.leaves()) {
da = fa.getDocIdSet(sir); da = fa.getDocIdSet(readerInfo);
db = fb.getDocIdSet(sir); db = fb.getDocIdSet(readerInfo);
doTestIteratorEqual(da, db); doTestIteratorEqual(da, db);
} }
int nReaders = reader.getLeafReaders().length; int nReaders = topLevelContext.leaves().length;
// now test out-of-sequence sub readers // now test out-of-sequence sub readers
for (int i=0; i<nReaders; i++) { for (int i=0; i<nReaders; i++) {
SolrIndexReader sir = reader.getLeafReaders()[rand.nextInt(nReaders)]; ReaderContext readerInfo = topLevelContext.leaves()[rand.nextInt(nReaders)];
da = fa.getDocIdSet(sir); da = fa.getDocIdSet(readerInfo);
db = fb.getDocIdSet(sir); db = fb.getDocIdSet(readerInfo);
doTestIteratorEqual(da, db); doTestIteratorEqual(da, db);
} }
} }

View File

@ -21,6 +21,7 @@ import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.*; import org.apache.lucene.search.*;
@ -106,8 +107,8 @@ public class TestSort extends AbstractSolrTestCase {
for (int i=0; i<qiter; i++) { for (int i=0; i<qiter; i++) {
Filter filt = new Filter() { Filter filt = new Filter() {
@Override @Override
public DocIdSet getDocIdSet(IndexReader reader) throws IOException { public DocIdSet getDocIdSet(ReaderContext context) throws IOException {
return randSet(reader.maxDoc()); return randSet(context.reader.maxDoc());
} }
}; };

View File

@ -245,7 +245,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
SolrQueryRequest sr = req("q","foo"); SolrQueryRequest sr = req("q","foo");
SolrIndexReader r = sr.getSearcher().getReader(); SolrIndexReader r = sr.getSearcher().getReader();
assertTrue(r.maxDoc() > r.numDocs()); // should have deletions assertTrue(r.maxDoc() > r.numDocs()); // should have deletions
assertTrue(r.getLeafReaders().length > 1); // more than 1 segment assertFalse(r.getTopReaderContext().isAtomic); // more than 1 segment
sr.close(); sr.close();
assertU(commit("expungeDeletes","true")); assertU(commit("expungeDeletes","true"));
@ -254,7 +254,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
r = sr.getSearcher().getReader(); r = sr.getSearcher().getReader();
assertEquals(r.maxDoc(), r.numDocs()); // no deletions assertEquals(r.maxDoc(), r.numDocs()); // no deletions
assertEquals(4,r.maxDoc()); // no dups assertEquals(4,r.maxDoc()); // no dups
assertTrue(r.getLeafReaders().length > 1); // still more than 1 segment assertFalse(r.getTopReaderContext().isAtomic); // still more than 1 segment
sr.close(); sr.close();
} }