LUCENE-3757: Change AtomicReaderContext.leaves() to return itself as only leaf to simplify code and remove an otherwise unneeded ReaderUtil method

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1242233 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Uwe Schindler 2012-02-09 08:14:19 +00:00
parent dfdd1b7363
commit 25cfcfb61e
21 changed files with 36 additions and 58 deletions

View File

@ -28,7 +28,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.Version;
/**
@ -102,7 +101,7 @@ public class PKIndexSplitter {
boolean success = false;
final IndexWriter w = new IndexWriter(target, config);
try {
final AtomicReaderContext[] leaves = ReaderUtil.leaves(reader.getTopReaderContext());
final AtomicReaderContext[] leaves = reader.getTopReaderContext().leaves();
final IndexReader[] subReaders = new IndexReader[leaves.length];
for (int i = 0; i < leaves.length; i++) {
subReaders[i] = new DocumentFilteredAtomicIndexReader(leaves[i], preserveFilter, negateFilter);

View File

@ -32,7 +32,6 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
public class TestDistance extends LuceneTestCase {
@ -117,7 +116,7 @@ public class TestDistance extends LuceneTestCase {
LatLongDistanceFilter f = new LatLongDistanceFilter(new QueryWrapperFilter(new MatchAllDocsQuery()),
lat, lng, 1.0, latField, lngField);
AtomicReaderContext[] leaves = ReaderUtil.leaves(r.getTopReaderContext());
AtomicReaderContext[] leaves = r.getTopReaderContext().leaves();
for (int i = 0; i < leaves.length; i++) {
f.getDocIdSet(leaves[i], leaves[i].reader().getLiveDocs());
}

View File

@ -28,7 +28,8 @@ public final class AtomicReaderContext extends IndexReaderContext {
public final int docBase;
private final AtomicReader reader;
private final AtomicReaderContext[] leaves;
/**
* Creates a new {@link AtomicReaderContext}
*/
@ -38,6 +39,7 @@ public final class AtomicReaderContext extends IndexReaderContext {
this.ord = leafOrd;
this.docBase = leafDocBase;
this.reader = reader;
this.leaves = isTopLevel ? new AtomicReaderContext[] { this } : null;
}
AtomicReaderContext(AtomicReader atomicReader) {
@ -46,7 +48,7 @@ public final class AtomicReaderContext extends IndexReaderContext {
@Override
public AtomicReaderContext[] leaves() {
return null;
return leaves;
}
@Override

View File

@ -45,7 +45,8 @@ public abstract class IndexReaderContext {
/**
* Returns the context's leaves if this context is a top-level context
* otherwise <code>null</code>.
* otherwise <code>null</code>. For convenience, if this is an
* {@link AtomicReaderContext} this returns itself as the only leaf.
* <p>
* Note: this is convenience method since leaves can always be obtained by
* walking the context tree.

View File

@ -144,7 +144,7 @@ public class IndexSearcher {
reader = context.reader();
this.executor = executor;
this.readerContext = context;
leafContexts = ReaderUtil.leaves(context);
leafContexts = context.leaves();
this.leafSlices = executor == null ? null : slices(leafContexts);
}

View File

@ -30,7 +30,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.TermContext;
import org.apache.lucene.util.ReaderUtil;
abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.RewriteMethod {
@ -49,7 +48,7 @@ abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.Rew
protected final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
IndexReaderContext topReaderContext = reader.getTopReaderContext();
Comparator<BytesRef> lastTermComp = null;
final AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
final AtomicReaderContext[] leaves = topReaderContext.leaves();
for (AtomicReaderContext context : leaves) {
final Fields fields = context.reader().fields();
if (fields == null) {

View File

@ -43,7 +43,6 @@ import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.TermContext;
/**
@ -184,7 +183,7 @@ public class PayloadSpanUtil {
for (Term term : terms) {
termContexts.put(term, TermContext.build(context, term, true));
}
final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
final AtomicReaderContext[] leaves = context.leaves();
for (AtomicReaderContext atomicReaderContext : leaves) {
final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader().getLiveDocs(), termContexts);
while (spans.next() == true) {

View File

@ -120,21 +120,6 @@ public final class ReaderUtil {
protected abstract void add(int base, AtomicReader r) throws IOException;
}
/**
* Returns the context's leaves or the context itself as the only element of
* the returned array. If the context's #leaves() method returns
* <code>null</code> the given context must be an instance of
* {@link AtomicReaderContext}
*/
public static AtomicReaderContext[] leaves(IndexReaderContext context) {
assert context != null && context.isTopLevel : "context must be non-null & top-level";
final AtomicReaderContext[] leaves = context.leaves();
if (leaves == null) {
return new AtomicReaderContext[] { (AtomicReaderContext) context };
}
return leaves;
}
/**
* Walks up the reader tree and return the given context's top level reader
* context, or in other words the reader tree's root context.

View File

@ -85,7 +85,7 @@ public final class TermContext {
final String field = term.field();
final BytesRef bytes = term.bytes();
final TermContext perReaderTermState = new TermContext(context);
final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
final AtomicReaderContext[] leaves = context.leaves();
//if (DEBUG) System.out.println("prts.build term=" + term);
for (int i = 0; i < leaves.length; i++) {
//if (DEBUG) System.out.println(" r=" + leaves[i].reader);

View File

@ -30,7 +30,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util._TestUtil;
public class TestDocsAndPositions extends LuceneTestCase {
@ -65,7 +64,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("1");
IndexReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
for (AtomicReaderContext atomicReaderContext : leaves) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@ -141,7 +140,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
for (AtomicReaderContext atomicReaderContext : leaves) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@ -217,7 +216,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
for (AtomicReaderContext context : leaves) {
int maxDoc = context.reader().maxDoc();
DocsEnum docsEnum = _TestUtil.docs(random, context.reader(), fieldName, bytes, null, null, true);
@ -296,7 +295,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
BytesRef bytes = new BytesRef("even");
IndexReaderContext topReaderContext = reader.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
for (AtomicReaderContext atomicReaderContext : leaves) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);

View File

@ -60,7 +60,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
for (Term term : terms) {
termContexts.put(term, TermContext.build(topLevelReaderContext, term, true));
}
AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelReaderContext);
AtomicReaderContext[] leaves = topLevelReaderContext.leaves();
if(leaves.length == 1) {
return query.getSpans(leaves[0], leaves[0].reader().getLiveDocs(), termContexts);
}

View File

@ -32,7 +32,6 @@ import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
public class TestNearSpansOrdered extends LuceneTestCase {
protected IndexSearcher searcher;
@ -167,7 +166,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
SpanNearQuery q = makeQuery();
Weight w = searcher.createNormalizedWeight(q);
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader().getLiveDocs());
assertEquals(1, s.advance(1));
}

View File

@ -404,7 +404,7 @@ public class TestSpans extends LuceneTestCase {
boolean ordered = true;
int slop = 1;
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
int subIndex = ReaderUtil.subIndex(11, leaves);
for (int i = 0; i < leaves.length; i++) {

View File

@ -37,7 +37,6 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util._TestUtil;
import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
@ -216,7 +215,7 @@ public class QueryUtils {
*/
public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
//System.out.println("Checking "+q);
final AtomicReaderContext[] readerContextArray = ReaderUtil.leaves(s.getTopReaderContext());
final AtomicReaderContext[] readerContextArray = s.getTopReaderContext().leaves();
if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
final int skip_op = 0;
@ -349,7 +348,7 @@ public class QueryUtils {
final float maxDiff = 1e-3f;
final int lastDoc[] = {-1};
final AtomicReader lastReader[] = {null};
final AtomicReaderContext[] context = ReaderUtil.leaves(s.getTopReaderContext());
final AtomicReaderContext[] context = s.getTopReaderContext().leaves();
s.search(q,new Collector() {
private Scorer scorer;
private int leafPtr;

View File

@ -156,7 +156,7 @@ public class TestBlockJoin extends LuceneTestCase {
}
private Document getParentDoc(IndexReader reader, Filter parents, int childDocID) throws IOException {
final AtomicReaderContext[] leaves = ReaderUtil.leaves(reader.getTopReaderContext());
final AtomicReaderContext[] leaves = reader.getTopReaderContext().leaves();
final int subIndex = ReaderUtil.subIndex(childDocID, leaves);
final AtomicReaderContext leaf = leaves[subIndex];
final FixedBitSet bits = (FixedBitSet) parents.getDocIdSet(leaf, null);
@ -818,7 +818,7 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ToParentBlockJoinQuery.ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q);
DocIdSetIterator disi = weight.scorer(ReaderUtil.leaves(s.getIndexReader().getTopReaderContext())[0], true, true, null);
DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves()[0], true, true, null);
assertEquals(1, disi.advance(1));
r.close();
dir.close();
@ -852,7 +852,7 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ToParentBlockJoinQuery.ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q);
DocIdSetIterator disi = weight.scorer(ReaderUtil.leaves(s.getIndexReader().getTopReaderContext())[0], true, true, null);
DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves()[0], true, true, null);
assertEquals(2, disi.advance(0));
r.close();
dir.close();

View File

@ -60,7 +60,7 @@ public class ScaleFloatFunction extends ValueSource {
}
private ScaleInfo createScaleInfo(Map context, AtomicReaderContext readerContext) throws IOException {
final AtomicReaderContext[] leaves = ReaderUtil.leaves(ReaderUtil.getTopLevelContext(readerContext));
final AtomicReaderContext[] leaves = ReaderUtil.getTopLevelContext(readerContext).leaves();
float minVal = Float.POSITIVE_INFINITY;
float maxVal = Float.NEGATIVE_INFINITY;

View File

@ -31,7 +31,6 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
public class TermsFilterTest extends LuceneTestCase {
@ -111,7 +110,7 @@ public class TermsFilterTest extends LuceneTestCase {
tf.addTerm(new Term(fieldName, "content1"));
MultiReader multi = new MultiReader(reader1, reader2);
for (AtomicReaderContext context : ReaderUtil.leaves(multi.getTopReaderContext())) {
for (AtomicReaderContext context : multi.getTopReaderContext().leaves()) {
FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
assertTrue("Must be >= 0", bits.cardinality() >= 0);
}

View File

@ -584,7 +584,7 @@ public class QueryComponent extends SearchComponent
NamedList<Object[]> sortVals = new NamedList<Object[]>(); // order is important for the sort fields
Field field = new StringField("dummy", ""); // a dummy Field
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
AtomicReaderContext currentLeaf = null;
if (leaves.length==1) {
// if there is a single segment, use that subReader and avoid looking up each time

View File

@ -26,7 +26,6 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.solr.common.SolrException;
@ -83,7 +82,7 @@ class PerSegmentSingleValuedFaceting {
// reuse the translation logic to go from top level set to per-segment set
baseSet = docs.getTopFilter();
final AtomicReaderContext[] leaves = ReaderUtil.leaves(searcher.getTopReaderContext());
final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves();
// The list of pending tasks that aren't immediately submitted
// TODO: Is there a completion service, or a delegating executor that can
// limit the number of concurrent tasks submitted to a bigger executor?

View File

@ -23,7 +23,6 @@ import java.util.Random;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.MultiReader;
@ -430,15 +429,15 @@ public class TestDocSet extends LuceneTestCase {
DocIdSet da;
DocIdSet db;
AtomicReaderContext[] leaves = topLevelContext.leaves();
// first test in-sequence sub readers
for (AtomicReaderContext readerContext : ReaderUtil.leaves(topLevelContext)) {
for (AtomicReaderContext readerContext : leaves) {
da = fa.getDocIdSet(readerContext, null);
db = fb.getDocIdSet(readerContext, null);
doTestIteratorEqual(da, db);
}
AtomicReaderContext[] leaves = ReaderUtil.leaves(topLevelContext);
int nReaders = leaves.length;
// now test out-of-sequence sub readers
for (int i=0; i<nReaders; i++) {

View File

@ -50,7 +50,7 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
Map context = ValueSource.newContext(sqr.getSearcher());
vs.createWeight(context, sqr.getSearcher());
IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
AtomicReaderContext[] leaves = topReaderContext.leaves();
int idx = ReaderUtil.subIndex(doc, leaves);
AtomicReaderContext leaf = leaves[idx];
FunctionValues vals = vs.getValues(context, leaf);
@ -78,7 +78,7 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
// make sure the readers share the first segment
// Didn't work w/ older versions of lucene2.9 going from segment -> multi
assertEquals(ReaderUtil.leaves(rCtx1)[0].reader(), ReaderUtil.leaves(rCtx2)[0].reader());
assertEquals(rCtx1.leaves()[0].reader(), rCtx2.leaves()[0].reader());
assertU(adoc("id","5", "v_f","3.14159"));
assertU(adoc("id","6", "v_f","8983", "v_s1","string6"));
@ -88,8 +88,8 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
IndexReaderContext rCtx3 = sr3.getSearcher().getTopReaderContext();
// make sure the readers share segments
// assertEquals(r1.getLeafReaders()[0], r3.getLeafReaders()[0]);
assertEquals(ReaderUtil.leaves(rCtx2)[0].reader(), ReaderUtil.leaves(rCtx3)[0].reader());
assertEquals(ReaderUtil.leaves(rCtx2)[1].reader(), ReaderUtil.leaves(rCtx3)[1].reader());
assertEquals(rCtx2.leaves()[0].reader(), rCtx3.leaves()[0].reader());
assertEquals(rCtx2.leaves()[1].reader(), rCtx3.leaves()[1].reader());
sr1.close();
sr2.close();
@ -123,8 +123,8 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
assertU(commit());
SolrQueryRequest sr6 = req("q","foo");
IndexReaderContext rCtx6 = sr6.getSearcher().getTopReaderContext();
assertEquals(1, ReaderUtil.leaves(rCtx6)[0].reader().numDocs()); // only a single doc left in the first segment
assertTrue( !ReaderUtil.leaves(rCtx5)[0].reader().equals(ReaderUtil.leaves(rCtx6)[0].reader()) ); // readers now different
assertEquals(1, rCtx6.leaves()[0].reader().numDocs()); // only a single doc left in the first segment
assertTrue( !rCtx5.leaves()[0].reader().equals(rCtx6.leaves()[0].reader()) ); // readers now different
sr5.close();
sr6.close();