LUCENE-4307: rename IR.getTopReaderContext to IR.getContext

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1373072 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-08-14 20:43:16 +00:00
parent c607f548bb
commit 796fb31c89
29 changed files with 58 additions and 53 deletions
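In practice the rename is a drop-in substitution with no behavioral change; a minimal sketch of a typical call-site update (the `reader` variable is illustrative, not taken from this commit):

    // before this commit:
    //   IndexReaderContext ctx = reader.getTopReaderContext();
    // after this commit:
    IndexReaderContext ctx = reader.getContext();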


@ -47,6 +47,9 @@ API Changes
leaves() (LUCENE-4152), which lists AtomicReaderContexts including
the doc base of each leaf. (Uwe Schindler, Robert Muir)
* LUCENE-4307: Renamed IndexReader.getTopReaderContext to
IndexReader.getContext. (Robert Muir)
Bug Fixes
* LUCENE-4297: BooleanScorer2 would multiply the coord() factor


@ -322,7 +322,7 @@ CompositeReader itself to build its reader tree. To get all atomic leaves
of a reader, use IndexReader#leaves(), which also provides the doc base
of each leaf. Readers that are already atomic return themselves as the single leaf with
doc base 0. To emulate Lucene 3.x getSequentialSubReaders(),
use getTopReaderContext().children().
use getContext().children().
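As a hedged illustration of the two access paths described here (assuming an already-open composite reader named reader; the variable names are illustrative only):

    // preferred 4.x pattern: iterate the atomic leaves directly
    for (AtomicReaderContext leaf : reader.leaves()) {
      AtomicReader leafReader = leaf.reader();
      int docBase = leaf.docBase;  // doc base of this leaf
      // ... per-segment work here ...
    }

    // emulating 3.x getSequentialSubReaders(): the root context's direct children
    // (children() is null for a reader that is already atomic)
    for (IndexReaderContext child : reader.getContext().children()) {
      IndexReader childReader = child.reader();
      // a child may itself be composite or atomic
    }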
## LUCENE-2413,LUCENE-3396: Analyzer package changes


@ -54,7 +54,7 @@ public abstract class AtomicReader extends IndexReader {
}
@Override
public final AtomicReaderContext getTopReaderContext() {
public final AtomicReaderContext getContext() {
ensureOpen();
return readerContext;
}


@ -439,7 +439,7 @@ class BufferedDeletesStream {
// Delete by query
private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, ReadersAndLiveDocs rld, final SegmentReader reader) throws IOException {
long delCount = 0;
final AtomicReaderContext readerContext = reader.getTopReaderContext();
final AtomicReaderContext readerContext = reader.getContext();
boolean any = false;
for (QueryAndLimit ent : queriesIter) {
Query query = ent.query;


@ -90,7 +90,7 @@ public abstract class CompositeReader extends IndexReader {
protected abstract List<? extends IndexReader> getSequentialSubReaders();
@Override
public final CompositeReaderContext getTopReaderContext() {
public final CompositeReaderContext getContext() {
ensureOpen();
// lazy init without thread safety for perf reasons: Building the readerContext twice does not hurt!
if (readerContext == null) {


@ -378,9 +378,11 @@ public abstract class IndexReader implements Closeable {
protected abstract void doClose() throws IOException;
/**
* Expert: Returns a the root {@link IndexReaderContext} for this
* {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
* readers ,ie. this reader being a composite reader, this method returns a
* Expert: Returns the root {@link IndexReaderContext} for this
* {@link IndexReader}'s sub-reader tree.
* <p>
* Iff this reader is composed of sub
* readers, i.e. this reader being a composite reader, this method returns a
* {@link CompositeReaderContext} holding the reader's direct children as well as a
* view of the reader tree's atomic leaf contexts. All sub-
* {@link IndexReaderContext} instances referenced from this reader's top-level
@ -396,13 +398,13 @@ public abstract class IndexReader implements Closeable {
*
* @lucene.experimental
*/
public abstract IndexReaderContext getTopReaderContext();
public abstract IndexReaderContext getContext();
/**
* Returns the reader's leaves, or itself if this reader is Atomic.
*/
public final List<AtomicReaderContext> leaves() {
return getTopReaderContext().leaves();
return getContext().leaves();
}
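The concrete subclasses narrow the return type, as the AtomicReader and CompositeReader hunks in this commit show; a small sketch of what callers can rely on (both readers are assumed to be opened elsewhere):

    AtomicReaderContext leafCtx = atomicReader.getContext();        // always a leaf; children() is null
    CompositeReaderContext rootCtx = directoryReader.getContext();  // root of the reader tree

    // leaves() above is just sugar for getContext().leaves()
    List<AtomicReaderContext> leaves = directoryReader.leaves();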
/** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find


@ -36,7 +36,7 @@ import org.apache.lucene.util.packed.PackedInts.Reader;
*
* <p><b>NOTE</b>: for multi readers, you'll get better
* performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*


@ -38,7 +38,7 @@ import org.apache.lucene.util.BytesRef;
*
* <p><b>NOTE</b>: for composite readers, you'll get better
* performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*
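The per-leaf pattern this note (and the matching notes in the neighboring files) recommends looks roughly like the following; a sketch that assumes an open composite reader and a field named "body", neither of which comes from this diff:

    for (AtomicReaderContext leaf : reader.getContext().leaves()) {
      Terms terms = leaf.reader().terms("body");  // per-segment view, no merge-on-the-fly overhead
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);
        // ... consume the enum; add leaf.docBase when collecting global doc ids ...
      }
    }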


@ -37,7 +37,7 @@ import org.apache.lucene.index.MultiReader; // javadoc
* <p><b>NOTE</b>: this class almost always results in a
* performance hit. If this is important to your use case,
* you'll get better performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*/


@ -122,7 +122,7 @@ public class IndexSearcher {
*
* @lucene.experimental */
public IndexSearcher(IndexReader r, ExecutorService executor) {
this(r.getTopReaderContext(), executor);
this(r.getContext(), executor);
}
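Both constructors end up on the context-based one below; a hedged usage sketch (the pool size and variable names are arbitrary, and the caller still owns the executor's lifecycle):

    ExecutorService pool = Executors.newFixedThreadPool(4);
    IndexSearcher searcher = new IndexSearcher(reader.getContext(), pool);
    // equivalent to new IndexSearcher(reader, pool), which now delegates via getContext()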
/**
@ -138,7 +138,7 @@ public class IndexSearcher {
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @see IndexReaderContext
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context, ExecutorService executor) {
@ -154,7 +154,7 @@ public class IndexSearcher {
* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
*
* @see IndexReaderContext
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context) {
@ -639,7 +639,7 @@ public class IndexSearcher {
/**
* Returns this searcher's top-level {@link IndexReaderContext}.
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
*/
/* sugar for #getReader().getTopReaderContext() */
public IndexReaderContext getTopReaderContext() {


@ -50,7 +50,7 @@ public class QueryWrapperFilter extends Filter {
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
// get a private context that is used to rewrite, createWeight and score eventually
final AtomicReaderContext privateContext = context.reader().getTopReaderContext();
final AtomicReaderContext privateContext = context.reader().getContext();
final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
return new DocIdSet() {
@Override


@ -46,7 +46,7 @@ abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.Rew
final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
Comparator<BytesRef> lastTermComp = null;
for (AtomicReaderContext context : topReaderContext.leaves()) {
final Fields fields = context.reader().fields();


@ -60,7 +60,7 @@ public class PayloadSpanUtil {
* @param context
* that contains doc with payloads to extract
*
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
*/
public PayloadSpanUtil(IndexReaderContext context) {
this.context = context;
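A hedged sketch of feeding the utility a top-level context (the reader and query are placeholders; getPayloadsForQuery is the class's existing extraction method, untouched by this commit):

    PayloadSpanUtil psu = new PayloadSpanUtil(reader.getContext());
    Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term("f", "lucene")));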


@ -286,7 +286,7 @@ public final class FieldCacheSanityChecker {
if (obj instanceof IndexReader) {
try {
final List<IndexReaderContext> childs =
((IndexReader) obj).getTopReaderContext().children();
((IndexReader) obj).getContext().children();
if (childs != null) { // it is composite reader
for (final IndexReaderContext ctx : childs) {
all.add(ctx.reader().getCoreCacheKey());


@ -143,7 +143,7 @@ public class TestCustomNorms extends LuceneTestCase {
IndexReader reader = writer.getReader();
writer.close();
assertEquals(numAdded, reader.numDocs());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (final AtomicReaderContext ctx : topReaderContext.leaves()) {
AtomicReader atomicReader = ctx.reader();
Source source = random().nextBoolean() ? atomicReader.normValues("foo").getSource() : atomicReader.normValues("foo").getDirectSource();


@ -63,7 +63,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("1");
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@ -138,7 +138,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@ -214,7 +214,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext context : topReaderContext.leaves()) {
int maxDoc = context.reader().maxDoc();
DocsEnum docsEnum = _TestUtil.docs(random(), context.reader(), fieldName, bytes, null, null, DocsEnum.FLAG_FREQS);
@ -292,7 +292,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("even");
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);


@ -339,13 +339,13 @@ public class TestParallelCompositeReader extends LuceneTestCase {
if (compositeComposite) {
rd1 = new MultiReader(DirectoryReader.open(dir1), DirectoryReader.open(dir1));
rd2 = new MultiReader(DirectoryReader.open(dir2), DirectoryReader.open(dir2));
assertEquals(2, rd1.getTopReaderContext().children().size());
assertEquals(2, rd2.getTopReaderContext().children().size());
assertEquals(2, rd1.getContext().children().size());
assertEquals(2, rd2.getContext().children().size());
} else {
rd1 = DirectoryReader.open(dir1);
rd2 = DirectoryReader.open(dir2);
assertEquals(3, rd1.getTopReaderContext().children().size());
assertEquals(3, rd2.getTopReaderContext().children().size());
assertEquals(3, rd1.getContext().children().size());
assertEquals(3, rd2.getContext().children().size());
}
ParallelCompositeReader pr = new ParallelCompositeReader(rd1, rd2);
return newSearcher(pr);


@ -121,7 +121,7 @@ public class TestTypePromotion extends LuceneTestCase {
throws IOException {
DirectoryReader reader = DirectoryReader.open(dir);
assertEquals(1, reader.leaves().size());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
List<AtomicReaderContext> leaves = topReaderContext.leaves();
assertEquals(1, leaves.size());
DocValues docValues = leaves.get(0).reader().docValues("promote");
@ -372,7 +372,7 @@ public class TestTypePromotion extends LuceneTestCase {
writer.close();
DirectoryReader reader = DirectoryReader.open(dir);
assertEquals(1, reader.leaves().size());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
List<AtomicReaderContext> leaves = topReaderContext.leaves();
DocValues docValues = leaves.get(0).reader().docValues("promote");
assertNotNull(docValues);


@ -43,7 +43,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.close();
IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
MockFilter filter = new MockFilter();
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
@ -69,7 +69,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.close();
IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
final Filter filter = new Filter() {
@Override
@ -92,7 +92,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
writer.close();
IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
final Filter filter = new Filter() {
@Override
@ -115,8 +115,8 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
}
private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
assertTrue(reader.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
assertTrue(reader.getContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs());


@ -197,7 +197,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
@Test
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getTopReaderContext();
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getContext();
NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);


@ -211,7 +211,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
@Test
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getTopReaderContext();
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getContext();
NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET,
f.getDocIdSet(context, context.reader().getLiveDocs()));


@ -67,12 +67,12 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanTermQuery stq;
Spans spans;
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "seventy"));
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
spans = MultiSpansWrapper.wrap(indexReader.getContext(), stq);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 1, 1, 1);
stq = new SpanTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "seventy"));
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
spans = MultiSpansWrapper.wrap(indexReader.getContext(), stq);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 0, 0, 0);
}
@ -83,7 +83,7 @@ public class TestPayloadSpans extends LuceneTestCase {
SpanFirstQuery sfq;
match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
sfq = new SpanFirstQuery(match, 2);
Spans spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq);
Spans spans = MultiSpansWrapper.wrap(indexReader.getContext(), sfq);
checkSpans(spans, 109, 1, 1, 1);
//Test more complicated subclause
SpanQuery[] clauses = new SpanQuery[2];
@ -91,11 +91,11 @@ public class TestPayloadSpans extends LuceneTestCase {
clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
match = new SpanNearQuery(clauses, 0, true);
sfq = new SpanFirstQuery(match, 2);
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
checkSpans(MultiSpansWrapper.wrap(indexReader.getContext(), sfq), 100, 2, 1, 1);
match = new SpanNearQuery(clauses, 0, false);
sfq = new SpanFirstQuery(match, 2);
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
checkSpans(MultiSpansWrapper.wrap(indexReader.getContext(), sfq), 100, 2, 1, 1);
}
@ -119,7 +119,7 @@ public class TestPayloadSpans extends LuceneTestCase {
writer.close();
checkSpans(MultiSpansWrapper.wrap(reader.getTopReaderContext(), snq), 1,new int[]{2});
checkSpans(MultiSpansWrapper.wrap(reader.getContext(), snq), 1,new int[]{2});
reader.close();
directory.close();
}


@ -132,7 +132,7 @@ public class BooleanFilterTest extends LuceneTestCase {
private void tstFilterCard(String mes, int expected, Filter filt)
throws Exception {
// BooleanFilter never returns null DIS or null DISI!
DocIdSetIterator disi = filt.getDocIdSet(reader.getTopReaderContext(), reader.getLiveDocs()).iterator();
DocIdSetIterator disi = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator();
int actual = 0;
while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
actual++;


@ -62,8 +62,8 @@ public class TermsFilterTest extends LuceneTestCase {
w.addDocument(doc);
}
IndexReader reader = new SlowCompositeReaderWrapper(w.getReader());
assertTrue(reader.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
assertTrue(reader.getContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
w.close();
TermsFilter tf = new TermsFilter();


@ -66,7 +66,7 @@ public class TestNumericRangeFilterBuilder extends LuceneTestCase {
try {
AtomicReader reader = new SlowCompositeReaderWrapper(DirectoryReader.open(ramDir));
try {
assertNull(filter.getDocIdSet(reader.getTopReaderContext(), reader.getLiveDocs()));
assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
}
finally {
reader.close();


@ -353,7 +353,7 @@ public class QueryUtils {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
AtomicReaderContext ctx = previousReader.getTopReaderContext();
AtomicReaderContext ctx = previousReader.getContext();
Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;


@ -179,7 +179,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase {
}
try {
for(Term term : terms) {
final TermContext termContext = TermContext.build(s.getIndexReader().getTopReaderContext(), term, false);
final TermContext termContext = TermContext.build(s.getIndexReader().getContext(), term, false);
stats.put(term, s.termStatistics(term, termContext));
}
} finally {


@ -1168,7 +1168,7 @@ public abstract class LuceneTestCase extends Assert {
if (maybeWrap) {
r = maybeWrapReader(r);
}
IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getTopReaderContext());
IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getContext());
ret.setSimilarity(classEnvRule.similarity);
return ret;
} else {
@ -1197,7 +1197,7 @@ public abstract class LuceneTestCase extends Assert {
}
IndexSearcher ret = random.nextBoolean()
? new AssertingIndexSearcher(random, r, ex)
: new AssertingIndexSearcher(random, r.getTopReaderContext(), ex);
: new AssertingIndexSearcher(random, r.getContext(), ex);
ret.setSimilarity(classEnvRule.similarity);
return ret;
}


@ -455,7 +455,7 @@ public class TestDocSet extends LuceneTestCase {
}
public void doFilterTest(IndexReader reader) throws IOException {
IndexReaderContext topLevelContext = reader.getTopReaderContext();
IndexReaderContext topLevelContext = reader.getContext();
OpenBitSet bs = getRandomSet(reader.maxDoc(), rand.nextInt(reader.maxDoc()+1));
DocSet a = new BitDocSet(bs);
DocSet b = getIntDocSet(bs);