From ae212ff6f6af5ba08e5a743cb7f541ad7358bc50 Mon Sep 17 00:00:00 2001 From: Uwe Schindler Date: Tue, 19 Jun 2012 07:24:29 +0000 Subject: [PATCH] LUCENE-3866: CompositeReader.getSequentialSubReaders() now returns unmodifiable List. ReaderUtil.Gather was removed, as IndexReaderContext.leaves() is now the preferred way to access sub-readers git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1351590 13f79535-47bb-0310-9956-ffa450edef68 --- lucene/CHANGES.txt | 9 ++ .../lucene/index/AtomicReaderContext.java | 14 +- .../lucene/index/BaseCompositeReader.java | 26 +++- .../apache/lucene/index/CompositeReader.java | 17 ++- .../lucene/index/CompositeReaderContext.java | 65 ++++----- .../apache/lucene/index/DirectoryReader.java | 1 - .../lucene/index/IndexReaderContext.java | 22 ++- .../apache/lucene/index/MultiDocValues.java | 124 ++++++++--------- .../index/MultiDocsAndPositionsEnum.java | 4 +- .../apache/lucene/index/MultiDocsEnum.java | 5 +- .../org/apache/lucene/index/MultiFields.java | 126 ++++++++---------- .../apache/lucene/index/MultiFieldsEnum.java | 8 +- .../org/apache/lucene/index/MultiReader.java | 6 +- .../org/apache/lucene/index/MultiTerms.java | 6 +- .../apache/lucene/index/MultiTermsEnum.java | 8 +- .../lucene/index/ParallelCompositeReader.java | 42 +++--- .../apache/lucene/index/SegmentMerger.java | 20 +-- .../index/SlowCompositeReaderWrapper.java | 14 +- .../lucene/index/StandardDirectoryReader.java | 21 +-- .../apache/lucene/search/IndexSearcher.java | 41 +++--- .../lucene/search/TermCollectingRewrite.java | 3 +- .../search/payloads/PayloadSpanUtil.java | 3 +- .../org/apache/lucene/util/BitsSlice.java | 2 +- .../lucene/util/FieldCacheSanityChecker.java | 6 +- .../org/apache/lucene/util/MultiBits.java | 17 +-- .../org/apache/lucene/util/ReaderSlice.java | 39 ++++++ .../org/apache/lucene/util/ReaderUtil.java | 97 +------------- .../org/apache/lucene/util/TermContext.java | 9 +- .../codecs/lucene40/TestReuseDocsEnum.java | 40 +++--- .../apache/lucene/index/TestCustomNorms.java | 35 +++-- .../lucene/index/TestDeletionPolicy.java | 14 +- .../lucene/index/TestDirectoryReader.java | 20 +-- .../index/TestDirectoryReaderReopen.java | 10 +- .../lucene/index/TestDocValuesIndexing.java | 7 +- .../lucene/index/TestDocsAndPositions.java | 12 +- .../lucene/index/TestIndexWriterCommit.java | 6 +- .../index/TestIndexWriterForceMerge.java | 4 +- .../lucene/index/TestIndexWriterReader.java | 2 +- .../lucene/index/TestIndexWriterUnicode.java | 10 +- .../index/TestParallelCompositeReader.java | 8 +- .../lucene/index/TestStressIndexing2.java | 2 +- .../apache/lucene/index/TestTermsEnum.java | 2 +- .../lucene/index/TestThreadedForceMerge.java | 2 +- .../lucene/index/TestTypePromotion.java | 15 ++- .../lucene/search/TestBooleanQuery.java | 4 +- .../lucene/search/TestMatchAllDocsQuery.java | 2 +- .../lucene/search/TestShardSearching.java | 6 +- .../lucene/search/TestTopDocsMerge.java | 19 +-- .../search/spans/MultiSpansWrapper.java | 46 ++++--- .../search/spans/TestNearSpansOrdered.java | 4 +- .../apache/lucene/search/spans/TestSpans.java | 11 +- .../lucene/search/grouping/TestGrouping.java | 29 ++-- .../lucene/search/join/TestBlockJoin.java | 8 +- .../lucene/index/MultiPassIndexSplitter.java | 33 ++--- .../apache/lucene/index/PKIndexSplitter.java | 10 +- .../org/apache/lucene/misc/HighFreqTerms.java | 47 +++---- .../index/TestBalancedSegmentMergePolicy.java | 2 +- .../lucene/index/TestIndexSplitter.java | 4 +- .../valuesource/ScaleFloatFunction.java | 3 +- 
.../lucene/queryparser/xml/TestParser.java | 5 +- .../lucene/search/spell/SpellChecker.java | 14 +- .../org/apache/lucene/search/QueryUtils.java | 11 +- .../apache/lucene/util/LuceneTestCase.java | 11 +- .../handler/admin/LukeRequestHandler.java | 2 +- .../handler/component/QueryComponent.java | 8 +- .../component/SpellCheckComponent.java | 2 +- .../PerSegmentSingleValuedFaceting.java | 10 +- .../transform/ValueSourceAugmenter.java | 8 +- .../apache/solr/search/SolrIndexSearcher.java | 19 +-- .../org/apache/solr/update/VersionInfo.java | 2 +- .../org/apache/solr/search/TestDocSet.java | 7 +- .../apache/solr/search/TestIndexSearcher.java | 15 ++- .../test/org/apache/solr/search/TestSort.java | 2 +- 73 files changed, 608 insertions(+), 680 deletions(-) create mode 100644 lucene/core/src/java/org/apache/lucene/util/ReaderSlice.java diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 6be48481cf8..7c2b9189bd7 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -278,6 +278,11 @@ Changes in backwards compatibility policy * LUCENE-4055: You can't put foreign files into the index dir anymore. +* LUCENE-3866: CompositeReader.getSequentialSubReaders() now returns + unmodifiable List. ReaderUtil.Gather was + removed, as IndexReaderContext.leaves() is now the preferred way + to access sub-readers. (Uwe Schindler) + Changes in Runtime Behavior * LUCENE-2846: omitNorms now behaves like omitTermFrequencyAndPositions, if you @@ -536,6 +541,10 @@ API Changes which can be used to change the IndexWriter's live settings. IndexWriterConfig is used only for initializing the IndexWriter. (Shai Erera) +* LUCENE-3866: IndexReaderContext.leaves() is now the preferred way to access + atomic sub-readers of any kind of IndexReader (for AtomicReaders it returns + itself as the only leaf with docBase=0). (Uwe Schindler) + New features * LUCENE-2604: Added RegexpQuery support to QueryParser. Regular expressions diff --git a/lucene/core/src/java/org/apache/lucene/index/AtomicReaderContext.java b/lucene/core/src/java/org/apache/lucene/index/AtomicReaderContext.java index af7881d70e2..66ac94d1137 100644 --- a/lucene/core/src/java/org/apache/lucene/index/AtomicReaderContext.java +++ b/lucene/core/src/java/org/apache/lucene/index/AtomicReaderContext.java @@ -1,5 +1,8 @@ package org.apache.lucene.index; +import java.util.Collections; +import java.util.List; + /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -28,7 +31,7 @@ public final class AtomicReaderContext extends IndexReaderContext { public final int docBase; private final AtomicReader reader; - private final AtomicReaderContext[] leaves; + private final List leaves; /** * Creates a new {@link AtomicReaderContext} @@ -39,7 +42,7 @@ public final class AtomicReaderContext extends IndexReaderContext { this.ord = leafOrd; this.docBase = leafDocBase; this.reader = reader; - this.leaves = isTopLevel ? new AtomicReaderContext[] { this } : null; + this.leaves = isTopLevel ?
Collections.singletonList(this) : null; } AtomicReaderContext(AtomicReader atomicReader) { @@ -47,12 +50,15 @@ } @Override - public AtomicReaderContext[] leaves() { + public List leaves() { + if (!isTopLevel) + throw new UnsupportedOperationException("This is not a top-level context."); + assert leaves != null; return leaves; } @Override - public IndexReaderContext[] children() { + public List children() { return null; } diff --git a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java index 653630ac54f..0021e510001 100644 --- a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java @@ -18,6 +18,9 @@ package org.apache.lucene.index; */ import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; @@ -47,12 +50,16 @@ import org.apache.lucene.util.ReaderUtil; * @lucene.internal */ public abstract class BaseCompositeReader extends CompositeReader { - protected final R[] subReaders; - protected final int[] starts; // 1st docno for each reader + private final R[] subReaders; + private final int[] starts; // 1st docno for each reader private final int maxDoc; private final int numDocs; private final boolean hasDeletions; - + + /** List view solely for {@link #getSequentialSubReaders()}, + * for efficiency, the array is used internally. */ + private final List subReadersList; + /** * Constructs a {@code BaseCompositeReader} on the given subReaders. * @param subReaders the wrapped sub-readers. This array is returned by @@ -63,6 +70,7 @@ public abstract class BaseCompositeReader extends Composi */ protected BaseCompositeReader(R[] subReaders) throws IOException { this.subReaders = subReaders; + this.subReadersList = Collections.unmodifiableList(Arrays.asList(subReaders)); starts = new int[subReaders.length + 1]; // build starts array int maxDoc = 0, numDocs = 0; boolean hasDeletions = false; @@ -135,8 +143,16 @@ public abstract class BaseCompositeReader extends Composi return ReaderUtil.subIndex(docID, this.starts); } + /** Helper method for subclasses to get the docBase of the given sub-reader index. */ + protected final int readerBase(int readerIndex) { + if (readerIndex < 0 || readerIndex >= subReaders.length) { + throw new IllegalArgumentException("readerIndex must be >= 0 and < getSequentialSubReaders().size()"); + } + return this.starts[readerIndex]; + } + @Override - public final R[] getSequentialSubReaders() { - return subReaders; + public final List getSequentialSubReaders() { + return subReadersList; } } diff --git a/lucene/core/src/java/org/apache/lucene/index/CompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/CompositeReader.java index 7e0d4ed927d..277adf61f35 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CompositeReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/CompositeReader.java @@ -17,6 +17,8 @@ package org.apache.lucene.index; * limitations under the License.
*/ +import java.util.List; + import org.apache.lucene.search.SearcherManager; // javadocs import org.apache.lucene.store.*; @@ -63,12 +65,12 @@ public abstract class CompositeReader extends IndexReader { final StringBuilder buffer = new StringBuilder(); buffer.append(getClass().getSimpleName()); buffer.append('('); - final IndexReader[] subReaders = getSequentialSubReaders(); + final List subReaders = getSequentialSubReaders(); assert subReaders != null; - if (subReaders.length > 0) { - buffer.append(subReaders[0]); - for (int i = 1; i < subReaders.length; ++i) { - buffer.append(" ").append(subReaders[i]); + if (!subReaders.isEmpty()) { + buffer.append(subReaders.get(0)); + for (int i = 1, c = subReaders.size(); i < c; ++i) { + buffer.append(" ").append(subReaders.get(i)); } } buffer.append(')'); @@ -81,11 +83,8 @@ public abstract class CompositeReader extends IndexReader { * If this method returns an empty array, that means this * reader is a null reader (for example a MultiReader * that has no sub readers). - *
Warning: Don't modify the returned array! - * Doing so will corrupt the internal structure of this - * {@code CompositeReader}. */ - public abstract IndexReader[] getSequentialSubReaders(); + public abstract List getSequentialSubReaders(); @Override public final CompositeReaderContext getTopReaderContext() { diff --git a/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java b/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java index 55dd2fcd8ee..dcf942aa09f 100644 --- a/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java +++ b/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java @@ -17,16 +17,18 @@ package org.apache.lucene.index; * limitations under the License. */ -import java.io.IOException; -import org.apache.lucene.util.ReaderUtil; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; /** * {@link IndexReaderContext} for {@link CompositeReader} instance. * @lucene.experimental */ public final class CompositeReaderContext extends IndexReaderContext { - private final IndexReaderContext[] children; - private final AtomicReaderContext[] leaves; + private final List children; + private final List leaves; private final CompositeReader reader; static CompositeReaderContext create(CompositeReader reader) { @@ -38,34 +40,37 @@ public final class CompositeReaderContext extends IndexReaderContext { * not top-level readers in the current context */ CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader, - int ordInParent, int docbaseInParent, IndexReaderContext[] children) { + int ordInParent, int docbaseInParent, List children) { this(parent, reader, ordInParent, docbaseInParent, children, null); } /** * Creates a {@link CompositeReaderContext} for top-level readers with parent set to null */ - CompositeReaderContext(CompositeReader reader, IndexReaderContext[] children, AtomicReaderContext[] leaves) { + CompositeReaderContext(CompositeReader reader, List children, List leaves) { this(null, reader, 0, 0, children, leaves); } private CompositeReaderContext(CompositeReaderContext parent, CompositeReader reader, - int ordInParent, int docbaseInParent, IndexReaderContext[] children, - AtomicReaderContext[] leaves) { + int ordInParent, int docbaseInParent, List children, + List leaves) { super(parent, ordInParent, docbaseInParent); - this.children = children; - this.leaves = leaves; + this.children = Collections.unmodifiableList(children); + this.leaves = leaves == null ? 
null : Collections.unmodifiableList(leaves); this.reader = reader; } @Override - public AtomicReaderContext[] leaves() { + public List leaves() { + if (!isTopLevel) + throw new UnsupportedOperationException("This is not a top-level context."); + assert leaves != null; return leaves; } @Override - public IndexReaderContext[] children() { + public List children() { return children; } @@ -76,13 +81,11 @@ public final class CompositeReaderContext extends IndexReaderContext { private static final class Builder { private final CompositeReader reader; - private final AtomicReaderContext[] leaves; - private int leafOrd = 0; + private final List leaves = new ArrayList(); private int leafDocBase = 0; public Builder(CompositeReader reader) { this.reader = reader; - leaves = new AtomicReaderContext[numLeaves(reader)]; } public CompositeReaderContext build() { @@ -92,14 +95,14 @@ public final class CompositeReaderContext extends IndexReaderContext { private IndexReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) { if (reader instanceof AtomicReader) { final AtomicReader ar = (AtomicReader) reader; - final AtomicReaderContext atomic = new AtomicReaderContext(parent, ar, ord, docBase, leafOrd, leafDocBase); - leaves[leafOrd++] = atomic; + final AtomicReaderContext atomic = new AtomicReaderContext(parent, ar, ord, docBase, leaves.size(), leafDocBase); + leaves.add(atomic); leafDocBase += reader.maxDoc(); return atomic; } else { final CompositeReader cr = (CompositeReader) reader; - final IndexReader[] sequentialSubReaders = cr.getSequentialSubReaders(); - final IndexReaderContext[] children = new IndexReaderContext[sequentialSubReaders.length]; + final List sequentialSubReaders = cr.getSequentialSubReaders(); + final List children = Arrays.asList(new IndexReaderContext[sequentialSubReaders.size()]); final CompositeReaderContext newParent; if (parent == null) { newParent = new CompositeReaderContext(cr, children, leaves); @@ -107,31 +110,15 @@ public final class CompositeReaderContext extends IndexReaderContext { newParent = new CompositeReaderContext(parent, cr, ord, docBase, children); } int newDocBase = 0; - for (int i = 0; i < sequentialSubReaders.length; i++) { - children[i] = build(newParent, sequentialSubReaders[i], i, newDocBase); - newDocBase += sequentialSubReaders[i].maxDoc(); + for (int i = 0, c = sequentialSubReaders.size(); i < c; i++) { + final IndexReader r = sequentialSubReaders.get(i); + children.set(i, build(newParent, r, i, newDocBase)); + newDocBase += r.maxDoc(); } assert newDocBase == cr.maxDoc(); return newParent; } } - - private int numLeaves(IndexReader reader) { - final int[] numLeaves = new int[1]; - try { - new ReaderUtil.Gather(reader) { - @Override - protected void add(int base, AtomicReader r) { - numLeaves[0]++; - } - }.run(); - } catch (IOException ioe) { - // won't happen - throw new RuntimeException(ioe); - } - return numLeaves[0]; - } - } } \ No newline at end of file diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java index e710d0beace..2a15ac3e6d1 100644 --- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import org.apache.lucene.search.SearcherManager; // javadocs import 
org.apache.lucene.store.Directory; diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java b/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java index b8bf0c24d13..a1315ff5256 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexReaderContext.java @@ -1,5 +1,7 @@ package org.apache.lucene.index; +import java.util.List; + /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -45,23 +47,19 @@ public abstract class IndexReaderContext { public abstract IndexReader reader(); /** - * Returns the context's leaves if this context is a top-level context - * otherwise null. For convenience, if this is an - * {@link AtomicReaderContext} this returns itsself as the only leaf. + * Returns the context's leaves if this context is a top-level context. + * For convenience, if this is an {@link AtomicReaderContext} this + * returns itself as the only leaf. *
Note: this is convenience method since leaves can always be obtained by - * walking the context tree. - *
Warning: Don't modify the returned array! - * Doing so will corrupt the internal structure of this - * {@code IndexReaderContext}. + * walking the context tree using {@link #children()}. + * @throws UnsupportedOperationException if this is not a top-level context. + * @see #children() */ - public abstract AtomicReaderContext[] leaves(); + public abstract List leaves(); /** * Returns the context's children iff this context is a composite context * otherwise null. - *
Warning: Don't modify the returned array! - * Doing so will corrupt the internal structure of this - * {@code IndexReaderContext}. */ - public abstract IndexReaderContext[] children(); + public abstract List children(); } \ No newline at end of file diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java index f658fac800d..27cb8e916d3 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java @@ -29,13 +29,18 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PagedBytes; import org.apache.lucene.util.ReaderUtil; -import org.apache.lucene.util.ReaderUtil.Gather; import org.apache.lucene.util.packed.PackedInts.Reader; /** * A wrapper for CompositeIndexReader providing access to per segment * {@link DocValues} * + *
NOTE: for multi readers, you'll get better + * performance by gathering the sub readers using + * {@link IndexReader#getTopReaderContext()} to get the + * atomic leaves and then operate per-AtomicReader, + * instead of using this class. + * * @lucene.experimental * @lucene.internal */ @@ -69,6 +74,8 @@ public class MultiDocValues extends DocValues { } private static class DocValuesPuller { + public DocValuesPuller() {} + public DocValues pull(AtomicReader reader, String field) throws IOException { return reader.docValues(field); } @@ -94,8 +101,9 @@ public class MultiDocValues extends DocValues { * their values on the fly. * *
- * NOTE: this is a slow way to access DocValues. It's better to get the - * sub-readers (using {@link Gather}) and iterate through them yourself. + * NOTE: this is a slow way to access DocValues. + * It's better to get the sub-readers and iterate through them + * yourself. */ public static DocValues getDocValues(IndexReader r, final String field) throws IOException { return getDocValues(r, field, DEFAULT_PULLER); @@ -106,80 +114,74 @@ public class MultiDocValues extends DocValues { * their values on the fly. * *
- NOTE: this is a slow way to access DocValues. It's better to get the - * sub-readers (using {@link Gather}) and iterate through them yourself. + NOTE: this is a slow way to access DocValues. + It's better to get the sub-readers and iterate through them + yourself. */ public static DocValues getNormDocValues(IndexReader r, final String field) throws IOException { return getDocValues(r, field, NORMS_PULLER); } - private static DocValues getDocValues(IndexReader r, final String field, final DocValuesPuller puller) throws IOException { - if (r instanceof AtomicReader) { + private static DocValues getDocValues(IndexReader reader, final String field, final DocValuesPuller puller) throws IOException { + if (reader instanceof AtomicReader) { // already an atomic reader - return puller.pull((AtomicReader) r, field); + return puller.pull((AtomicReader) reader, field); } - assert r instanceof CompositeReader; - final IndexReader[] subs = ((CompositeReader) r).getSequentialSubReaders(); - if (subs.length == 0) { - // no fields - return null; - } else if (subs.length == 1) { - return getDocValues(subs[0], field, puller); - } else { - final List slices = new ArrayList(); - - final TypePromoter promotedType[] = new TypePromoter[1]; - promotedType[0] = TypePromoter.getIdentityPromoter(); - - // gather all docvalues fields, accumulating a promoted type across - // potentially incompatible types - - new ReaderUtil.Gather(r) { - boolean stop = false; - @Override - protected void add(int base, AtomicReader r) throws IOException { - if (stop) { - return; - } + assert reader instanceof CompositeReader; + final List leaves = reader.getTopReaderContext().leaves(); + switch (leaves.size()) { + case 0: + // no fields + return null; + case 1: + // already an atomic reader / reader with one leaf + return getDocValues(leaves.get(0).reader(), field, puller); + default: + final List slices = new ArrayList(); + + TypePromoter promotedType = TypePromoter.getIdentityPromoter(); + + // gather all docvalues fields, accumulating a promoted type across + // potentially incompatible types + for (final AtomicReaderContext ctx : leaves) { + final AtomicReader r = ctx.reader(); final DocValues d = puller.pull(r, field); if (d != null) { TypePromoter incoming = TypePromoter.create(d.getType(), d.getValueSize()); - promotedType[0] = promotedType[0].promote(incoming); + promotedType = promotedType.promote(incoming); } else if (puller.stopLoadingOnNull(r, field)){ - promotedType[0] = TypePromoter.getIdentityPromoter(); // set to identity to return null - stop = true; + return null; } - slices.add(new DocValuesSlice(d, base, r.maxDoc())); + slices.add(new DocValuesSlice(d, ctx.docBase, r.maxDoc())); } - }.run(); - - // return null if no docvalues encountered anywhere - if (promotedType[0] == TypePromoter.getIdentityPromoter()) { - return null; - } - - // populate starts and fill gaps with empty docvalues - int starts[] = new int[slices.size()]; - for (int i = 0; i < slices.size(); i++) { - DocValuesSlice slice = slices.get(i); - starts[i] = slice.start; - if (slice.docValues == null) { - Type promoted = promotedType[0].type(); - switch(promoted) { - case BYTES_FIXED_DEREF: - case BYTES_FIXED_STRAIGHT: - case BYTES_FIXED_SORTED: - assert promotedType[0].getValueSize() >= 0; - slice.docValues = new EmptyFixedDocValues(slice.length, promoted, promotedType[0].getValueSize()); - break; - default: - slice.docValues = new EmptyDocValues(slice.length, promoted); + + // return null if no docvalues encountered anywhere + if
(promotedType == TypePromoter.getIdentityPromoter()) { + return null; + } + + // populate starts and fill gaps with empty docvalues + int starts[] = new int[slices.size()]; + for (int i = 0; i < slices.size(); i++) { + DocValuesSlice slice = slices.get(i); + starts[i] = slice.start; + if (slice.docValues == null) { + Type promoted = promotedType.type(); + switch(promoted) { + case BYTES_FIXED_DEREF: + case BYTES_FIXED_STRAIGHT: + case BYTES_FIXED_SORTED: + assert promotedType.getValueSize() >= 0; + slice.docValues = new EmptyFixedDocValues(slice.length, promoted, promotedType.getValueSize()); + break; + default: + slice.docValues = new EmptyDocValues(slice.length, promoted); + } } } - } - - return new MultiDocValues(slices.toArray(new DocValuesSlice[slices.size()]), starts, promotedType[0]); + + return new MultiDocValues(slices.toArray(new DocValuesSlice[slices.size()]), starts, promotedType); } } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java index 38fe76f29ff..04a383c9626 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java @@ -17,8 +17,8 @@ package org.apache.lucene.index; * limitations under the License. */ -import org.apache.lucene.util.ReaderUtil; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.ReaderSlice; import java.io.IOException; @@ -148,7 +148,7 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum { // TODO: implement bulk read more efficiently than super public final static class EnumWithSlice { public DocsAndPositionsEnum docsAndPositionsEnum; - public ReaderUtil.Slice slice; + public ReaderSlice slice; } } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java index 9cefd21331d..c9bbd71bc8e 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java @@ -17,7 +17,8 @@ package org.apache.lucene.index; * limitations under the License. */ -import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; + import java.io.IOException; import java.util.Arrays; @@ -123,7 +124,7 @@ public final class MultiDocsEnum extends DocsEnum { // TODO: implement bulk read more efficiently than super public final static class EnumWithSlice { public DocsEnum docsEnum; - public ReaderUtil.Slice slice; + public ReaderSlice slice; @Override public String toString() { diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java index ed6f51b2ef4..240e4f516de 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java @@ -28,8 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.MultiBits; -import org.apache.lucene.util.ReaderUtil.Gather; // for javadocs -import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; /** * Exposes flex API, merged from flex API of sub-segments. @@ -38,9 +37,10 @@ import org.apache.lucene.util.ReaderUtil; * sub-readers (eg {@link DirectoryReader} or {@link * MultiReader}). * - *
NOTE: for multi readers, you'll get better - * performance by gathering the sub readers using {@link - * ReaderUtil#gatherSubReaders} and then operate per-reader, + *
NOTE: for composite readers, you'll get better + * performance by gathering the sub readers using + * {@link IndexReader#getTopReaderContext()} to get the + * atomic leaves and then operate per-AtomicReader, * instead of using this class. * * @lucene.experimental @@ -48,7 +48,7 @@ import org.apache.lucene.util.ReaderUtil; public final class MultiFields extends Fields { private final Fields[] subs; - private final ReaderUtil.Slice[] subSlices; + private final ReaderSlice[] subSlices; private final Map terms = new ConcurrentHashMap(); /** Returns a single {@link Fields} instance for this @@ -57,72 +57,57 @@ public final class MultiFields extends Fields { * has no postings. * *
NOTE: this is a slow way to access postings. - It's better to get the sub-readers (using {@link - Gather}) and iterate through them + It's better to get the sub-readers and iterate through them * yourself. */ - public static Fields getFields(IndexReader r) throws IOException { - if (r instanceof AtomicReader) { - // already an atomic reader - return ((AtomicReader) r).fields(); - } - assert r instanceof CompositeReader; - final IndexReader[] subs = ((CompositeReader) r).getSequentialSubReaders(); - if (subs.length == 0) { - // no fields - return null; - } else { - final List fields = new ArrayList(); - final List slices = new ArrayList(); - - new ReaderUtil.Gather(r) { - @Override - protected void add(int base, AtomicReader r) throws IOException { + public static Fields getFields(IndexReader reader) throws IOException { + final List leaves = reader.getTopReaderContext().leaves(); + switch (leaves.size()) { + case 0: + // no fields + return null; + case 1: + // already an atomic reader / reader with one leaf + return leaves.get(0).reader().fields(); + default: + final List fields = new ArrayList(); + final List slices = new ArrayList(); + for (final AtomicReaderContext ctx : leaves) { + final AtomicReader r = ctx.reader(); final Fields f = r.fields(); if (f != null) { fields.add(f); - slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1)); + slices.add(new ReaderSlice(ctx.docBase, r.maxDoc(), fields.size()-1)); } } - }.run(); - - if (fields.isEmpty()) { - return null; - } else if (fields.size() == 1) { - return fields.get(0); - } else { - return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY), - slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY)); - } + if (fields.isEmpty()) { + return null; + } else if (fields.size() == 1) { + return fields.get(0); + } else { + return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY), + slices.toArray(ReaderSlice.EMPTY_ARRAY)); + } } } - public static Bits getLiveDocs(IndexReader r) { - if (r.hasDeletions()) { - final List liveDocs = new ArrayList(); - final List starts = new ArrayList(); - - try { - final int maxDoc = new ReaderUtil.Gather(r) { - @Override - protected void add(int base, AtomicReader r) throws IOException { - // record all liveDocs, even if they are null - liveDocs.add(r.getLiveDocs()); - starts.add(base); - } - }.run(); - starts.add(maxDoc); - } catch (IOException ioe) { - // should not happen - throw new RuntimeException(ioe); + public static Bits getLiveDocs(IndexReader reader) { + if (reader.hasDeletions()) { + final List leaves = reader.getTopReaderContext().leaves(); + final int size = leaves.size(); + assert size > 0 : "A reader with deletions must have at least one leaf"; + if (size == 1) { + return leaves.get(0).reader().getLiveDocs(); } - - assert liveDocs.size() > 0; - if (liveDocs.size() == 1) { - // Only one actual sub reader -- optimize this case - return liveDocs.get(0); - } else { - return new MultiBits(liveDocs, starts, true); + final Bits[] liveDocs = new Bits[size]; + final int[] starts = new int[size + 1]; + for (int i = 0; i < size; i++) { + // record all liveDocs, even if they are null + final AtomicReaderContext ctx = leaves.get(i); + liveDocs[i] = ctx.reader().getLiveDocs(); + starts[i] = ctx.docBase; } + starts[size] = reader.maxDoc(); + return new MultiBits(liveDocs, starts, true); } else { return null; } @@ -170,7 +155,7 @@ return null; } - public MultiFields(Fields[] subs, ReaderUtil.Slice[] subSlices) { + public MultiFields(Fields[] subs,
ReaderSlice[] subSlices) { this.subs = subs; this.subSlices = subSlices; } @@ -179,7 +164,7 @@ public final class MultiFields extends Fields { public FieldsEnum iterator() throws IOException { final List fieldsEnums = new ArrayList(); - final List fieldsSlices = new ArrayList(); + final List fieldsSlices = new ArrayList(); for(int i=0;i subs2 = new ArrayList(); - final List slices2 = new ArrayList(); + final List slices2 = new ArrayList(); // Gather all sub-readers that share this field for(int i=0;i subReaders = new ArrayList(); - ReaderUtil.gatherSubReaders(subReaders, reader); final FieldInfos.Builder builder = new FieldInfos.Builder(); - for(AtomicReader subReader : subReaders) { - builder.add(subReader.getFieldInfos()); + for(final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) { + builder.add(ctx.reader().getFieldInfos()); } return builder.finish(); } public static Collection getIndexedFields(IndexReader reader) { final Collection fields = new HashSet(); - for(FieldInfo fieldInfo : getMergedFieldInfos(reader)) { + for(final FieldInfo fieldInfo : getMergedFieldInfos(reader)) { if (fieldInfo.isIndexed()) { fields.add(fieldInfo.name); } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java index 69e3ed81032..dba1b488295 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiFieldsEnum.java @@ -18,7 +18,7 @@ package org.apache.lucene.index; */ import org.apache.lucene.util.PriorityQueue; -import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; import java.io.IOException; import java.util.List; @@ -47,7 +47,7 @@ public final class MultiFieldsEnum extends FieldsEnum { /** The subs array must be newly initialized FieldsEnum * (ie, {@link FieldsEnum#next} has not been called. 
*/ - public MultiFieldsEnum(MultiFields fields, FieldsEnum[] subs, ReaderUtil.Slice[] subSlices) throws IOException { + public MultiFieldsEnum(MultiFields fields, FieldsEnum[] subs, ReaderSlice[] subSlices) throws IOException { this.fields = fields; queue = new FieldMergeQueue(subs.length); top = new FieldsEnumWithSlice[subs.length]; @@ -107,11 +107,11 @@ public final class MultiFieldsEnum extends FieldsEnum { public final static class FieldsEnumWithSlice { public static final FieldsEnumWithSlice[] EMPTY_ARRAY = new FieldsEnumWithSlice[0]; final FieldsEnum fields; - final ReaderUtil.Slice slice; + final ReaderSlice slice; final int index; String current; - public FieldsEnumWithSlice(FieldsEnum fields, ReaderUtil.Slice slice, int index) throws IOException { + public FieldsEnumWithSlice(FieldsEnum fields, ReaderSlice slice, int index) throws IOException { this.slice = slice; this.index = index; assert slice.length >= 0: "length=" + slice.length; diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiReader.java b/lucene/core/src/java/org/apache/lucene/index/MultiReader.java index 434110f0da8..19aaa905a18 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiReader.java @@ -68,12 +68,12 @@ public class MultiReader extends BaseCompositeReader { @Override protected synchronized void doClose() throws IOException { IOException ioe = null; - for (int i = 0; i < subReaders.length; i++) { + for (final IndexReader r : getSequentialSubReaders()) { try { if (closeSubReaders) { - subReaders[i].close(); + r.close(); } else { - subReaders[i].decRef(); + r.decRef(); } } catch (IOException e) { if (ioe == null) ioe = e; diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java index 95ef3a593e8..d42d69aac6c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java @@ -23,7 +23,7 @@ import java.util.Comparator; import java.util.List; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; import org.apache.lucene.util.automaton.CompiledAutomaton; @@ -36,10 +36,10 @@ import org.apache.lucene.util.automaton.CompiledAutomaton; public final class MultiTerms extends Terms { private final Terms[] subs; - private final ReaderUtil.Slice[] subSlices; + private final ReaderSlice[] subSlices; private final Comparator termComp; - public MultiTerms(Terms[] subs, ReaderUtil.Slice[] subSlices) throws IOException { + public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException { this.subs = subs; this.subSlices = subSlices; diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java index 65a1fc1f17f..ab42935098b 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java @@ -22,7 +22,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BitsSlice; import org.apache.lucene.util.MultiBits; -import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; import java.io.IOException; import java.util.Arrays; @@ -71,7 +71,7 @@ public final class MultiTermsEnum extends TermsEnum { return top; } - public MultiTermsEnum(ReaderUtil.Slice[] slices) { + public 
MultiTermsEnum(ReaderSlice[] slices) { queue = new TermMergeQueue(slices.length); top = new TermsEnumWithSlice[slices.length]; subs = new TermsEnumWithSlice[slices.length]; @@ -494,12 +494,12 @@ public final class MultiTermsEnum extends TermsEnum { } private final static class TermsEnumWithSlice { - private final ReaderUtil.Slice subSlice; + private final ReaderSlice subSlice; private TermsEnum terms; public BytesRef current; final int index; - public TermsEnumWithSlice(int index, ReaderUtil.Slice subSlice) { + public TermsEnumWithSlice(int index, ReaderSlice subSlice) { this.subSlice = subSlice; this.index = index; assert subSlice.length >= 0: "length=" + subSlice.length; diff --git a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java index 96af799bb43..3fcc76e1988 100644 --- a/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/ParallelCompositeReader.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.Collections; import java.util.IdentityHashMap; import java.util.Iterator; +import java.util.List; import java.util.Set; /** An {@link CompositeReader} which reads multiple, parallel indexes. Each index added @@ -85,44 +86,45 @@ public final class ParallelCompositeReader extends BaseCompositeReader firstSubReaders = readers[0].getSequentialSubReaders(); // check compatibility: - final int maxDoc = readers[0].maxDoc(); - final int[] childMaxDoc = new int[firstSubReaders.length]; - final boolean[] childAtomic = new boolean[firstSubReaders.length]; - for (int i = 0; i < firstSubReaders.length; i++) { - childMaxDoc[i] = firstSubReaders[i].maxDoc(); - childAtomic[i] = firstSubReaders[i] instanceof AtomicReader; + final int maxDoc = readers[0].maxDoc(), noSubs = firstSubReaders.size(); + final int[] childMaxDoc = new int[noSubs]; + final boolean[] childAtomic = new boolean[noSubs]; + for (int i = 0; i < noSubs; i++) { + final IndexReader r = firstSubReaders.get(i); + childMaxDoc[i] = r.maxDoc(); + childAtomic[i] = r instanceof AtomicReader; } validate(readers, maxDoc, childMaxDoc, childAtomic); validate(storedFieldsReaders, maxDoc, childMaxDoc, childAtomic); // hierarchically build the same subreader structure as the first CompositeReader with Parallel*Readers: - final IndexReader[] subReaders = new IndexReader[firstSubReaders.length]; + final IndexReader[] subReaders = new IndexReader[noSubs]; for (int i = 0; i < subReaders.length; i++) { - if (firstSubReaders[i] instanceof AtomicReader) { + if (firstSubReaders.get(i) instanceof AtomicReader) { final AtomicReader[] atomicSubs = new AtomicReader[readers.length]; for (int j = 0; j < readers.length; j++) { - atomicSubs[j] = (AtomicReader) readers[j].getSequentialSubReaders()[i]; + atomicSubs[j] = (AtomicReader) readers[j].getSequentialSubReaders().get(i); } final AtomicReader[] storedSubs = new AtomicReader[storedFieldsReaders.length]; for (int j = 0; j < storedFieldsReaders.length; j++) { - storedSubs[j] = (AtomicReader) storedFieldsReaders[j].getSequentialSubReaders()[i]; + storedSubs[j] = (AtomicReader) storedFieldsReaders[j].getSequentialSubReaders().get(i); } // we simply enable closing of subReaders, to prevent incRefs on subReaders // -> for synthetic subReaders, close() is never // called by our doClose() subReaders[i] = new ParallelAtomicReader(true, atomicSubs, storedSubs); } else { - assert firstSubReaders[i] instanceof CompositeReader; + assert 
firstSubReaders.get(i) instanceof CompositeReader; final CompositeReader[] compositeSubs = new CompositeReader[readers.length]; for (int j = 0; j < readers.length; j++) { - compositeSubs[j] = (CompositeReader) readers[j].getSequentialSubReaders()[i]; + compositeSubs[j] = (CompositeReader) readers[j].getSequentialSubReaders().get(i); } final CompositeReader[] storedSubs = new CompositeReader[storedFieldsReaders.length]; for (int j = 0; j < storedFieldsReaders.length; j++) { - storedSubs[j] = (CompositeReader) storedFieldsReaders[j].getSequentialSubReaders()[i]; + storedSubs[j] = (CompositeReader) storedFieldsReaders[j].getSequentialSubReaders().get(i); } // we simply enable closing of subReaders, to prevent incRefs on subReaders // -> for synthetic subReaders, close() is never called by our doClose() @@ -136,18 +138,20 @@ public final class ParallelCompositeReader extends BaseCompositeReader subs = reader.getSequentialSubReaders(); if (reader.maxDoc() != maxDoc) { throw new IllegalArgumentException("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc()); } - if (subs.length != childMaxDoc.length) { + final int noSubs = subs.size(); + if (noSubs != childMaxDoc.length) { throw new IllegalArgumentException("All readers must have same number of subReaders"); } - for (int subIDX = 0; subIDX < subs.length; subIDX++) { - if (subs[subIDX].maxDoc() != childMaxDoc[subIDX]) { + for (int subIDX = 0; subIDX < noSubs; subIDX++) { + final IndexReader r = subs.get(subIDX); + if (r.maxDoc() != childMaxDoc[subIDX]) { throw new IllegalArgumentException("All readers must have same corresponding subReader maxDoc"); } - if (!(childAtomic[subIDX] ? (subs[subIDX] instanceof AtomicReader) : (subs[subIDX] instanceof CompositeReader))) { + if (!(childAtomic[subIDX] ? 
(r instanceof AtomicReader) : (r instanceof CompositeReader))) { throw new IllegalArgumentException("All readers must have same corresponding subReader types (atomic or composite)"); } } diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java index c52ace7b19e..96ec622bce4 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.ReaderUtil; +import org.apache.lucene.util.ReaderSlice; /** * The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add}, @@ -76,16 +77,9 @@ final class SegmentMerger { * @param reader */ final void add(IndexReader reader) { - try { - new ReaderUtil.Gather(reader) { - @Override - protected void add(int base, AtomicReader r) { - mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs(), r.numDeletedDocs())); - } - }.run(); - } catch (IOException ioe) { - // won't happen - throw new RuntimeException(ioe); + for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) { + final AtomicReader r = ctx.reader(); + mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs(), r.numDeletedDocs())); } } @@ -311,7 +305,7 @@ final class SegmentMerger { private final void mergeTerms(SegmentWriteState segmentWriteState) throws CorruptIndexException, IOException { final List fields = new ArrayList(); - final List slices = new ArrayList(); + final List slices = new ArrayList(); int docBase = 0; @@ -320,7 +314,7 @@ final class SegmentMerger { final Fields f = r.reader.fields(); final int maxDoc = r.reader.maxDoc(); if (f != null) { - slices.add(new ReaderUtil.Slice(docBase, maxDoc, readerIndex)); + slices.add(new ReaderSlice(docBase, maxDoc, readerIndex)); fields.add(f); } docBase += maxDoc; @@ -331,7 +325,7 @@ final class SegmentMerger { try { consumer.merge(mergeState, new MultiFields(fields.toArray(Fields.EMPTY_ARRAY), - slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY))); + slices.toArray(ReaderSlice.EMPTY_ARRAY))); success = true; } finally { if (success) { diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java index c6810198a9e..e0824b31374 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java +++ b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.ReaderUtil; // javadoc import org.apache.lucene.index.DirectoryReader; // javadoc import org.apache.lucene.index.MultiReader; // javadoc @@ -32,17 +31,16 @@ import org.apache.lucene.index.MultiReader; // javadoc * MultiReader} or {@link DirectoryReader}) to emulate an * atomic reader. This requires implementing the postings * APIs on-the-fly, using the static methods in {@link - * MultiFields}, {@link MultiDocValues}, - * by stepping through the sub-readers to merge fields/terms, - * appending docs, etc. + * MultiFields}, {@link MultiDocValues}, by stepping through + * the sub-readers to merge fields/terms, appending docs, etc. * *
NOTE: this class almost always results in a * performance hit. If this is important to your use case, - * it's better to get the sequential sub readers (see {@link - * ReaderUtil#gatherSubReaders}, instead, and iterate through them - * yourself.
+ * you'll get better performance by gathering the sub readers using + * {@link IndexReader#getTopReaderContext()} to get the + * atomic leaves and then operate per-AtomicReader, + * instead of using this class. */ - public final class SlowCompositeReaderWrapper extends AtomicReader { private final CompositeReader in; diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java index aa95a6abef0..a8cd8d0eedc 100644 --- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java @@ -118,8 +118,8 @@ final class StandardDirectoryReader extends DirectoryReader { writer, segmentInfos, writer.getConfig().getReaderTermsIndexDivisor(), applyAllDeletes); } - /** This constructor is only used for {@link #doOpenIfChanged()} */ - private static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, AtomicReader[] oldReaders, + /** This constructor is only used for {@link #doOpenIfChanged(SegmentInfos, IndexWriter)} */ + private static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, List oldReaders, int termInfosIndexDivisor) throws IOException { // we put the old SegmentReaders in a map, that allows us // to lookup a reader using its segment name @@ -127,8 +127,9 @@ final class StandardDirectoryReader extends DirectoryReader { if (oldReaders != null) { // create a Map SegmentName->SegmentReader - for (int i = 0; i < oldReaders.length; i++) { - segmentReaders.put(((SegmentReader) oldReaders[i]).getSegmentName(), Integer.valueOf(i)); + for (int i = 0, c = oldReaders.size(); i < c; i++) { + final SegmentReader sr = (SegmentReader) oldReaders.get(i); + segmentReaders.put(sr.getSegmentName(), Integer.valueOf(i)); } } @@ -146,7 +147,7 @@ final class StandardDirectoryReader extends DirectoryReader { newReaders[i] = null; } else { // there is an old reader for this segment - we'll try to reopen it - newReaders[i] = (SegmentReader) oldReaders[oldReaderIndex.intValue()]; + newReaders[i] = (SegmentReader) oldReaders.get(oldReaderIndex.intValue()); } boolean success = false; @@ -216,9 +217,9 @@ final class StandardDirectoryReader extends DirectoryReader { if (writer != null) { buffer.append(":nrt"); } - for(int i=0;i leafContexts; // used with executor - each slice holds a set of leafs executed within one thread protected final LeafSlice[] leafSlices; @@ -165,10 +167,10 @@ public class IndexSearcher { * Each {@link LeafSlice} is executed in a single thread. By default there * will be one {@link LeafSlice} per leaf ({@link AtomicReaderContext}). */ - protected LeafSlice[] slices(AtomicReaderContext...leaves) { - LeafSlice[] slices = new LeafSlice[leaves.length]; + protected LeafSlice[] slices(List leaves) { + LeafSlice[] slices = new LeafSlice[leaves.size()]; for (int i = 0; i < slices.length; i++) { - slices[i] = new LeafSlice(leaves[i]); + slices[i] = new LeafSlice(leaves.get(i)); } return slices; } @@ -440,7 +442,7 @@ public class IndexSearcher { * {@link IndexSearcher#search(Query,Filter,int)} instead. 
* @throws BooleanQuery.TooManyClauses */ - protected TopDocs search(AtomicReaderContext[] leaves, Weight weight, ScoreDoc after, int nDocs) throws IOException { + protected TopDocs search(List leaves, Weight weight, ScoreDoc after, int nDocs) throws IOException { // single thread int limit = reader.maxDoc(); if (limit == 0) { @@ -477,7 +479,7 @@ public class IndexSearcher { *
NOTE: this does not compute scores by default. If you * need scores, create a {@link TopFieldCollector} * instance by calling {@link TopFieldCollector#create} and - * then pass that to {@link #search(AtomicReaderContext[], Weight, + * then pass that to {@link #search(List, Weight, * Collector)}.
*/ protected TopFieldDocs search(Weight weight, FieldDoc after, int nDocs, @@ -525,7 +527,7 @@ public class IndexSearcher { * whether or not the fields in the returned {@link FieldDoc} instances should * be set by specifying fillFields. */ - protected TopFieldDocs search(AtomicReaderContext[] leaves, Weight weight, FieldDoc after, int nDocs, + protected TopFieldDocs search(List leaves, Weight weight, FieldDoc after, int nDocs, Sort sort, boolean fillFields, boolean doDocScores, boolean doMaxScore) throws IOException { // single thread int limit = reader.maxDoc(); @@ -559,15 +561,15 @@ public class IndexSearcher { * to receive hits * @throws BooleanQuery.TooManyClauses */ - protected void search(AtomicReaderContext[] leaves, Weight weight, Collector collector) + protected void search(List leaves, Weight weight, Collector collector) throws IOException { // TODO: should we make this // threaded...? the Collector could be sync'd? // always use single thread: - for (int i = 0; i < leaves.length; i++) { // search each subreader - collector.setNextReader(leaves[i]); - Scorer scorer = weight.scorer(leaves[i], !collector.acceptsDocsOutOfOrder(), true, leaves[i].reader().getLiveDocs()); + for (AtomicReaderContext ctx : leaves) { // search each subreader + collector.setNextReader(ctx); + Scorer scorer = weight.scorer(ctx, !collector.acceptsDocsOutOfOrder(), true, ctx.reader().getLiveDocs()); if (scorer != null) { scorer.score(collector); } @@ -611,9 +613,10 @@ public class IndexSearcher { */ protected Explanation explain(Weight weight, int doc) throws IOException { int n = ReaderUtil.subIndex(doc, leafContexts); - int deBasedDoc = doc - leafContexts[n].docBase; + final AtomicReaderContext ctx = leafContexts.get(n); + int deBasedDoc = doc - ctx.docBase; - return weight.explain(leafContexts[n], deBasedDoc); + return weight.explain(ctx, deBasedDoc); } /** @@ -669,7 +672,7 @@ public class IndexSearcher { } public TopDocs call() throws IOException { - final TopDocs docs = searcher.search(slice.leaves, weight, after, nDocs); + final TopDocs docs = searcher.search(Arrays.asList(slice.leaves), weight, after, nDocs); final ScoreDoc[] scoreDocs = docs.scoreDocs; //it would be so nice if we had a thread-safe insert lock.lock(); @@ -757,11 +760,13 @@ public class IndexSearcher { public TopFieldDocs call() throws IOException { assert slice.leaves.length == 1; - final TopFieldDocs docs = searcher.search(slice.leaves, weight, after, nDocs, sort, true, doDocScores, doMaxScore); + final TopFieldDocs docs = searcher.search(Arrays.asList(slice.leaves), + weight, after, nDocs, sort, true, doDocScores, doMaxScore); lock.lock(); try { - final int base = slice.leaves[0].docBase; - hq.setNextReader(slice.leaves[0]); + final AtomicReaderContext ctx = slice.leaves[0]; + final int base = ctx.docBase; + hq.setNextReader(ctx); hq.setScorer(fakeScorer); for(ScoreDoc scoreDoc : docs.scoreDocs) { fakeScorer.doc = scoreDoc.doc - base; @@ -837,7 +842,7 @@ public class IndexSearcher { public static class LeafSlice { final AtomicReaderContext[] leaves; - public LeafSlice(AtomicReaderContext...leaves) { + public LeafSlice(AtomicReaderContext... 
leaves) { this.leaves = leaves; } } diff --git a/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java b/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java index c893a3b5877..3f5a6a3924a 100644 --- a/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java +++ b/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java @@ -48,8 +48,7 @@ abstract class TermCollectingRewrite extends MultiTermQuery.Rew final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException { IndexReaderContext topReaderContext = reader.getTopReaderContext(); Comparator lastTermComp = null; - final AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (AtomicReaderContext context : leaves) { + for (AtomicReaderContext context : topReaderContext.leaves()) { final Fields fields = context.reader().fields(); if (fields == null) { // reader has no fields diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java index 252cdcd1ff5..07488364b04 100644 --- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java +++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java @@ -184,8 +184,7 @@ public class PayloadSpanUtil { for (Term term : terms) { termContexts.put(term, TermContext.build(context, term, true)); } - final AtomicReaderContext[] leaves = context.leaves(); - for (AtomicReaderContext atomicReaderContext : leaves) { + for (AtomicReaderContext atomicReaderContext : context.leaves()) { final Spans spans = query.getSpans(atomicReaderContext, atomicReaderContext.reader().getLiveDocs(), termContexts); while (spans.next() == true) { if (spans.isPayloadAvailable()) { diff --git a/lucene/core/src/java/org/apache/lucene/util/BitsSlice.java b/lucene/core/src/java/org/apache/lucene/util/BitsSlice.java index 7cc5dda3724..b294ede7b21 100644 --- a/lucene/core/src/java/org/apache/lucene/util/BitsSlice.java +++ b/lucene/core/src/java/org/apache/lucene/util/BitsSlice.java @@ -25,7 +25,7 @@ public final class BitsSlice implements Bits { private final int length; // start is inclusive; end is exclusive (length = end-start) - public BitsSlice(Bits parent, ReaderUtil.Slice slice) { + public BitsSlice(Bits parent, ReaderSlice slice) { this.parent = parent; this.start = slice.start; this.length = slice.length; diff --git a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java b/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java index 32e8f2aa879..6946329b405 100644 --- a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java +++ b/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java @@ -280,9 +280,9 @@ public final class FieldCacheSanityChecker { for (int i = 0; i < all.size(); i++) { Object obj = all.get(i); if (obj instanceof CompositeReader) { - IndexReader[] subs = ((CompositeReader)obj).getSequentialSubReaders(); - for (int j = 0; (null != subs) && (j < subs.length); j++) { - all.add(subs[j].getCoreCacheKey()); + List subs = ((CompositeReader)obj).getSequentialSubReaders(); + for (int j = 0; (null != subs) && (j < subs.size()); j++) { + all.add(subs.get(j).getCoreCacheKey()); } } diff --git a/lucene/core/src/java/org/apache/lucene/util/MultiBits.java b/lucene/core/src/java/org/apache/lucene/util/MultiBits.java index d779b109321..4fa6efa7b71 100644 --- 
a/lucene/core/src/java/org/apache/lucene/util/MultiBits.java +++ b/lucene/core/src/java/org/apache/lucene/util/MultiBits.java @@ -17,8 +17,6 @@ package org.apache.lucene.util; * limitations under the License. */ -import java.util.List; - /** * Concatenates multiple Bits together, on every lookup. * @@ -36,13 +34,10 @@ public final class MultiBits implements Bits { private final boolean defaultValue; - public MultiBits(List<Bits> bits, List<Integer> starts, boolean defaultValue) { - assert starts.size() == 1+bits.size(); - this.subs = bits.toArray(Bits.EMPTY_ARRAY); - this.starts = new int[starts.size()]; - for(int i=0;in in the * array used to construct this searcher/reader. */ - public static int subIndex(int n, AtomicReaderContext[] leaves) { // find + public static int subIndex(int n, List<AtomicReaderContext> leaves) { // find // searcher/reader for doc n: - int size = leaves.length; + int size = leaves.size(); int lo = 0; // search starts array int hi = size - 1; // for first element less than n, return its index while (hi >= lo) { int mid = (lo + hi) >>> 1; - int midValue = leaves[mid].docBase; + int midValue = leaves.get(mid).docBase; if (n < midValue) hi = mid - 1; else if (n > midValue) lo = mid + 1; else { // found a match - while (mid + 1 < size && leaves[mid + 1].docBase == midValue) { + while (mid + 1 < size && leaves.get(mid + 1).docBase == midValue) { mid++; // scan to last match } return mid; diff --git a/lucene/core/src/java/org/apache/lucene/util/TermContext.java b/lucene/core/src/java/org/apache/lucene/util/TermContext.java index 76fc180ccc9..d4d570f2a24 100644 --- a/lucene/core/src/java/org/apache/lucene/util/TermContext.java +++ b/lucene/core/src/java/org/apache/lucene/util/TermContext.java @@ -57,7 +57,7 @@ public final class TermContext { if (context.leaves() == null) { len = 1; } else { - len = context.leaves().length; + len = context.leaves().size(); } states = new TermState[len]; } @@ -85,11 +85,10 @@ public final class TermContext { final String field = term.field(); final BytesRef bytes = term.bytes(); final TermContext perReaderTermState = new TermContext(context); - final AtomicReaderContext[] leaves = context.leaves(); //if (DEBUG) System.out.println("prts.build term=" + term); - for (int i = 0; i < leaves.length; i++) { + for (final AtomicReaderContext ctx : context.leaves()) { //if (DEBUG) System.out.println(" r=" + leaves[i].reader); - final Fields fields = leaves[i].reader().fields(); + final Fields fields = ctx.reader().fields(); if (fields != null) { final Terms terms = fields.terms(field); if (terms != null) { @@ -97,7 +96,7 @@ if (termsEnum.seekExact(bytes, cache)) { final TermState termState = termsEnum.termState(); //if (DEBUG) System.out.println(" found"); - perReaderTermState.register(termState, leaves[i].ord, termsEnum.docFreq(), termsEnum.totalTermFreq()); + perReaderTermState.register(termState, ctx.ord, termsEnum.docFreq(), termsEnum.totalTermFreq()); } } } diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java index 40ebc10a09d..7c71327fb42 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java @@ -17,6 +17,7 @@ package org.apache.lucene.codecs.lucene40; */ import java.io.IOException; import java.util.IdentityHashMap; +import java.util.List; import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer;
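(Not part of the patch: the TestReuseDocsEnum hunks below replace the removed ReaderUtil.Gather callback with plain iteration over the sub-readers. A minimal sketch of that migration, assuming only an open IndexReader named "reader"; the per-leaf work is a placeholder.)

  // Before this commit (ReaderUtil.Gather, now removed):
  //   new ReaderUtil.Gather(reader) {
  //     @Override
  //     protected void add(int base, AtomicReader r) throws IOException {
  //       // per-leaf work on r, with its docBase passed in as "base"
  //     }
  //   }.run();
  // After: IndexReaderContext.leaves() is the preferred way to visit atomic sub-readers.
  for (AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
    AtomicReader r = ctx.reader(); // the atomic leaf
    int docBase = ctx.docBase;     // replaces Gather's "base" argument
    // per-leaf work on r
  }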
@@ -35,7 +36,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.ReaderUtil; import org.apache.lucene.util._TestUtil; public class TestReuseDocsEnum extends LuceneTestCase { @@ -50,21 +50,18 @@ public class TestReuseDocsEnum extends LuceneTestCase { writer.commit(); DirectoryReader open = DirectoryReader.open(dir); - new ReaderUtil.Gather(open) { - @Override - protected void add(int base, AtomicReader r) throws IOException { - Terms terms = r.terms("body"); - TermsEnum iterator = terms.iterator(null); - IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<DocsEnum, Boolean>(); - MatchNoBits bits = new Bits.MatchNoBits(r.maxDoc()); - while ((iterator.next()) != null) { - DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(r.maxDoc()), null, random().nextBoolean()); - enums.put(docs, true); - } - - assertEquals(terms.size(), enums.size()); + for (AtomicReader indexReader : open.getSequentialSubReaders()) { + Terms terms = indexReader.terms("body"); + TermsEnum iterator = terms.iterator(null); + IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<DocsEnum, Boolean>(); + MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc()); + while ((iterator.next()) != null) { + DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()), null, random().nextBoolean()); + enums.put(docs, true); } - }.run(); + + assertEquals(terms.size(), enums.size()); + } IOUtils.close(writer, open, dir); } @@ -79,9 +76,8 @@ public class TestReuseDocsEnum extends LuceneTestCase { writer.commit(); DirectoryReader open = DirectoryReader.open(dir); - IndexReader[] sequentialSubReaders = open.getSequentialSubReaders(); - for (IndexReader indexReader : sequentialSubReaders) { - Terms terms = ((AtomicReader) indexReader).terms("body"); + for (AtomicReader indexReader : open.getSequentialSubReaders()) { + Terms terms = indexReader.terms("body"); TermsEnum iterator = terms.iterator(null); IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<DocsEnum, Boolean>(); MatchNoBits bits = new Bits.MatchNoBits(open.maxDoc()); @@ -125,8 +121,8 @@ public class TestReuseDocsEnum extends LuceneTestCase { DirectoryReader firstReader = DirectoryReader.open(dir); DirectoryReader secondReader = DirectoryReader.open(dir); - IndexReader[] sequentialSubReaders = firstReader.getSequentialSubReaders(); - IndexReader[] sequentialSubReaders2 = secondReader.getSequentialSubReaders(); + List<? extends IndexReader> sequentialSubReaders = firstReader.getSequentialSubReaders(); + List<? extends IndexReader> sequentialSubReaders2 = secondReader.getSequentialSubReaders(); for (IndexReader indexReader : sequentialSubReaders) { Terms terms = ((AtomicReader) indexReader).terms("body"); @@ -154,11 +150,11 @@ public class TestReuseDocsEnum extends LuceneTestCase { IOUtils.close(writer, firstReader, secondReader, dir); } - public DocsEnum randomDocsEnum(String field, BytesRef term, IndexReader[] readers, Bits bits) throws IOException { + public DocsEnum randomDocsEnum(String field, BytesRef term, List<? extends IndexReader> readers, Bits bits) throws IOException { if (random().nextInt(10) == 0) { return null; } - AtomicReader indexReader = (AtomicReader) readers[random().nextInt(readers.length)]; + AtomicReader indexReader = (AtomicReader) readers.get(random().nextInt(readers.size())); return indexReader.termDocsEnum(bits, field, term, random().nextBoolean()); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java
b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java index 471b79652b6..a9addc86097 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCustomNorms.java @@ -145,25 +145,24 @@ public class TestCustomNorms extends LuceneTestCase { writer.close(); assertEquals(numAdded, reader.numDocs()); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (int j = 0; j < leaves.length; j++) { - AtomicReader atomicReader = leaves[j].reader(); - Source source = random().nextBoolean() ? atomicReader.normValues("foo").getSource() : atomicReader.normValues("foo").getDirectSource(); - Bits liveDocs = atomicReader.getLiveDocs(); - Type t = source.getType(); - for (int i = 0; i < atomicReader.maxDoc(); i++) { - assertEquals(0, source.getFloat(i), 0.000f); - } - - - source = random().nextBoolean() ? atomicReader.normValues("bar").getSource() : atomicReader.normValues("bar").getDirectSource(); - for (int i = 0; i < atomicReader.maxDoc(); i++) { - if (liveDocs == null || liveDocs.get(i)) { - assertEquals("type: " + t, 1, source.getFloat(i), 0.000f); - } else { - assertEquals("type: " + t, 0, source.getFloat(i), 0.000f); + for (final AtomicReaderContext ctx : topReaderContext.leaves()) { + AtomicReader atomicReader = ctx.reader(); + Source source = random().nextBoolean() ? atomicReader.normValues("foo").getSource() : atomicReader.normValues("foo").getDirectSource(); + Bits liveDocs = atomicReader.getLiveDocs(); + Type t = source.getType(); + for (int i = 0; i < atomicReader.maxDoc(); i++) { + assertEquals(0, source.getFloat(i), 0.000f); + } + + + source = random().nextBoolean() ? atomicReader.normValues("bar").getSource() : atomicReader.normValues("bar").getDirectSource(); + for (int i = 0; i < atomicReader.maxDoc(); i++) { + if (liveDocs == null || liveDocs.get(i)) { + assertEquals("type: " + t, 1, source.getFloat(i), 0.000f); + } else { + assertEquals("type: " + t, 0, source.getFloat(i), 0.000f); + } } - } } reader.close(); dir.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java index 5ff626a5ed2..f810b45448b 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDeletionPolicy.java @@ -67,7 +67,7 @@ public class TestDeletionPolicy extends LuceneTestCase { public void onCommit(List commits) throws IOException { IndexCommit lastCommit = commits.get(commits.size()-1); DirectoryReader r = DirectoryReader.open(dir); - assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().length, r.getSequentialSubReaders().length, lastCommit.getSegmentCount()); + assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().size(), r.getSequentialSubReaders().size(), lastCommit.getSegmentCount()); r.close(); verifyCommitOrder(commits); numOnCommit++; @@ -325,7 +325,7 @@ public class TestDeletionPolicy extends LuceneTestCase { final boolean needsMerging; { DirectoryReader r = DirectoryReader.open(dir); - needsMerging = r.getSequentialSubReaders().length != 1; + needsMerging = r.getSequentialSubReaders().size() != 1; r.close(); } if (needsMerging) { @@ -442,7 +442,7 @@ public class TestDeletionPolicy extends 
LuceneTestCase { DirectoryReader r = DirectoryReader.open(dir); // Still merged, still 11 docs - assertEquals(1, r.getSequentialSubReaders().length); + assertEquals(1, r.getSequentialSubReaders().size()); assertEquals(11, r.numDocs()); r.close(); @@ -458,7 +458,7 @@ public class TestDeletionPolicy extends LuceneTestCase { r = DirectoryReader.open(dir); // Not fully merged because we rolled it back, and now only // 10 docs - assertTrue(r.getSequentialSubReaders().length > 1); + assertTrue(r.getSequentialSubReaders().size() > 1); assertEquals(10, r.numDocs()); r.close(); @@ -468,7 +468,7 @@ public class TestDeletionPolicy extends LuceneTestCase { writer.close(); r = DirectoryReader.open(dir); - assertEquals(1, r.getSequentialSubReaders().length); + assertEquals(1, r.getSequentialSubReaders().size()); assertEquals(10, r.numDocs()); r.close(); @@ -480,7 +480,7 @@ public class TestDeletionPolicy extends LuceneTestCase { // Reader still sees fully merged index, because writer // opened on the prior commit has not yet committed: r = DirectoryReader.open(dir); - assertEquals(1, r.getSequentialSubReaders().length); + assertEquals(1, r.getSequentialSubReaders().size()); assertEquals(10, r.numDocs()); r.close(); @@ -488,7 +488,7 @@ public class TestDeletionPolicy extends LuceneTestCase { // Now reader sees not-fully-merged index: r = DirectoryReader.open(dir); - assertTrue(r.getSequentialSubReaders().length > 1); + assertTrue(r.getSequentialSubReaders().size() > 1); assertEquals(10, r.numDocs()); r.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java index 61eadce6493..18d5c5e566d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java @@ -550,7 +550,7 @@ public void testFilesOpenClose() throws IOException { assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs()); assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc()); assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions()); - assertEquals("Single segment test differs.", index1.getSequentialSubReaders().length == 1, index2.getSequentialSubReaders().length == 1); + assertEquals("Single segment test differs.", index1.getSequentialSubReaders().size() == 1, index2.getSequentialSubReaders().size() == 1); // check field names FieldInfos fieldInfos1 = MultiFields.getMergedFieldInfos(index1); @@ -785,7 +785,7 @@ public void testFilesOpenClose() throws IOException { DirectoryReader r2 = DirectoryReader.openIfChanged(r); assertNotNull(r2); r.close(); - AtomicReader sub0 = r2.getSequentialSubReaders()[0]; + AtomicReader sub0 = r2.getSequentialSubReaders().get(0); final int[] ints2 = FieldCache.DEFAULT.getInts(sub0, "number", false); r2.close(); assertTrue(ints == ints2); @@ -814,9 +814,9 @@ public void testFilesOpenClose() throws IOException { assertNotNull(r2); r.close(); - IndexReader[] subs = r2.getSequentialSubReaders(); - for(int i=0;i subs = r2.getSequentialSubReaders(); + for(AtomicReader s : subs) { + assertEquals(36, s.getUniqueTermCount()); } r2.close(); writer.close(); @@ -842,7 +842,7 @@ public void testFilesOpenClose() throws IOException { // expected } - assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor()); + assertEquals(-1, ((SegmentReader) 
r.getSequentialSubReaders().get(0)).getTermInfosIndexDivisor()); writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())). @@ -857,11 +857,11 @@ public void testFilesOpenClose() throws IOException { assertNotNull(r2); assertNull(DirectoryReader.openIfChanged(r2)); r.close(); - IndexReader[] subReaders = r2.getSequentialSubReaders(); - assertEquals(2, subReaders.length); - for(int i=0;i<2;i++) { + List subReaders = r2.getSequentialSubReaders(); + assertEquals(2, subReaders.size()); + for(AtomicReader s : subReaders) { try { - subReaders[i].docFreq(new Term("field", "f")); + s.docFreq(new Term("field", "f")); fail("did not hit expected exception"); } catch (IllegalStateException ise) { // expected diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java index 79cc94f6f91..8e6f3ea588e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java @@ -469,9 +469,9 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { DirectoryReader r = DirectoryReader.open(dir); if (multiSegment) { - assertTrue(r.getSequentialSubReaders().length > 1); + assertTrue(r.getSequentialSubReaders().size() > 1); } else { - assertTrue(r.getSequentialSubReaders().length == 1); + assertTrue(r.getSequentialSubReaders().size() == 1); } r.close(); } @@ -542,9 +542,9 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { } if (checkSubReaders && reader instanceof CompositeReader) { - IndexReader[] subReaders = ((CompositeReader) reader).getSequentialSubReaders(); - for (int i = 0; i < subReaders.length; i++) { - assertReaderClosed(subReaders[i], checkSubReaders, checkNormsClosed); + List subReaders = ((CompositeReader) reader).getSequentialSubReaders(); + for (IndexReader r : subReaders) { + assertReaderClosed(r, checkSubReaders, checkNormsClosed); } } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java index 1dab8a83235..7b103bd796e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java @@ -77,7 +77,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { writer.close(true); DirectoryReader reader = DirectoryReader.open(dir, 1); - assertEquals(1, reader.getSequentialSubReaders().length); + assertEquals(1, reader.getSequentialSubReaders().size()); IndexSearcher searcher = new IndexSearcher(reader); @@ -750,7 +750,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.forceMerge(1); DirectoryReader r = w.getReader(); w.close(); - assertEquals(17, r.getSequentialSubReaders()[0].docValues("field").load().getInt(0)); + assertEquals(17, getOnlySegmentReader(r).docValues("field").load().getInt(0)); r.close(); d.close(); } @@ -994,8 +994,9 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.addDocument(doc); bytes[0] = 1; w.addDocument(doc); + w.forceMerge(1); DirectoryReader r = w.getReader(); - Source s = r.getSequentialSubReaders()[0].docValues("field").getSource(); + Source s = getOnlySegmentReader(r).docValues("field").getSource(); BytesRef bytes1 = s.getBytes(0, new BytesRef()); assertEquals(bytes.length, bytes1.length); diff --git 
a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java index cc83293faf2..48d0dc8444c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -64,8 +64,7 @@ public class TestDocsAndPositions extends LuceneTestCase { for (int i = 0; i < num; i++) { BytesRef bytes = new BytesRef("1"); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (AtomicReaderContext atomicReaderContext : leaves) { + for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) { DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( atomicReaderContext.reader(), bytes, null); assertNotNull(docsAndPosEnum); @@ -140,8 +139,7 @@ public class TestDocsAndPositions extends LuceneTestCase { for (int i = 0; i < num; i++) { BytesRef bytes = new BytesRef("" + term); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (AtomicReaderContext atomicReaderContext : leaves) { + for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) { DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( atomicReaderContext.reader(), bytes, null); assertNotNull(docsAndPosEnum); @@ -216,8 +214,7 @@ public class TestDocsAndPositions extends LuceneTestCase { for (int i = 0; i < num; i++) { BytesRef bytes = new BytesRef("" + term); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (AtomicReaderContext context : leaves) { + for (AtomicReaderContext context : topReaderContext.leaves()) { int maxDoc = context.reader().maxDoc(); DocsEnum docsEnum = _TestUtil.docs(random(), context.reader(), fieldName, bytes, null, null, true); if (findNext(freqInDoc, context.docBase, context.docBase + maxDoc) == Integer.MAX_VALUE) { @@ -295,8 +292,7 @@ public class TestDocsAndPositions extends LuceneTestCase { BytesRef bytes = new BytesRef("even"); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - for (AtomicReaderContext atomicReaderContext : leaves) { + for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) { DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions( atomicReaderContext.reader(), bytes, null); assertNotNull(docsAndPosEnum); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java index 5bf41b7d4d9..376f747f638 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java @@ -282,7 +282,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { // Reader should see index as multi-seg at this // point: - assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1); + assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); // Abort the writer: @@ -293,7 +293,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { reader = DirectoryReader.open(dir); // Reader should still see index as multi-segment - assertTrue("Reader incorrectly sees one segment", 
reader.getSequentialSubReaders().length > 1); + assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().size() > 1); reader.close(); if (VERBOSE) { @@ -312,7 +312,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { reader = DirectoryReader.open(dir); // Reader should see index as one segment - assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().length); + assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().size()); reader.close(); dir.close(); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java index 9cadd9740bd..50c36a12acc 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java @@ -187,7 +187,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase { if (0 == pass) { writer.close(); DirectoryReader reader = DirectoryReader.open(dir); - assertEquals(1, reader.getSequentialSubReaders().length); + assertEquals(1, reader.getSequentialSubReaders().size()); reader.close(); } else { // Get another segment to flush so we can verify it is @@ -197,7 +197,7 @@ public class TestIndexWriterForceMerge extends LuceneTestCase { writer.close(); DirectoryReader reader = DirectoryReader.open(dir); - assertTrue(reader.getSequentialSubReaders().length > 1); + assertTrue(reader.getSequentialSubReaders().size() > 1); reader.close(); SegmentInfos infos = new SegmentInfos(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 34338cd29c0..0a4f1c98ce3 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -982,7 +982,7 @@ public class TestIndexWriterReader extends LuceneTestCase { Document doc = new Document(); doc.add(new TextField("f", "val", Field.Store.NO)); w.addDocument(doc); - IndexReader r = DirectoryReader.open(w, true).getSequentialSubReaders()[0]; + SegmentReader r = getOnlySegmentReader(DirectoryReader.open(w, true)); try { _TestUtil.docs(random(), r, "f", new BytesRef("val"), null, null, false); fail("should have failed to seek since terms index was not loaded."); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java index e1df6ac22f7..11a19e880c9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java @@ -30,7 +30,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.ReaderUtil; import org.apache.lucene.util.UnicodeUtil; public class TestIndexWriterUnicode extends LuceneTestCase { @@ -316,12 +315,9 @@ public class TestIndexWriterUnicode extends LuceneTestCase { IndexReader r = writer.getReader(); // Test each sub-segment - new ReaderUtil.Gather(r) { - @Override - protected void add(int base, AtomicReader r) throws IOException { - checkTermsOrder(r, allTerms, false); - } - }.run(); + for (AtomicReaderContext ctx : r.getTopReaderContext().leaves()) { + 
checkTermsOrder(ctx.reader(), allTerms, false); + } checkTermsOrder(r, allTerms, true); // Test multi segment diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java index 7656ae006d4..68b417945c2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java @@ -339,13 +339,13 @@ public class TestParallelCompositeReader extends LuceneTestCase { if (compositeComposite) { rd1 = new MultiReader(DirectoryReader.open(dir1), DirectoryReader.open(dir1)); rd2 = new MultiReader(DirectoryReader.open(dir2), DirectoryReader.open(dir2)); - assertEquals(2, rd1.getSequentialSubReaders().length); - assertEquals(2, rd2.getSequentialSubReaders().length); + assertEquals(2, rd1.getSequentialSubReaders().size()); + assertEquals(2, rd2.getSequentialSubReaders().size()); } else { rd1 = DirectoryReader.open(dir1); rd2 = DirectoryReader.open(dir2); - assertEquals(3, rd1.getSequentialSubReaders().length); - assertEquals(3, rd2.getSequentialSubReaders().length); + assertEquals(3, rd1.getSequentialSubReaders().size()); + assertEquals(3, rd2.getSequentialSubReaders().size()); } ParallelCompositeReader pr = new ParallelCompositeReader(rd1, rd2); return newSearcher(pr); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java index 651b559f42e..e9caa08ea95 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -281,7 +281,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } private static void printDocs(DirectoryReader r) throws Throwable { - IndexReader[] subs = r.getSequentialSubReaders(); + List subs = r.getSequentialSubReaders(); for(IndexReader sub : subs) { // TODO: improve this Bits liveDocs = ((AtomicReader)sub).getLiveDocs(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java index bf674ec0db6..e5b05676c33 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java @@ -742,7 +742,7 @@ public class TestTermsEnum extends LuceneTestCase { w.forceMerge(1); DirectoryReader r = w.getReader(); w.close(); - AtomicReader sub = r.getSequentialSubReaders()[0]; + AtomicReader sub = getOnlySegmentReader(r); Terms terms = sub.fields().terms("field"); Automaton automaton = new RegExp(".*", RegExp.NONE).toAutomaton(); CompiledAutomaton ca = new CompiledAutomaton(automaton, false, false); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java index e74b60f4de2..3b786296130 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java @@ -130,7 +130,7 @@ public class TestThreadedForceMerge extends LuceneTestCase { OpenMode.APPEND).setMaxBufferedDocs(2)); DirectoryReader reader = DirectoryReader.open(directory); - assertEquals("reader=" + reader, 1, reader.getSequentialSubReaders().length); + assertEquals("reader=" + reader, 1, reader.getSequentialSubReaders().size()); assertEquals(expectedDocCount, 
reader.numDocs()); reader.close(); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java b/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java index a18235b4f49..70e67b3da70 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java @@ -19,6 +19,7 @@ package org.apache.lucene.index; import java.io.IOException; import java.util.EnumSet; +import java.util.List; import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; @@ -120,11 +121,11 @@ public class TestTypePromotion extends LuceneTestCase { private void assertValues(TestType type, Directory dir, long[] values, Type[] sourceType) throws CorruptIndexException, IOException { DirectoryReader reader = DirectoryReader.open(dir); - assertEquals(1, reader.getSequentialSubReaders().length); + assertEquals(1, reader.getSequentialSubReaders().size()); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] children = topReaderContext.leaves(); - assertEquals(1, children.length); - DocValues docValues = children[0].reader().docValues("promote"); + List<AtomicReaderContext> leaves = topReaderContext.leaves(); + assertEquals(1, leaves.size()); + DocValues docValues = leaves.get(0).reader().docValues("promote"); Source directSource = docValues.getDirectSource(); for (int i = 0; i < values.length; i++) { int id = Integer.parseInt(reader.document(i).get("id")); @@ -372,10 +373,10 @@ public class TestTypePromotion extends LuceneTestCase { writer.forceMerge(1); writer.close(); DirectoryReader reader = DirectoryReader.open(dir); - assertEquals(1, reader.getSequentialSubReaders().length); + assertEquals(1, reader.getSequentialSubReaders().size()); IndexReaderContext topReaderContext = reader.getTopReaderContext(); - AtomicReaderContext[] children = topReaderContext.leaves(); - DocValues docValues = children[0].reader().docValues("promote"); + List<AtomicReaderContext> leaves = topReaderContext.leaves(); + DocValues docValues = leaves.get(0).reader().docValues("promote"); assertNotNull(docValues); assertValues(TestType.Byte, dir, values, sourceType); assertEquals(Type.BYTES_VAR_STRAIGHT, docValues.getType()); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java index 68956d1d352..28dc800ab5b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java @@ -226,7 +226,7 @@ public class TestBooleanQuery extends LuceneTestCase { Weight weight = s.createNormalizedWeight(q); - Scorer scorer = weight.scorer(s.leafContexts[0], + Scorer scorer = weight.scorer(s.leafContexts.get(0), true, false, null); // First pass: just use .nextDoc() to gather all hits @@ -244,7 +244,7 @@ public class TestBooleanQuery extends LuceneTestCase { for(int iter2=0;iter2<10;iter2++) { weight = s.createNormalizedWeight(q); - scorer = weight.scorer(s.leafContexts[0], + scorer = weight.scorer(s.leafContexts.get(0), true, false, null); if (VERBOSE) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 1082209cad3..0d06c5438ff 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -54,7 +54,7 @@ public class TestMatchAllDocsQuery
extends LuceneTestCase { IndexSearcher is = newSearcher(ir); ScoreDoc[] hits; - + hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs; assertEquals(3, hits.length); assertEquals("one", is.doc(hits[0].doc).get("key")); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java index 21f94be5c75..172b5ead96a 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java @@ -311,13 +311,13 @@ public class TestShardSearching extends ShardSearchingTestBase { final int numNodes = shardSearcher.nodeVersions.length; int[] base = new int[numNodes]; - final IndexReader[] subs = ((CompositeReader) mockSearcher.getIndexReader()).getSequentialSubReaders(); - assertEquals(numNodes, subs.length); + final List subs = ((CompositeReader) mockSearcher.getIndexReader()).getSequentialSubReaders(); + assertEquals(numNodes, subs.size()); int docCount = 0; for(int nodeID=0;nodeID ctx; public ShardSearcher(AtomicReaderContext ctx, IndexReaderContext parent) { super(parent); - this.ctx = new AtomicReaderContext[] {ctx}; + this.ctx = Collections.singletonList(ctx); } public void search(Weight weight, Collector collector) throws IOException { @@ -56,7 +57,7 @@ public class TestTopDocsMerge extends LuceneTestCase { @Override public String toString() { - return "ShardSearcher(" + ctx[0] + ")"; + return "ShardSearcher(" + ctx.get(0) + ")"; } } @@ -131,13 +132,15 @@ public class TestTopDocsMerge extends LuceneTestCase { docStarts[0] = 0; } else { final CompositeReaderContext compCTX = (CompositeReaderContext) ctx; - subSearchers = new ShardSearcher[compCTX.leaves().length]; - docStarts = new int[compCTX.leaves().length]; + final int size = compCTX.leaves().size(); + subSearchers = new ShardSearcher[size]; + docStarts = new int[size]; int docBase = 0; - for(int searcherIDX=0;searcherIDX leaves; private int leafOrd = 0; private Spans current; private Map termContexts; + private final int numLeaves; - private MultiSpansWrapper(AtomicReaderContext[] leaves, SpanQuery query, Map termContexts) { + private MultiSpansWrapper(List leaves, SpanQuery query, Map termContexts) { this.query = query; this.leaves = leaves; + this.numLeaves = leaves.size(); this.termContexts = termContexts; } @@ -61,27 +63,30 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t for (Term term : terms) { termContexts.put(term, TermContext.build(topLevelReaderContext, term, true)); } - AtomicReaderContext[] leaves = topLevelReaderContext.leaves(); - if(leaves.length == 1) { - return query.getSpans(leaves[0], leaves[0].reader().getLiveDocs(), termContexts); + final List leaves = topLevelReaderContext.leaves(); + if(leaves.size() == 1) { + final AtomicReaderContext ctx = leaves.get(0); + return query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); } return new MultiSpansWrapper(leaves, query, termContexts); } @Override public boolean next() throws IOException { - if (leafOrd >= leaves.length) { + if (leafOrd >= numLeaves) { return false; } if (current == null) { - current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts); + final AtomicReaderContext ctx = leaves.get(leafOrd); + current = query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); } while(true) { if (current.next()) { return true; } - if (++leafOrd < leaves.length) { - current = query.getSpans(leaves[leafOrd], 
leaves[leafOrd].reader().getLiveDocs(), termContexts); + if (++leafOrd < numLeaves) { + final AtomicReaderContext ctx = leaves.get(leafOrd); + current = query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); } else { current = null; break; @@ -92,27 +97,30 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t @Override public boolean skipTo(int target) throws IOException { - if (leafOrd >= leaves.length) { + if (leafOrd >= numLeaves) { return false; } int subIndex = ReaderUtil.subIndex(target, leaves); assert subIndex >= leafOrd; if (subIndex != leafOrd) { - current = query.getSpans(leaves[subIndex], leaves[subIndex].reader().getLiveDocs(), termContexts); + final AtomicReaderContext ctx = leaves.get(subIndex); + current = query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); leafOrd = subIndex; } else if (current == null) { - current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts); + final AtomicReaderContext ctx = leaves.get(leafOrd); + current = query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); } while (true) { - if (current.skipTo(target - leaves[leafOrd].docBase)) { + if (current.skipTo(target - leaves.get(leafOrd).docBase)) { return true; } - if (++leafOrd < leaves.length) { - current = query.getSpans(leaves[leafOrd], leaves[leafOrd].reader().getLiveDocs(), termContexts); + if (++leafOrd < numLeaves) { + final AtomicReaderContext ctx = leaves.get(leafOrd); + current = query.getSpans(ctx, ctx.reader().getLiveDocs(), termContexts); } else { - current = null; - break; + current = null; + break; } } @@ -124,7 +132,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t if (current == null) { return DocIdSetIterator.NO_MORE_DOCS; } - return current.doc() + leaves[leafOrd].docBase; + return current.doc() + leaves.get(leafOrd).docBase; } @Override diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java index 670e5f94af3..48a4b4fc65c 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java @@ -166,8 +166,8 @@ public class TestNearSpansOrdered extends LuceneTestCase { SpanNearQuery q = makeQuery(); Weight w = searcher.createNormalizedWeight(q); IndexReaderContext topReaderContext = searcher.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); - Scorer s = w.scorer(leaves[0], true, false, leaves[0].reader().getLiveDocs()); + AtomicReaderContext leave = topReaderContext.leaves().get(0); + Scorer s = w.scorer(leave, true, false, leave.reader().getLiveDocs()); assertEquals(1, s.advance(1)); } diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java index 7bd5ccad69f..0c1e8924214 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java @@ -18,6 +18,7 @@ package org.apache.lucene.search.spans; */ import java.io.IOException; +import java.util.List; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -404,10 +405,10 @@ public class TestSpans extends LuceneTestCase { boolean ordered = true; int slop = 1; IndexReaderContext topReaderContext = searcher.getTopReaderContext(); - 
AtomicReaderContext[] leaves = topReaderContext.leaves(); + List leaves = topReaderContext.leaves(); int subIndex = ReaderUtil.subIndex(11, leaves); - for (int i = 0; i < leaves.length; i++) { - + for (int i = 0, c = leaves.size(); i < c; i++) { + final AtomicReaderContext ctx = leaves.get(i); final Similarity sim = new DefaultSimilarity() { @Override @@ -427,13 +428,13 @@ public class TestSpans extends LuceneTestCase { slop, ordered); - spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], true, false, leaves[i].reader().getLiveDocs()); + spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, true, false, ctx.reader().getLiveDocs()); } finally { searcher.setSimilarity(oldSim); } if (i == subIndex) { assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); - assertEquals("first doc number", spanScorer.docID() + leaves[i].docBase, 11); + assertEquals("first doc number", spanScorer.docID() + ctx.docBase, 11); float score = spanScorer.score(); assertTrue("first doc score should be zero, " + score, score == 0.0f); } else { diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index 0a26b863e6e..808022c0e92 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -629,25 +629,16 @@ public class TestGrouping extends LuceneTestCase { public final int[] docStarts; public ShardState(IndexSearcher s) { - List subReaders = new ArrayList(); - ReaderUtil.gatherSubReaders(subReaders, s.getIndexReader()); - subSearchers = new ShardSearcher[subReaders.size()]; final IndexReaderContext ctx = s.getTopReaderContext(); - if (ctx instanceof AtomicReaderContext) { - assert subSearchers.length == 1; - subSearchers[0] = new ShardSearcher((AtomicReaderContext) ctx, ctx); - } else { - final CompositeReaderContext compCTX = (CompositeReaderContext) ctx; - for(int searcherIDX=0;searcherIDX leaves = ctx.leaves(); + subSearchers = new ShardSearcher[leaves.size()]; + for(int searcherIDX=0;searcherIDX leaves = reader.getTopReaderContext().leaves(); final int subIndex = ReaderUtil.subIndex(childDocID, leaves); - final AtomicReaderContext leaf = leaves[subIndex]; + final AtomicReaderContext leaf = leaves.get(subIndex); final FixedBitSet bits = (FixedBitSet) parents.getDocIdSet(leaf, null); return leaf.reader().document(bits.nextSetBit(childDocID - leaf.docBase)); } @@ -961,7 +961,7 @@ public class TestBlockJoin extends LuceneTestCase { ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg); Weight weight = s.createNormalizedWeight(q); - DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves()[0], true, true, null); + DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves().get(0), true, true, null); assertEquals(1, disi.advance(1)); r.close(); dir.close(); @@ -995,7 +995,7 @@ public class TestBlockJoin extends LuceneTestCase { ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg); Weight weight = s.createNormalizedWeight(q); - DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves()[0], true, true, null); + DocIdSetIterator disi = weight.scorer(s.getIndexReader().getTopReaderContext().leaves().get(0), true, true, null); assertEquals(2, disi.advance(0)); r.close(); dir.close(); diff --git 
a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java index 9d88a4c7d73..f26a0e44922 100644 --- a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java @@ -20,13 +20,13 @@ package org.apache.lucene.index; import java.io.File; import java.io.IOException; import java.util.ArrayList; +import java.util.List; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.ReaderUtil; import org.apache.lucene.util.Version; /** @@ -101,7 +101,8 @@ public class MultiPassIndexSplitter { .setOpenMode(OpenMode.CREATE)); System.err.println("Writing part " + (i + 1) + " ..."); // pass the subreaders directly, as our wrapper's numDocs/hasDeletetions are not up-to-date - w.addIndexes(input.getSequentialSubReaders()); + final List<? extends IndexReader> sr = input.getSequentialSubReaders(); + w.addIndexes(sr.toArray(new IndexReader[sr.size()])); // TODO: maybe take List here? w.close(); } System.err.println("Done."); @@ -177,34 +178,36 @@ public class MultiPassIndexSplitter { /** * This class emulates deletions on the underlying index. */ - private static final class FakeDeleteIndexReader extends MultiReader { + private static final class FakeDeleteIndexReader extends BaseCompositeReader<FakeDeleteAtomicIndexReader> { public FakeDeleteIndexReader(IndexReader reader) throws IOException { super(initSubReaders(reader)); } - private static AtomicReader[] initSubReaders(IndexReader reader) throws IOException { - final ArrayList<AtomicReader> subs = new ArrayList<AtomicReader>(); - new ReaderUtil.Gather(reader) { - @Override - protected void add(int base, AtomicReader r) { - subs.add(new FakeDeleteAtomicIndexReader(r)); - } - }.run(); - return subs.toArray(new AtomicReader[subs.size()]); + private static FakeDeleteAtomicIndexReader[] initSubReaders(IndexReader reader) throws IOException { + final List<AtomicReaderContext> leaves = reader.getTopReaderContext().leaves(); + final FakeDeleteAtomicIndexReader[] subs = new FakeDeleteAtomicIndexReader[leaves.size()]; + int i = 0; + for (final AtomicReaderContext ctx : leaves) { + subs[i++] = new FakeDeleteAtomicIndexReader(ctx.reader()); + } + return subs; } public void deleteDocument(int docID) { final int i = readerIndex(docID); - ((FakeDeleteAtomicIndexReader) subReaders[i]).deleteDocument(docID - starts[i]); + getSequentialSubReaders().get(i).deleteDocument(docID - readerBase(i)); } public void undeleteAll() { - for (IndexReader r : subReaders) { - ((FakeDeleteAtomicIndexReader) r).undeleteAll(); + for (FakeDeleteAtomicIndexReader r : getSequentialSubReaders()) { + r.undeleteAll(); } } + @Override + protected void doClose() throws IOException {} + // no need to override numDocs/hasDeletions, // as we pass the subreaders directly to IW.addIndexes().
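// Not part of the patch, only an illustration: resolving a top-level docID to a
// sub-reader uses the same docBase arithmetic as deleteDocument() above. A
// minimal sketch, assuming an open DirectoryReader "reader" and a top-level
// document id "docID":
//
//   List<AtomicReaderContext> leaves = reader.getTopReaderContext().leaves();
//   int i = ReaderUtil.subIndex(docID, leaves);  // binary search on docBase
//   AtomicReaderContext leaf = leaves.get(i);
//   Document doc = leaf.reader().document(docID - leaf.docBase);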
} diff --git a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java index ab331963c9e..9044004703d 100644 --- a/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/index/PKIndexSplitter.java @@ -18,6 +18,7 @@ package org.apache.lucene.index; */ import java.io.IOException; +import java.util.List; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSet; @@ -101,10 +102,11 @@ public class PKIndexSplitter { boolean success = false; final IndexWriter w = new IndexWriter(target, config); try { - final AtomicReaderContext[] leaves = reader.getTopReaderContext().leaves(); - final IndexReader[] subReaders = new IndexReader[leaves.length]; - for (int i = 0; i < leaves.length; i++) { - subReaders[i] = new DocumentFilteredAtomicIndexReader(leaves[i], preserveFilter, negateFilter); + final List leaves = reader.getTopReaderContext().leaves(); + final IndexReader[] subReaders = new IndexReader[leaves.size()]; + int i = 0; + for (final AtomicReaderContext ctx : leaves) { + subReaders[i++] = new DocumentFilteredAtomicIndexReader(ctx, preserveFilter, negateFilter); } w.addIndexes(subReaders); success = true; diff --git a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java index 9e342b31003..52e2bb98ea8 100644 --- a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java +++ b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java @@ -18,6 +18,7 @@ package org.apache.lucene.misc; */ import org.apache.lucene.index.AtomicReader; +import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; @@ -184,33 +185,29 @@ public class HighFreqTerms { } public static long getTotalTermFreq(IndexReader reader, final String field, final BytesRef termText) throws Exception { - final long totalTF[] = new long[1]; - - new ReaderUtil.Gather(reader) { - - @Override - protected void add(int base, AtomicReader r) throws IOException { - Bits liveDocs = r.getLiveDocs(); - if (liveDocs == null) { - // TODO: we could do this up front, during the scan - // (next()), instead of after-the-fact here w/ seek, - // if the codec supports it and there are no del - // docs... - final long totTF = r.totalTermFreq(field, termText); - if (totTF != -1) { - totalTF[0] += totTF; - return; - } - } - DocsEnum de = r.termDocsEnum(liveDocs, field, termText, true); - if (de != null) { - while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) - totalTF[0] += de.freq(); - } + long totalTF = 0L; + for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) { + AtomicReader r = ctx.reader(); + Bits liveDocs = r.getLiveDocs(); + if (liveDocs == null) { + // TODO: we could do this up front, during the scan + // (next()), instead of after-the-fact here w/ seek, + // if the codec supports it and there are no del + // docs... 
+ final long totTF = r.totalTermFreq(field, termText); + if (totTF != -1) { + totalTF += totTF; + continue; + } // otherwise we fall-through } - }.run(); + DocsEnum de = r.termDocsEnum(liveDocs, field, termText, true); + if (de != null) { + while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) + totalTF += de.freq(); + } + } - return totalTF[0]; + return totalTF; } } diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestBalancedSegmentMergePolicy.java b/lucene/misc/src/test/org/apache/lucene/index/TestBalancedSegmentMergePolicy.java index f2ce026017b..8db6164676b 100644 --- a/lucene/misc/src/test/org/apache/lucene/index/TestBalancedSegmentMergePolicy.java +++ b/lucene/misc/src/test/org/apache/lucene/index/TestBalancedSegmentMergePolicy.java @@ -67,7 +67,7 @@ public class TestBalancedSegmentMergePolicy extends LuceneTestCase { int numSegments = _TestUtil.nextInt(random(), 1, 4); iw.forceMerge(numSegments); DirectoryReader ir = iw.getReader(); - assertTrue(ir.getSequentialSubReaders().length <= numSegments); + assertTrue(ir.getSequentialSubReaders().size() <= numSegments); ir.close(); } diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java index f10b747986c..f3502dc0d9c 100644 --- a/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java +++ b/lucene/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java @@ -59,7 +59,7 @@ public class TestIndexSplitter extends LuceneTestCase { } iw.commit(); DirectoryReader iwReader = iw.getReader(); - assertEquals(3, iwReader.getSequentialSubReaders().length); + assertEquals(3, iwReader.getSequentialSubReaders().size()); iwReader.close(); iw.close(); // we should have 2 segments now @@ -87,7 +87,7 @@ public class TestIndexSplitter extends LuceneTestCase { // now remove the copied segment from src IndexSplitter.main(new String[] {dir.getAbsolutePath(), "-d", splitSegName}); r = DirectoryReader.open(fsDir); - assertEquals(2, r.getSequentialSubReaders().length); + assertEquals(2, r.getSequentialSubReaders().size()); r.close(); fsDir.close(); } diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java index 6766ad32c0a..c09814e81a5 100755 --- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/ScaleFloatFunction.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.ReaderUtil; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -60,7 +61,7 @@ public class ScaleFloatFunction extends ValueSource { } private ScaleInfo createScaleInfo(Map context, AtomicReaderContext readerContext) throws IOException { - final AtomicReaderContext[] leaves = ReaderUtil.getTopLevelContext(readerContext).leaves(); + final List leaves = ReaderUtil.getTopLevelContext(readerContext).leaves(); float minVal = Float.POSITIVE_INFINITY; float maxVal = Float.NEGATIVE_INFINITY; diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java index ae583e0e25f..4f7e4380633 100644 --- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java +++ 
b/lucene/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java @@ -44,6 +44,7 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.util.List; public class TestParser extends LuceneTestCase { @@ -194,8 +195,8 @@ public class TestParser extends LuceneTestCase { } public void testDuplicateFilterQueryXML() throws ParserException, IOException { - AtomicReaderContext leaves[] = searcher.getTopReaderContext().leaves(); - Assume.assumeTrue(leaves == null || leaves.length == 1); + List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves(); + Assume.assumeTrue(leaves.size() == 1); Query q = parse("DuplicateFilterQuery.xml"); int h = searcher.search(q, null, 1000).totalHits; assertEquals("DuplicateFilterQuery should produce 1 result ", 1, h); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index 70a9f1880f3..5d6b0f6df5c 100755 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -28,6 +28,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.document.StringField; import org.apache.lucene.index.AtomicReader; +import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; @@ -497,14 +498,11 @@ public class SpellChecker implements java.io.Closeable { final IndexReader reader = searcher.getIndexReader(); if (reader.maxDoc() > 0) { - new ReaderUtil.Gather(reader) { - @Override - protected void add(int base, AtomicReader r) throws IOException { - Terms terms = r.terms(F_WORD); - if (terms != null) - termsEnums.add(terms.iterator(null)); - } - }.run(); + for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) { + Terms terms = ctx.reader().terms(F_WORD); + if (terms != null) + termsEnums.add(terms.iterator(null)); + } } boolean isEmpty = termsEnums.isEmpty(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java index b80914d5b14..d6172ced84b 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java @@ -18,6 +18,7 @@ package org.apache.lucene.search; */ import java.io.IOException; +import java.util.List; import java.util.Random; import junit.framework.Assert; @@ -233,7 +234,7 @@ public class QueryUtils { */ public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException { //System.out.println("Checking "+q); - final AtomicReaderContext[] readerContextArray = s.getTopReaderContext().leaves(); + final List<AtomicReaderContext> readerContextArray = s.getTopReaderContext().leaves(); if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
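// Not part of the patch, only an illustration: the per-leaf scoring idiom the
// checks below exercise, now driven by a List<AtomicReaderContext> instead of
// an array. A minimal sketch, assuming an IndexSearcher "s" and a Query "q":
//
//   Weight w = s.createNormalizedWeight(q);
//   for (AtomicReaderContext ctx : s.getTopReaderContext().leaves()) {
//     Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
//     if (scorer == null) continue;  // no matching docs in this leaf
//     while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
//       int globalDoc = scorer.docID() + ctx.docBase;  // rebase to top-level id
//     }
//   }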
final int skip_op = 0; @@ -278,7 +279,7 @@ public class QueryUtils { try { if (scorer == null) { Weight w = s.createNormalizedWeight(q); - AtomicReaderContext context = readerContextArray[leafPtr]; + AtomicReaderContext context = readerContextArray.get(leafPtr); scorer = w.scorer(context, true, false, context.reader().getLiveDocs()); } @@ -334,7 +335,7 @@ public class QueryUtils { leafPtr++; } lastReader[0] = context.reader(); - assert readerContextArray[leafPtr].reader() == context.reader(); + assert readerContextArray.get(leafPtr).reader() == context.reader(); this.scorer = null; lastDoc[0] = -1; } @@ -368,7 +369,7 @@ public class QueryUtils { final float maxDiff = 1e-3f; final int lastDoc[] = {-1}; final AtomicReader lastReader[] = {null}; - final AtomicReaderContext[] context = s.getTopReaderContext().leaves(); + final List<AtomicReaderContext> context = s.getTopReaderContext().leaves(); s.search(q,new Collector() { private Scorer scorer; private int leafPtr; @@ -384,7 +385,7 @@ public class QueryUtils { long startMS = System.currentTimeMillis(); for (int i=lastDoc[0]+1; i<=doc; i++) { Weight w = s.createNormalizedWeight(q); - Scorer scorer = w.scorer(context[leafPtr], true, false, liveDocs); + Scorer scorer = w.scorer(context.get(leafPtr), true, false, liveDocs); Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS); Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID()); float skipToScore = scorer.score(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index 9e9e13ce808..39320c87791 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -435,11 +435,12 @@ public abstract class LuceneTestCase extends Assert { * do tests on that segment's reader. This is an utility method to help them. */ public static SegmentReader getOnlySegmentReader(DirectoryReader reader) { - IndexReader[] subReaders = reader.getSequentialSubReaders(); - if (subReaders.length != 1) - throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one"); - assertTrue(subReaders[0] instanceof SegmentReader); - return (SegmentReader) subReaders[0]; + List<? extends AtomicReader> subReaders = reader.getSequentialSubReaders(); + if (subReaders.size() != 1) + throw new IllegalArgumentException(reader + " has " + subReaders.size() + " segments instead of exactly one"); + final IndexReader r = subReaders.get(0); + assertTrue(r instanceof SegmentReader); + return (SegmentReader) r; } /** diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java index 950a5157a26..16eb4cb140d 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java @@ -544,7 +544,7 @@ public class LukeRequestHandler extends RequestHandlerBase indexInfo.add("maxDoc", reader.maxDoc()); indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
- indexInfo.add("segmentCount", reader.getSequentialSubReaders().length); + indexInfo.add("segmentCount", reader.getTopReaderContext().leaves().size()); indexInfo.add("current", reader.isCurrent() ); indexInfo.add("hasDeletions", reader.hasDeletions() ); indexInfo.add("directory", dir ); diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java index 02e463d36cc..dc3452bb374 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java @@ -437,11 +437,11 @@ public class QueryComponent extends SearchComponent NamedList sortVals = new NamedList(); // order is important for the sort fields Field field = new StringField("dummy", "", Field.Store.NO); // a dummy Field IndexReaderContext topReaderContext = searcher.getTopReaderContext(); - AtomicReaderContext[] leaves = topReaderContext.leaves(); + List leaves = topReaderContext.leaves(); AtomicReaderContext currentLeaf = null; - if (leaves.length==1) { + if (leaves.size()==1) { // if there is a single segment, use that subReader and avoid looking up each time - currentLeaf = leaves[0]; + currentLeaf = leaves.get(0); leaves=null; } @@ -478,7 +478,7 @@ public class QueryComponent extends SearchComponent if (leaves != null) { idx = ReaderUtil.subIndex(doc, leaves); - currentLeaf = leaves[idx]; + currentLeaf = leaves.get(idx); if (idx != lastIdx) { // we switched segments. invalidate comparator. comparator = null; diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java index 4fc5ff62005..c606679d8dc 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java @@ -679,7 +679,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar if (buildOnCommit) { buildSpellIndex(newSearcher); } else if (buildOnOptimize) { - if (newSearcher.getIndexReader().getSequentialSubReaders().length == 1) { + if (newSearcher.getIndexReader().getSequentialSubReaders().size() == 1) { buildSpellIndex(newSearcher); } else { LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName()); diff --git a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java index d81a7fa0b9b..60e62370804 100755 --- a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java +++ b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java @@ -82,7 +82,7 @@ class PerSegmentSingleValuedFaceting { // reuse the translation logic to go from top level set to per-segment set baseSet = docs.getTopFilter(); - final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves(); + final List leaves = searcher.getTopReaderContext().leaves(); // The list of pending tasks that aren't immediately submitted // TODO: Is there a completion service, or a delegating executor that can // limit the number of concurrent tasks submitted to a bigger executor? @@ -90,8 +90,8 @@ class PerSegmentSingleValuedFaceting { int threads = nThreads <= 0 ? 
diff --git a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
index d81a7fa0b9b..60e62370804 100755
--- a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
+++ b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
@@ -82,7 +82,7 @@ class PerSegmentSingleValuedFaceting {
    // reuse the translation logic to go from top level set to per-segment set
    baseSet = docs.getTopFilter();
 
-    final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves();
+    final List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves();
    // The list of pending tasks that aren't immediately submitted
    // TODO: Is there a completion service, or a delegating executor that can
    // limit the number of concurrent tasks submitted to a bigger executor?
@@ -90,8 +90,8 @@ class PerSegmentSingleValuedFaceting {
 
    int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
 
-    for (int i=0; i<leaves.length; i++) {
-      final SegFacet segFacet = new SegFacet(leaves[i]);
+    for (final AtomicReaderContext leave : leaves) {
+      final SegFacet segFacet = new SegFacet(leave);
 
      Callable<SegFacet> task = new Callable<SegFacet>() {
        public SegFacet call() throws Exception {
@@ -111,7 +111,7 @@ class PerSegmentSingleValuedFaceting {
 
    // now merge the per-segment results
-    PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>(leaves.length) {
+    PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>(leaves.size()) {
      @Override
      protected boolean lessThan(SegFacet a, SegFacet b) {
        return a.tempBR.compareTo(b.tempBR) < 0;
@@ -121,7 +121,7 @@ class PerSegmentSingleValuedFaceting {
 
    boolean hasMissingCount=false;
    int missingCount=0;
-    for (int i=0; i<leaves.length; i++) {
+    for (int i=0, c=leaves.size(); i<c; i++) {
diff --git a/solr/core/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java b/solr/core/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
--- a/solr/core/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
+++ b/solr/core/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
@@ … @@ public class ValueSourceAugmenter extends DocTransformer
-  AtomicReaderContext[] readerContexts;
+  List<AtomicReaderContext> readerContexts;
  FunctionValues docValuesArr[];
@@ -89,7 +89,7 @@ public class ValueSourceAugmenter extends DocTransformer
      // TODO: calculate this stuff just once across diff functions
      int idx = ReaderUtil.subIndex(docid, readerContexts);
-      AtomicReaderContext rcontext = readerContexts[idx];
+      AtomicReaderContext rcontext = readerContexts.get(idx);
      FunctionValues values = docValuesArr[idx];
      if (values == null) {
        docValuesArr[idx] = values = valueSource.getValues(fcontext, rcontext);
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index 0fa4063e108..035b97197bd 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -602,11 +602,9 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrInfoMBean
   */
  public long lookupId(BytesRef idBytes) throws IOException {
    String field = schema.getUniqueKeyField().getName();
-    final AtomicReaderContext[] leaves = leafContexts;
-
-    for (int i=0; i<leaves.length; i++) {
-      final AtomicReaderContext leaf = leaves[i];
+    for (int i=0, c=leafContexts.size(); i<c; i++) {
+      final AtomicReaderContext leaf = leafContexts.get(i);
@@ … @@
      while (doc>=end) {
-        AtomicReaderContext leaf = leafContexts[readerIndex++];
+        AtomicReaderContext leaf = leafContexts.get(readerIndex++);
        base = leaf.docBase;
        end = base + leaf.reader().maxDoc();
        topCollector.setNextReader(leaf);
diff --git a/solr/core/src/java/org/apache/solr/update/VersionInfo.java b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
index 52269c7d0c9..b9aaae4f484 100644
--- a/solr/core/src/java/org/apache/solr/update/VersionInfo.java
+++ b/solr/core/src/java/org/apache/solr/update/VersionInfo.java
@@ -159,7 +159,7 @@ public class VersionInfo {
    ValueSource vs = versionField.getType().getValueSource(versionField, null);
    Map context = ValueSource.newContext(searcher);
    vs.createWeight(context, searcher);
-    FunctionValues fv = vs.getValues(context, searcher.getTopReaderContext().leaves()[(int)(lookup>>32)]);
+    FunctionValues fv = vs.getValues(context, searcher.getTopReaderContext().leaves().get((int)(lookup>>32)));
    long ver = fv.longVal((int)lookup);
    return ver;
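The VersionInfo hunk depends on a packing convention rather than an API: the lookup long carries the leaf ordinal in its upper 32 bits and the segment-local docid in the lower 32, which is why the new code indexes leaves() with (int)(lookup>>32). A plain-Java sketch of that encoding (helper names invented; no Lucene types needed):

public class PackedLookupSketch {
  /** Packs a leaf ordinal and a segment-local docid into one long. */
  static long pack(int leafOrd, int localDoc) {
    return (((long) leafOrd) << 32) | (localDoc & 0xffffffffL);
  }

  static int leafOrd(long lookup)  { return (int) (lookup >> 32); } // upper 32 bits
  static int localDoc(long lookup) { return (int) lookup; }         // lower 32 bits

  public static void main(String[] args) {
    long lookup = pack(3, 42);
    assert leafOrd(lookup) == 3 && localDoc(lookup) == 42;
    System.out.println(leafOrd(lookup) + " / " + localDoc(lookup));
  }
}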
diff --git a/solr/core/src/test/org/apache/solr/search/TestDocSet.java b/solr/core/src/test/org/apache/solr/search/TestDocSet.java
index 61cc9055779..b96c341472a 100644
--- a/solr/core/src/test/org/apache/solr/search/TestDocSet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestDocSet.java
@@ -19,6 +19,7 @@ package org.apache.solr.search;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.lucene.index.FieldInfo;
@@ -471,7 +472,7 @@ public class TestDocSet extends LuceneTestCase {
    DocIdSet da;
    DocIdSet db;
-    AtomicReaderContext[] leaves = topLevelContext.leaves();
+    List<AtomicReaderContext> leaves = topLevelContext.leaves();
 
    // first test in-sequence sub readers
    for (AtomicReaderContext readerContext : leaves) {
@@ -480,10 +481,10 @@ public class TestDocSet extends LuceneTestCase {
      doTestIteratorEqual(da, db);
    }
 
-    int nReaders = leaves.length;
+    int nReaders = leaves.size();
 
    // now test out-of-sequence sub readers
    for (int i=0; i<nReaders; i++) {
diff --git a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
--- a/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
+++ b/solr/core/src/test/org/apache/solr/search/TestIndexSearcher.java
@@ … @@
-      AtomicReaderContext[] leaves = topReaderContext.leaves();
+      List<AtomicReaderContext> leaves = topReaderContext.leaves();
      int idx = ReaderUtil.subIndex(doc, leaves);
-      AtomicReaderContext leaf = leaves[idx];
+      AtomicReaderContext leaf = leaves.get(idx);
      FunctionValues vals = vs.getValues(context, leaf);
      return vals.strVal(doc-leaf.docBase);
  }
@@ -78,7 +79,7 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
 
    // make sure the readers share the first segment
    // Didn't work w/ older versions of lucene2.9 going from segment -> multi
-    assertEquals(rCtx1.leaves()[0].reader(), rCtx2.leaves()[0].reader());
+    assertEquals(rCtx1.leaves().get(0).reader(), rCtx2.leaves().get(0).reader());
 
    assertU(adoc("id","5", "v_f","3.14159"));
    assertU(adoc("id","6", "v_f","8983", "v_s1","string6"));
@@ -88,8 +89,8 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
    IndexReaderContext rCtx3 = sr3.getSearcher().getTopReaderContext();
    // make sure the readers share segments
    // assertEquals(r1.getLeafReaders()[0], r3.getLeafReaders()[0]);
-    assertEquals(rCtx2.leaves()[0].reader(), rCtx3.leaves()[0].reader());
-    assertEquals(rCtx2.leaves()[1].reader(), rCtx3.leaves()[1].reader());
+    assertEquals(rCtx2.leaves().get(0).reader(), rCtx3.leaves().get(0).reader());
+    assertEquals(rCtx2.leaves().get(1).reader(), rCtx3.leaves().get(1).reader());
 
    sr1.close();
    sr2.close();
@@ -123,8 +124,8 @@ public class TestIndexSearcher extends SolrTestCaseJ4 {
    assertU(commit());
    SolrQueryRequest sr6 = req("q","foo");
    IndexReaderContext rCtx6 = sr6.getSearcher().getTopReaderContext();
-    assertEquals(1, rCtx6.leaves()[0].reader().numDocs()); // only a single doc left in the first segment
-    assertTrue( !rCtx5.leaves()[0].reader().equals(rCtx6.leaves()[0].reader()) );  // readers now different
+    assertEquals(1, rCtx6.leaves().get(0).reader().numDocs()); // only a single doc left in the first segment
+    assertTrue( !rCtx5.leaves().get(0).reader().equals(rCtx6.leaves().get(0).reader()) );  // readers now different
 
    sr5.close();
    sr6.close();
diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java
index d3f0354a9ef..3461f79cc32 100755
--- a/solr/core/src/test/org/apache/solr/search/TestSort.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSort.java
@@ -198,7 +198,7 @@ public class TestSort extends SolrTestCaseJ4 {
      DirectoryReader reader = DirectoryReader.open(dir);
      IndexSearcher searcher = new IndexSearcher(reader);
      // System.out.println("segments="+searcher.getIndexReader().getSequentialSubReaders().length);
-      assertTrue(reader.getSequentialSubReaders().length > 1);
+      assertTrue(reader.getSequentialSubReaders().size() > 1);
 
      for (int i=0; i
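Several hunks in this patch (LukeRequestHandler, SpellCheckComponent, TestSort) reduce to counting segments. The sketch below shows both post-patch spellings under an invented class name; leaves() additionally flattens arbitrarily nested composite readers, which is why it is now the preferred accessor:

import org.apache.lucene.index.DirectoryReader;

public class SegmentCountSketch {
  /** True if the index consists of a single segment (what "optimized" used to mean). */
  public static boolean isSingleSegment(DirectoryReader reader) {
    // Preferred: count atomic leaves via the context tree (works for any composite).
    int leafCount = reader.getTopReaderContext().leaves().size();
    // Still possible: count direct sub-readers; for a plain DirectoryReader the two agree.
    int subCount = reader.getSequentialSubReaders().size();
    assert leafCount == subCount;
    return leafCount == 1;
  }
}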