diff --git a/lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java b/lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java
index 407c465e612..0851dd972e3 100644
--- a/lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java
+++ b/lucene/src/java/org/apache/lucene/codecs/PerDocConsumer.java
@@ -20,7 +20,7 @@ import java.io.IOException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.DocValues.Type;
@@ -65,13 +65,13 @@ public abstract class PerDocConsumer implements Closeable {
/**
* Returns a {@link DocValues} instance for merging from the given reader for the given
* {@link FieldInfo}. This method is used for merging and uses
- * {@link IndexReader#docValues(String)} by default.
+ * {@link AtomicIndexReader#docValues(String)} by default.
*
* To enable {@link DocValues} merging for different {@link DocValues} than
* the default override this method accordingly.
*
*/
- protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info) throws IOException {
+ protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info) throws IOException {
return reader.docValues(info.name);
}
diff --git a/lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java b/lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java
index 288aee47ad8..6564663d6b9 100644
--- a/lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java
+++ b/lucene/src/java/org/apache/lucene/codecs/lucene40/Lucene40NormsFormat.java
@@ -26,7 +26,7 @@ import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState;
@@ -95,7 +95,7 @@ public class Lucene40NormsFormat extends NormsFormat {
}
@Override
- protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
+ protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info)
throws IOException {
return reader.normValues(info.name);
}
diff --git a/lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java b/lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java
index e53e4b4d06d..5d1bec7d439 100644
--- a/lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java
+++ b/lucene/src/java/org/apache/lucene/codecs/simpletext/SimpleTextNormsConsumer.java
@@ -28,7 +28,7 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.store.Directory;
@@ -87,7 +87,7 @@ public class SimpleTextNormsConsumer extends PerDocConsumer {
}
@Override
- protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
+ protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info)
throws IOException {
return reader.normValues(info.name);
}
diff --git a/lucene/src/java/org/apache/lucene/index/AtomicIndexReader.java b/lucene/src/java/org/apache/lucene/index/AtomicIndexReader.java
new file mode 100644
index 00000000000..fe9a3c4753a
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/index/AtomicIndexReader.java
@@ -0,0 +1,291 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DocumentStoredFieldVisitor;
+import org.apache.lucene.search.SearcherManager; // javadocs
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ReaderUtil; // for javadocs
+
+/** IndexReader is an abstract class, providing an interface for accessing an
+ index. Search of an index is done entirely through this abstract interface,
+ so that any subclass which implements it is searchable.
+
+ <p> Concrete subclasses of IndexReader are usually constructed with a call to
+ one of the static open() methods, e.g. {@link
+ #open(Directory)}.
+
+ <p> For efficiency, in this API documents are often referred to via
+ document numbers, non-negative integers which each name a unique
+ document in the index. These document numbers are ephemeral--they may change
+ as documents are added to and deleted from an index. Clients should thus not
+ rely on a given document having the same number between sessions.
+
+
+ NOTE: for backwards API compatibility, several methods are not listed
+ as abstract, but have no useful implementations in this base class and
+ instead always throw UnsupportedOperationException. Subclasses are
+ strongly encouraged to override these methods, but in many cases may not
+ need to.
+
+
+
+
+ <a name="thread-safety"></a><p><b>NOTE</b>: {@link
+ IndexReader} instances are completely thread
+ safe, meaning multiple threads can call any of its methods,
+ concurrently. If your application requires external
+ synchronization, you should not synchronize on the
+ IndexReader instance; use your own
+ (non-Lucene) objects instead.
+*/
+public abstract class AtomicIndexReader extends IndexReader {
+
+ protected AtomicIndexReader() {
+ super();
+ }
+
+ @Override
+ public abstract AtomicReaderContext getTopReaderContext();
+
+ /** Returns true if there are norms stored for this field. */
+ public boolean hasNorms(String field) throws IOException {
+ // backward compatible implementation.
+ // SegmentReader has an efficient implementation.
+ ensureOpen();
+ return normValues(field) != null;
+ }
+
+ /**
+ * Returns {@link Fields} for this reader.
+ * This method may return null if the reader has no
+ * postings.
+ *
+   * <p><b>NOTE</b>: if this is a multi reader ({@link
+ * #getSequentialSubReaders} is not null) then this
+ * method will throw UnsupportedOperationException. If
+ * you really need a {@link Fields} for such a reader,
+ * use {@link MultiFields#getFields}. However, for
+ * performance reasons, it's best to get all sub-readers
+ * using {@link ReaderUtil#gatherSubReaders} and iterate
+ * through them yourself. */
+ public abstract Fields fields() throws IOException;
+
+ public final int docFreq(Term term) throws IOException {
+ return docFreq(term.field(), term.bytes());
+ }
+
+ /** Returns the number of documents containing the term
+ * t. This method returns 0 if the term or
+ * field does not exists. This method does not take into
+ * account deleted documents that have not yet been merged
+ * away. */
+ public int docFreq(String field, BytesRef term) throws IOException {
+ final Fields fields = fields();
+ if (fields == null) {
+ return 0;
+ }
+ final Terms terms = fields.terms(field);
+ if (terms == null) {
+ return 0;
+ }
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(term, true)) {
+ return termsEnum.docFreq();
+ } else {
+ return 0;
+ }
+ }
+
+ /** Returns the number of documents containing the term
+ * t. This method returns 0 if the term or
+ * field does not exists. This method does not take into
+ * account deleted documents that have not yet been merged
+ * away. */
+ public final long totalTermFreq(String field, BytesRef term) throws IOException {
+ final Fields fields = fields();
+ if (fields == null) {
+ return 0;
+ }
+ final Terms terms = fields.terms(field);
+ if (terms == null) {
+ return 0;
+ }
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(term, true)) {
+ return termsEnum.totalTermFreq();
+ } else {
+ return 0;
+ }
+ }
+
+ /** This may return null if the field does not exist.*/
+ public final Terms terms(String field) throws IOException {
+ final Fields fields = fields();
+ if (fields == null) {
+ return null;
+ }
+ return fields.terms(field);
+ }
+
+ /** Returns {@link DocsEnum} for the specified field &
+ * term. This may return null, if either the field or
+ * term does not exist. */
+ public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
+ assert field != null;
+ assert term != null;
+ final Fields fields = fields();
+ if (fields != null) {
+ final Terms terms = fields.terms(field);
+ if (terms != null) {
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(term, true)) {
+ return termsEnum.docs(liveDocs, null, needsFreqs);
+ }
+ }
+ }
+ return null;
+ }
+
+ /** Returns {@link DocsAndPositionsEnum} for the specified
+ * field & term. This may return null, if either the
+ * field or term does not exist, or needsOffsets is
+ * true but offsets were not indexed for this field. */
+ public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
+ assert field != null;
+ assert term != null;
+ final Fields fields = fields();
+ if (fields != null) {
+ final Terms terms = fields.terms(field);
+ if (terms != null) {
+ final TermsEnum termsEnum = terms.iterator(null);
+ if (termsEnum.seekExact(term, true)) {
+ return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns {@link DocsEnum} for the specified field and
+ * {@link TermState}. This may return null, if either the field or the term
+ * does not exists or the {@link TermState} is invalid for the underlying
+ * implementation.*/
+ public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
+ assert state != null;
+ assert field != null;
+ final Fields fields = fields();
+ if (fields != null) {
+ final Terms terms = fields.terms(field);
+ if (terms != null) {
+ final TermsEnum termsEnum = terms.iterator(null);
+ termsEnum.seekExact(term, state);
+ return termsEnum.docs(liveDocs, null, needsFreqs);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Returns {@link DocsAndPositionsEnum} for the specified field and
+ * {@link TermState}. This may return null, if either the field or the term
+ * does not exists, the {@link TermState} is invalid for the underlying
+ * implementation, or needsOffsets is true but offsets
+ * were not indexed for this field. */
+ public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
+ assert state != null;
+ assert field != null;
+ final Fields fields = fields();
+ if (fields != null) {
+ final Terms terms = fields.terms(field);
+ if (terms != null) {
+ final TermsEnum termsEnum = terms.iterator(null);
+ termsEnum.seekExact(term, state);
+ return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
+ }
+ }
+ return null;
+ }
+
+ /** Returns the number of unique terms (across all fields)
+ * in this reader.
+ *
+ * @return number of unique terms or -1 if this count
+ * cannot be easily determined (eg Multi*Readers).
+ * Instead, you should call {@link
+ * #getSequentialSubReaders} and ask each sub reader for
+ * its unique term count. */
+ public final long getUniqueTermCount() throws IOException {
+ final Fields fields = fields();
+ if (fields == null) {
+ return 0;
+ }
+ return fields.getUniqueTermCount();
+ }
+
+ /**
+ * Returns {@link DocValues} for this field.
+ * This method may return null if the reader has no per-document
+ * values stored.
+ *
+   * <p><b>NOTE</b>: if this is a multi reader ({@link
+ * #getSequentialSubReaders} is not null) then this
+ * method will throw UnsupportedOperationException. If
+ * you really need {@link DocValues} for such a reader,
+ * use {@link MultiDocValues#getDocValues(IndexReader,String)}. However, for
+ * performance reasons, it's best to get all sub-readers
+ * using {@link ReaderUtil#gatherSubReaders} and iterate
+ * through them yourself. */
+ public abstract DocValues docValues(String field) throws IOException;
+
+ public abstract DocValues normValues(String field) throws IOException;
+
+ /**
+ * Get the {@link FieldInfos} describing all fields in
+ * this reader. NOTE: do not make any changes to the
+ * returned FieldInfos!
+ *
+ * @lucene.experimental
+ */
+ public abstract FieldInfos getFieldInfos();
+
+ /** Returns the {@link Bits} representing live (not
+ * deleted) docs. A set bit indicates the doc ID has not
+ * been deleted. If this method returns null it means
+ * there are no deleted documents (all documents are
+ * live).
+ *
+ * The returned instance has been safely published for
+ * use by multiple threads without additional
+ * synchronization.
+ */
+ public abstract Bits getLiveDocs();
+
+}
diff --git a/lucene/src/java/org/apache/lucene/index/BaseMultiReader.java b/lucene/src/java/org/apache/lucene/index/BaseMultiReader.java
index 69db6960980..418193d174d 100644
--- a/lucene/src/java/org/apache/lucene/index/BaseMultiReader.java
+++ b/lucene/src/java/org/apache/lucene/index/BaseMultiReader.java
@@ -23,10 +23,10 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
-abstract class BaseMultiReader extends IndexReader {
+abstract class BaseMultiReader extends CompositeIndexReader {
protected final R[] subReaders;
protected final int[] starts; // 1st docno for each segment
- private final ReaderContext topLevelContext;
+ private final CompositeReaderContext topLevelContext;
private final int maxDoc;
private final int numDocs;
private final boolean hasDeletions;
@@ -49,26 +49,11 @@ abstract class BaseMultiReader extends IndexReader {
this.maxDoc = maxDoc;
this.numDocs = numDocs;
this.hasDeletions = hasDeletions;
- topLevelContext = ReaderUtil.buildReaderContext(this);
- }
-
- @Override
- public FieldInfos getFieldInfos() {
- throw new UnsupportedOperationException("call getFieldInfos() on each sub reader, or use ReaderUtil.getMergedFieldInfos, instead");
+ topLevelContext = (CompositeReaderContext) ReaderUtil.buildReaderContext(this);
}
@Override
- public Fields fields() throws IOException {
- throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
- }
-
- @Override
- protected abstract IndexReader doOpenIfChanged() throws CorruptIndexException, IOException;
-
- @Override
- public Bits getLiveDocs() {
- throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
- }
+ protected abstract CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException;
@Override
public Fields getTermVectors(int docID) throws IOException {
@@ -109,43 +94,14 @@ abstract class BaseMultiReader extends IndexReader {
}
return ReaderUtil.subIndex(docID, this.starts);
}
-
- @Override
- public boolean hasNorms(String field) throws IOException {
- ensureOpen();
- for (int i = 0; i < subReaders.length; i++) {
- if (subReaders[i].hasNorms(field)) return true;
- }
- return false;
- }
- @Override
- public int docFreq(String field, BytesRef t) throws IOException {
- ensureOpen();
- int total = 0; // sum freqs in segments
- for (int i = 0; i < subReaders.length; i++) {
- total += subReaders[i].docFreq(field, t);
- }
- return total;
- }
-
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
@Override
- public ReaderContext getTopReaderContext() {
+ public CompositeReaderContext getTopReaderContext() {
return topLevelContext;
}
-
- @Override
- public DocValues docValues(String field) throws IOException {
- throw new UnsupportedOperationException("please use MultiDocValues#getDocValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level DocValues");
- }
-
- @Override
- public DocValues normValues(String field) throws IOException {
- throw new UnsupportedOperationException("please use MultiDocValues#getNormValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Norm DocValues ");
- }
}
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
index 47bd8ea5598..14b152f19ce 100644
--- a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -435,14 +435,14 @@ class BufferedDeletesStream {
}
// Delete by query
- private static long applyQueryDeletes(Iterable queriesIter, IndexWriter.ReadersAndLiveDocs rld, SegmentReader reader) throws IOException {
+ private static long applyQueryDeletes(Iterable queriesIter, IndexWriter.ReadersAndLiveDocs rld, final SegmentReader reader) throws IOException {
long delCount = 0;
- final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
+ final AtomicReaderContext readerContext = reader.getTopReaderContext();
boolean any = false;
for (QueryAndLimit ent : queriesIter) {
Query query = ent.query;
int limit = ent.limit;
- final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, readerContext.reader.getLiveDocs());
+ final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, reader.getLiveDocs());
if (docs != null) {
final DocIdSetIterator it = docs.iterator();
if (it != null) {
diff --git a/lucene/src/java/org/apache/lucene/index/CompositeIndexReader.java b/lucene/src/java/org/apache/lucene/index/CompositeIndexReader.java
new file mode 100644
index 00000000000..ebbfb5f708e
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/index/CompositeIndexReader.java
@@ -0,0 +1,267 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DocumentStoredFieldVisitor;
+import org.apache.lucene.search.SearcherManager; // javadocs
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ReaderUtil; // for javadocs
+
+/** IndexReader is an abstract class, providing an interface for accessing an
+ index. Search of an index is done entirely through this abstract interface,
+ so that any subclass which implements it is searchable.
+
+ <p> Concrete subclasses of IndexReader are usually constructed with a call to
+ one of the static open() methods, e.g. {@link
+ #open(Directory)}.
+
+ <p> For efficiency, in this API documents are often referred to via
+ document numbers, non-negative integers which each name a unique
+ document in the index. These document numbers are ephemeral--they may change
+ as documents are added to and deleted from an index. Clients should thus not
+ rely on a given document having the same number between sessions.
+
+
+ NOTE: for backwards API compatibility, several methods are not listed
+ as abstract, but have no useful implementations in this base class and
+ instead always throw UnsupportedOperationException. Subclasses are
+ strongly encouraged to override these methods, but in many cases may not
+ need to.
+
+
+
+
+ <a name="thread-safety"></a><p><b>NOTE</b>: {@link
+ IndexReader} instances are completely thread
+ safe, meaning multiple threads can call any of its methods,
+ concurrently. If your application requires external
+ synchronization, you should not synchronize on the
+ IndexReader instance; use your own
+ (non-Lucene) objects instead.
+*/
+public abstract class CompositeIndexReader extends IndexReader {
+
+ protected CompositeIndexReader() {
+ super();
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder buffer = new StringBuilder();
+ buffer.append(getClass().getSimpleName());
+ buffer.append('(');
+ final IndexReader[] subReaders = getSequentialSubReaders();
+ if ((subReaders != null) && (subReaders.length > 0)) {
+ buffer.append(subReaders[0]);
+ for (int i = 1; i < subReaders.length; ++i) {
+ buffer.append(" ").append(subReaders[i]);
+ }
+ }
+ buffer.append(')');
+ return buffer.toString();
+ }
+
+ @Override
+ public abstract CompositeReaderContext getTopReaderContext();
+
+ /**
+ * If the index has changed since it was opened, open and return a new reader;
+ * else, return {@code null}.
+ *
+ * @see #openIfChanged(IndexReader)
+ */
+ protected CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
+ throw new UnsupportedOperationException("This reader does not support reopen().");
+ }
+
+ /**
+ * If the index has changed since it was opened, open and return a new reader;
+ * else, return {@code null}.
+ *
+ * @see #openIfChanged(IndexReader, IndexCommit)
+ */
+ protected CompositeIndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
+ throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
+ }
+
+ /**
+ * If the index has changed since it was opened, open and return a new reader;
+ * else, return {@code null}.
+ *
+ * @see #openIfChanged(IndexReader, IndexWriter, boolean)
+ */
+ protected CompositeIndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ return writer.getReader(applyAllDeletes);
+ }
+
+ /**
+ * Version number when this IndexReader was opened. Not
+ * implemented in the IndexReader base class.
+ *
+   * <p>If this reader is based on a Directory (ie, was
+ * created by calling {@link #open}, or {@link #openIfChanged} on
+ * a reader based on a Directory), then this method
+ * returns the version recorded in the commit that the
+ * reader opened. This version is advanced every time
+ * {@link IndexWriter#commit} is called.
+ *
+   * <p>If instead this reader is a near real-time reader
+ * (ie, obtained by a call to {@link
+ * IndexWriter#getReader}, or by calling {@link #openIfChanged}
+ * on a near real-time reader), then this method returns
+ * the version of the last commit done by the writer.
+ * Note that even as further changes are made with the
+ * writer, the version will not changed until a commit is
+ * completed. Thus, you should not rely on this method to
+ * determine when a near real-time reader should be
+ * opened. Use {@link #isCurrent} instead.
+ */
+ public abstract long getVersion();
+
+ /**
+ * Check whether any new changes have occurred to the
+ * index since this reader was opened.
+ *
+   * <p>If this reader is based on a Directory (ie, was
+ * created by calling {@link #open}, or {@link #openIfChanged} on
+ * a reader based on a Directory), then this method checks
+ * if any further commits (see {@link IndexWriter#commit}
+ * have occurred in that directory).
+ *
+   * <p>If instead this reader is a near real-time reader
+ * (ie, obtained by a call to {@link
+ * IndexWriter#getReader}, or by calling {@link #openIfChanged}
+ * on a near real-time reader), then this method checks if
+ * either a new commit has occurred, or any new
+ * uncommitted changes have taken place via the writer.
+ * Note that even if the writer has only performed
+ * merging, this method will still return false.
+ *
+   * <p>In any event, if this returns false, you should call
+ * {@link #openIfChanged} to get a new reader that sees the
+ * changes.
+ *
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ * @throws UnsupportedOperationException unless overridden in subclass
+ */
+ public abstract boolean isCurrent() throws CorruptIndexException, IOException;
+
+ /**
+ * Returns the time the index in the named directory was last modified.
+ * Do not use this to check whether the reader is still up-to-date, use
+ * {@link #isCurrent()} instead.
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ */
+ public static long lastModified(final Directory directory) throws CorruptIndexException, IOException {
+ return ((Long) new SegmentInfos.FindSegmentsFile(directory) {
+ @Override
+ public Object doBody(String segmentFileName) throws IOException {
+ return Long.valueOf(directory.fileModified(segmentFileName));
+ }
+ }.run()).longValue();
+ }
+
+ /**
+ * Reads version number from segments files. The version number is
+ * initialized with a timestamp and then increased by one for each change of
+ * the index.
+ *
+ * @param directory where the index resides.
+ * @return version number.
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ */
+ public static long getCurrentVersion(Directory directory) throws CorruptIndexException, IOException {
+ return SegmentInfos.readCurrentVersion(directory);
+ }
+
+ /**
+ * Reads commitUserData, previously passed to {@link
+ * IndexWriter#commit(Map)}, from current index
+ * segments file. This will return null if {@link
+ * IndexWriter#commit(Map)} has never been called for
+ * this index.
+ *
+ * @param directory where the index resides.
+ * @return commit userData.
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
+ *
+ * @see #getCommitUserData()
+ */
+ public static Map getCommitUserData(Directory directory) throws CorruptIndexException, IOException {
+ return SegmentInfos.readCurrentUserData(directory);
+ }
+
+ /**
+ * Retrieve the String userData optionally passed to
+ * IndexWriter#commit. This will return null if {@link
+ * IndexWriter#commit(Map)} has never been called for
+ * this index.
+ *
+ * @see #getCommitUserData(Directory)
+ */
+ public Map getCommitUserData() {
+ throw new UnsupportedOperationException("This reader does not support this method.");
+ }
+
+ /**
+ * Expert: return the IndexCommit that this reader has
+ * opened. This method is only implemented by those
+ * readers that correspond to a Directory with its own
+ * segments_N file.
+ *
+ * @lucene.experimental
+ */
+ public IndexCommit getIndexCommit() throws IOException {
+ throw new UnsupportedOperationException("This reader does not support this method.");
+ }
+
+ /** Expert: returns the sequential sub readers that this
+ * reader is logically composed of. If this reader is not composed
+ * of sequential child readers, it should return null.
+ * If this method returns an empty array, that means this
+ * reader is a null reader (for example a MultiReader
+ * that has no sub readers).
+ */
+ public abstract IndexReader[] getSequentialSubReaders();
+
+ /** For IndexReader implementations that use
+ * TermInfosReader to read terms, this returns the
+ * current indexDivisor as specified when the reader was
+ * opened.
+ */
+ public int getTermInfosIndexDivisor() {
+ throw new UnsupportedOperationException("This reader does not support this method.");
+ }
+
+}
diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
index 6a9bcd1115f..282c27ac236 100644
--- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -32,8 +32,9 @@ import org.apache.lucene.util.IOUtils;
/**
* An IndexReader which reads indexes with multiple segments.
+ * To get an instance of this reader use {@link IndexReader#open(Directory)}.
*/
-final class DirectoryReader extends BaseMultiReader {
+public final class DirectoryReader extends BaseMultiReader {
protected final Directory directory;
private final IndexWriter writer;
private final SegmentInfos segmentInfos;
@@ -50,9 +51,9 @@ final class DirectoryReader extends BaseMultiReader {
this.applyAllDeletes = applyAllDeletes;
}
- static IndexReader open(final Directory directory, final IndexCommit commit,
+ static DirectoryReader open(final Directory directory, final IndexCommit commit,
final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
- return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+ return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
SegmentInfos sis = new SegmentInfos();
@@ -222,12 +223,12 @@ final class DirectoryReader extends BaseMultiReader {
}
@Override
- protected final IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
+ protected final CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
return doOpenIfChanged(null);
}
@Override
- protected final IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
+ protected final CompositeIndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
ensureOpen();
// If we were obtained by writer.getReader(), re-ask the
@@ -240,7 +241,7 @@ final class DirectoryReader extends BaseMultiReader {
}
@Override
- protected final IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ protected final CompositeIndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
ensureOpen();
if (writer == this.writer && applyAllDeletes == this.applyAllDeletes) {
return doOpenFromWriter(null);
@@ -250,7 +251,7 @@ final class DirectoryReader extends BaseMultiReader {
}
}
- private final IndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+ private final CompositeIndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
if (commit != null) {
throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
}
@@ -259,7 +260,7 @@ final class DirectoryReader extends BaseMultiReader {
return null;
}
- IndexReader reader = writer.getReader(applyAllDeletes);
+ CompositeIndexReader reader = writer.getReader(applyAllDeletes);
// If in fact no changes took place, return null:
if (reader.getVersion() == segmentInfos.getVersion()) {
@@ -270,7 +271,7 @@ final class DirectoryReader extends BaseMultiReader {
return reader;
}
- private synchronized IndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
+ private synchronized CompositeIndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
if (commit == null) {
if (isCurrent()) {
@@ -285,7 +286,7 @@ final class DirectoryReader extends BaseMultiReader {
}
}
- return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
+ return (CompositeIndexReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
final SegmentInfos infos = new SegmentInfos();
diff --git a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
index d5152784bfb..8876ab3fdf7 100644
--- a/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
@@ -39,7 +39,7 @@ import java.util.Comparator;
* To reopen, you have to first reopen the underlying reader
* and wrap it again with the custom filter.
*/
-public class FilterIndexReader extends IndexReader {
+public class FilterIndexReader extends AtomicIndexReader {
/** Base class for filtering {@link Fields}
* implementations. */
@@ -279,14 +279,14 @@ public class FilterIndexReader extends IndexReader {
}
}
- protected IndexReader in;
+ protected AtomicIndexReader in;
/**
 * <p>Construct a FilterIndexReader based on the specified base reader.
 * <p>Note that base reader is closed if this FilterIndexReader is closed.
 * @param in specified base reader.
 */
- public FilterIndexReader(IndexReader in) {
+ public FilterIndexReader(AtomicIndexReader in) {
super();
this.in = in;
}
@@ -355,35 +355,13 @@ public class FilterIndexReader extends IndexReader {
protected void doClose() throws IOException {
in.close();
}
-
- @Override
- public long getVersion() {
- ensureOpen();
- return in.getVersion();
- }
-
- @Override
- public boolean isCurrent() throws CorruptIndexException, IOException {
- ensureOpen();
- return in.isCurrent();
- }
@Override
- public IndexReader[] getSequentialSubReaders() {
- return in.getSequentialSubReaders();
- }
-
- @Override
- public ReaderContext getTopReaderContext() {
+ public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return in.getTopReaderContext();
}
- @Override
- public Map<String,String> getCommitUserData() {
- return in.getCommitUserData();
- }
-
@Override
public Fields fields() throws IOException {
ensureOpen();
@@ -428,11 +406,6 @@ public class FilterIndexReader extends IndexReader {
return in.normValues(field);
}
- @Override
- public IndexCommit getIndexCommit() throws IOException {
- return in.getIndexCommit();
- }
-
@Override
public int getTermInfosIndexDivisor() {
return in.getTermInfosIndexDivisor();
diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java
index ae4037d22b2..11c8fc38e56 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java
@@ -172,23 +172,6 @@ public abstract class IndexReader implements Closeable {
return false;
}
- /** {@inheritDoc} */
- @Override
- public String toString() {
- final StringBuilder buffer = new StringBuilder();
- buffer.append(getClass().getSimpleName());
- buffer.append('(');
- final IndexReader[] subReaders = getSequentialSubReaders();
- if ((subReaders != null) && (subReaders.length > 0)) {
- buffer.append(subReaders[0]);
- for (int i = 1; i < subReaders.length; ++i) {
- buffer.append(" ").append(subReaders[i]);
- }
- }
- buffer.append(')');
- return buffer.toString();
- }
-
/**
* Expert: decreases the refCount of this IndexReader
* instance. If the refCount drops to 0, then this
@@ -238,7 +221,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public static IndexReader open(final Directory directory) throws CorruptIndexException, IOException {
+ public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException {
return DirectoryReader.open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
}
@@ -258,7 +241,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public static IndexReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+ public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return DirectoryReader.open(directory, null, termInfosIndexDivisor);
}
@@ -281,7 +264,7 @@ public abstract class IndexReader implements Closeable {
*
* @lucene.experimental
*/
- public static IndexReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
+ public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
return writer.getReader(applyAllDeletes);
}
@@ -291,7 +274,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public static IndexReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
+ public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
return DirectoryReader.open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
}
@@ -312,7 +295,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
- public static IndexReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
+ public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return DirectoryReader.open(commit.getDirectory(), commit, termInfosIndexDivisor);
}
@@ -340,8 +323,8 @@ public abstract class IndexReader implements Closeable {
* @return null if there are no changes; else, a new
* IndexReader instance which you must eventually close
*/
- public static IndexReader openIfChanged(IndexReader oldReader) throws IOException {
- final IndexReader newReader = oldReader.doOpenIfChanged();
+ public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader) throws IOException {
+ final CompositeIndexReader newReader = oldReader.doOpenIfChanged();
assert newReader != oldReader;
return newReader;
}
@@ -353,8 +336,8 @@ public abstract class IndexReader implements Closeable {
*
* @see #openIfChanged(IndexReader)
*/
- public static IndexReader openIfChanged(IndexReader oldReader, IndexCommit commit) throws IOException {
- final IndexReader newReader = oldReader.doOpenIfChanged(commit);
+ public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader, IndexCommit commit) throws IOException {
+ final CompositeIndexReader newReader = oldReader.doOpenIfChanged(commit);
assert newReader != oldReader;
return newReader;
}
@@ -420,43 +403,13 @@ public abstract class IndexReader implements Closeable {
*
* @lucene.experimental
*/
- public static IndexReader openIfChanged(IndexReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
- final IndexReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
+ public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
+ final CompositeIndexReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
assert newReader != oldReader;
return newReader;
}
/**
- * If the index has changed since it was opened, open and return a new reader;
- * else, return {@code null}.
- *
- * @see #openIfChanged(IndexReader)
- */
- protected IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
- throw new UnsupportedOperationException("This reader does not support reopen().");
- }
-
- /**
- * If the index has changed since it was opened, open and return a new reader;
- * else, return {@code null}.
- *
- * @see #openIfChanged(IndexReader, IndexCommit)
- */
- protected IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
- throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
- }
-
- /**
- * If the index has changed since it was opened, open and return a new reader;
- * else, return {@code null}.
- *
- * @see #openIfChanged(IndexReader, IndexWriter, boolean)
- */
- protected IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
- return writer.getReader(applyAllDeletes);
- }
-
- /**
* Returns the directory associated with this index. The Default
* implementation returns the directory specified by subclasses when
* delegating to the IndexReader(Directory) constructor, or throws an
@@ -516,78 +469,6 @@ public abstract class IndexReader implements Closeable {
return SegmentInfos.readCurrentUserData(directory);
}
- /**
- * Version number when this IndexReader was opened. Not
- * implemented in the IndexReader base class.
- *
- * <p>If this reader is based on a Directory (ie, was
- * created by calling {@link #open}, or {@link #openIfChanged} on
- * a reader based on a Directory), then this method
- * returns the version recorded in the commit that the
- * reader opened. This version is advanced every time
- * {@link IndexWriter#commit} is called.
- *
- * <p>If instead this reader is a near real-time reader
- * (ie, obtained by a call to {@link
- * IndexWriter#getReader}, or by calling {@link #openIfChanged}
- * on a near real-time reader), then this method returns
- * the version of the last commit done by the writer.
- * Note that even as further changes are made with the
- * writer, the version will not changed until a commit is
- * completed. Thus, you should not rely on this method to
- * determine when a near real-time reader should be
- * opened. Use {@link #isCurrent} instead.
- *
- * @throws UnsupportedOperationException unless overridden in subclass
- */
- public long getVersion() {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
- /**
- * Retrieve the String userData optionally passed to
- * IndexWriter#commit. This will return null if {@link
- * IndexWriter#commit(Map)} has never been called for
- * this index.
- *
- * @see #getCommitUserData(Directory)
- */
- public Map<String,String> getCommitUserData() {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
-
- /**
- * Check whether any new changes have occurred to the
- * index since this reader was opened.
- *
- * <p>If this reader is based on a Directory (ie, was
- * created by calling {@link #open}, or {@link #openIfChanged} on
- * a reader based on a Directory), then this method checks
- * if any further commits (see {@link IndexWriter#commit}
- * have occurred in that directory).
- *
- * <p>If instead this reader is a near real-time reader
- * (ie, obtained by a call to {@link
- * IndexWriter#getReader}, or by calling {@link #openIfChanged}
- * on a near real-time reader), then this method checks if
- * either a new commit has occurred, or any new
- * uncommitted changes have taken place via the writer.
- * Note that even if the writer has only performed
- * merging, this method will still return false.
- *
- * <p>In any event, if this returns false, you should call
- * {@link #openIfChanged} to get a new reader that sees the
- * changes.
- *
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- * @throws UnsupportedOperationException unless overridden in subclass
- */
- public boolean isCurrent() throws CorruptIndexException, IOException {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
/** Retrieve term vectors for this document, or null if
* term vectors were not indexed. The returned Fields
* instance acts like a single-document inverted index
@@ -687,166 +568,6 @@ public abstract class IndexReader implements Closeable {
/** Returns true if any documents have been deleted */
public abstract boolean hasDeletions();
- /** Returns true if there are norms stored for this field. */
- public boolean hasNorms(String field) throws IOException {
- // backward compatible implementation.
- // SegmentReader has an efficient implementation.
- ensureOpen();
- return normValues(field) != null;
- }
-
- /**
- * Returns {@link Fields} for this reader.
- * This method may return null if the reader has no
- * postings.
- *
- * <p><b>NOTE</b>: if this is a multi reader ({@link
- * #getSequentialSubReaders} is not null) then this
- * method will throw UnsupportedOperationException. If
- * you really need a {@link Fields} for such a reader,
- * use {@link MultiFields#getFields}. However, for
- * performance reasons, it's best to get all sub-readers
- * using {@link ReaderUtil#gatherSubReaders} and iterate
- * through them yourself. */
- public abstract Fields fields() throws IOException;
-
- public final int docFreq(Term term) throws IOException {
- return docFreq(term.field(), term.bytes());
- }
-
- /** Returns the number of documents containing the term
- * t. This method returns 0 if the term or
- * field does not exists. This method does not take into
- * account deleted documents that have not yet been merged
- * away. */
- public int docFreq(String field, BytesRef term) throws IOException {
- final Fields fields = fields();
- if (fields == null) {
- return 0;
- }
- final Terms terms = fields.terms(field);
- if (terms == null) {
- return 0;
- }
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term, true)) {
- return termsEnum.docFreq();
- } else {
- return 0;
- }
- }
-
- /** Returns the number of documents containing the term
- * t. This method returns 0 if the term or
- * field does not exists. This method does not take into
- * account deleted documents that have not yet been merged
- * away. */
- public final long totalTermFreq(String field, BytesRef term) throws IOException {
- final Fields fields = fields();
- if (fields == null) {
- return 0;
- }
- final Terms terms = fields.terms(field);
- if (terms == null) {
- return 0;
- }
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term, true)) {
- return termsEnum.totalTermFreq();
- } else {
- return 0;
- }
- }
-
- /** This may return null if the field does not exist.*/
- public final Terms terms(String field) throws IOException {
- final Fields fields = fields();
- if (fields == null) {
- return null;
- }
- return fields.terms(field);
- }
-
- /** Returns {@link DocsEnum} for the specified field &
- * term. This may return null, if either the field or
- * term does not exist. */
- public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
- assert field != null;
- assert term != null;
- final Fields fields = fields();
- if (fields != null) {
- final Terms terms = fields.terms(field);
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term, true)) {
- return termsEnum.docs(liveDocs, null, needsFreqs);
- }
- }
- }
- return null;
- }
-
- /** Returns {@link DocsAndPositionsEnum} for the specified
- * field & term. This may return null, if either the
- * field or term does not exist, or needsOffsets is
- * true but offsets were not indexed for this field. */
- public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
- assert field != null;
- assert term != null;
- final Fields fields = fields();
- if (fields != null) {
- final Terms terms = fields.terms(field);
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term, true)) {
- return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
- }
- }
- }
- return null;
- }
-
- /**
- * Returns {@link DocsEnum} for the specified field and
- * {@link TermState}. This may return null, if either the field or the term
- * does not exists or the {@link TermState} is invalid for the underlying
- * implementation.*/
- public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
- assert state != null;
- assert field != null;
- final Fields fields = fields();
- if (fields != null) {
- final Terms terms = fields.terms(field);
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- termsEnum.seekExact(term, state);
- return termsEnum.docs(liveDocs, null, needsFreqs);
- }
- }
- return null;
- }
-
- /**
- * Returns {@link DocsAndPositionsEnum} for the specified field and
- * {@link TermState}. This may return null, if either the field or the term
- * does not exists, the {@link TermState} is invalid for the underlying
- * implementation, or needsOffsets is true but offsets
- * were not indexed for this field. */
- public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
- assert state != null;
- assert field != null;
- final Fields fields = fields();
- if (fields != null) {
- final Terms terms = fields.terms(field);
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- termsEnum.seekExact(term, state);
- return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
- }
- }
- return null;
- }
-
/**
* Closes files associated with this index.
* Also saves any new deletions to disk.
@@ -863,39 +584,6 @@ public abstract class IndexReader implements Closeable {
/** Implements close. */
protected abstract void doClose() throws IOException;
- /**
- * Get the {@link FieldInfos} describing all fields in
- * this reader. NOTE: do not make any changes to the
- * returned FieldInfos!
- *
- * @lucene.experimental
- */
- public abstract FieldInfos getFieldInfos();
-
- /** Returns the {@link Bits} representing live (not
- * deleted) docs. A set bit indicates the doc ID has not
- * been deleted. If this method returns null it means
- * there are no deleted documents (all documents are
- * live).
- *
- * The returned instance has been safely published for
- * use by multiple threads without additional
- * synchronization.
- * @lucene.experimental */
- public abstract Bits getLiveDocs();
-
- /**
- * Expert: return the IndexCommit that this reader has
- * opened. This method is only implemented by those
- * readers that correspond to a Directory with its own
- * segments_N file.
- *
- * @lucene.experimental
- */
- public IndexCommit getIndexCommit() throws IOException {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
/** Returns all commit points that exist in the Directory.
* Normally, because the default is {@link
* KeepOnlyLastCommitDeletionPolicy}, there would be only
@@ -915,18 +603,6 @@ public abstract class IndexReader implements Closeable {
return DirectoryReader.listCommits(dir);
}
- /** Expert: returns the sequential sub readers that this
- * reader is logically composed of. If this reader is not composed
- * of sequential child readers, it should return null.
- * If this method returns an empty array, that means this
- * reader is a null reader (for example a MultiReader
- * that has no sub readers).
- */
- public IndexReader[] getSequentialSubReaders() {
- ensureOpen();
- return null;
- }
-
/**
* Expert: Returns a the root {@link ReaderContext} for this
* {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
@@ -966,25 +642,6 @@ public abstract class IndexReader implements Closeable {
return this;
}
- /** Returns the number of unique terms (across all fields)
- * in this reader.
- *
- * @return number of unique terms or -1 if this count
- * cannot be easily determined (eg Multi*Readers).
- * Instead, you should call {@link
- * #getSequentialSubReaders} and ask each sub reader for
- * its unique term count. */
- public final long getUniqueTermCount() throws IOException {
- if (!getTopReaderContext().isAtomic) {
- return -1;
- }
- final Fields fields = fields();
- if (fields == null) {
- return 0;
- }
- return fields.getUniqueTermCount();
- }
-
/** For IndexReader implementations that use
* TermInfosReader to read terms, this returns the
* current indexDivisor as specified when the reader was
@@ -994,49 +651,17 @@ public abstract class IndexReader implements Closeable {
throw new UnsupportedOperationException("This reader does not support this method.");
}
- /**
- * Returns {@link DocValues} for this field.
- * This method may return null if the reader has no per-document
- * values stored.
- *
- * <p><b>NOTE</b>: if this is a multi reader ({@link
- * #getSequentialSubReaders} is not null) then this
- * method will throw UnsupportedOperationException. If
- * you really need {@link DocValues} for such a reader,
- * use {@link MultiDocValues#getDocValues(IndexReader,String)}. However, for
- * performance reasons, it's best to get all sub-readers
- * using {@link ReaderUtil#gatherSubReaders} and iterate
- * through them yourself. */
- public abstract DocValues docValues(String field) throws IOException;
-
- public abstract DocValues normValues(String field) throws IOException;
-
- private volatile Fields fields;
-
- /** @lucene.internal */
- void storeFields(Fields fields) {
- ensureOpen();
- this.fields = fields;
- }
-
- /** @lucene.internal */
- Fields retrieveFields() {
- ensureOpen();
- return fields;
- }
-
+ // nocommit: remove generics and add a typed (overloaded) getter method instead instance fields with "R reader"
/**
* A struct like class that represents a hierarchical relationship between
* {@link IndexReader} instances.
* @lucene.experimental
*/
- public static abstract class ReaderContext {
+ public static abstract class ReaderContext<R extends IndexReader> {
/** The reader context for this reader's immediate parent, or null if none */
- public final ReaderContext parent;
+ public final CompositeReaderContext parent;
/** The actual reader */
- public final IndexReader reader;
- /** true iff the reader is an atomic reader */
- public final boolean isAtomic;
+ public final R reader;
/** true if this context struct represents the top level reader within the hierarchical context */
public final boolean isTopLevel;
/** the doc base for this reader in the parent, 0 if parent is null */
@@ -1044,11 +669,10 @@ public abstract class IndexReader implements Closeable {
/** the ord for this reader in the parent, 0 if parent is null */
public final int ordInParent;
- ReaderContext(ReaderContext parent, IndexReader reader,
- boolean isAtomic, int ordInParent, int docBaseInParent) {
+ ReaderContext(CompositeReaderContext parent, R reader,
+ int ordInParent, int docBaseInParent) {
this.parent = parent;
this.reader = reader;
- this.isAtomic = isAtomic;
this.docBaseInParent = docBaseInParent;
this.ordInParent = ordInParent;
this.isTopLevel = parent==null;
@@ -1073,18 +697,18 @@ public abstract class IndexReader implements Closeable {
* instanceof checks and type-casts to
* {@link CompositeReaderContext}.
*/
- public ReaderContext[] children() {
+ public ReaderContext<? extends IndexReader>[] children() {
return null;
}
}
/**
- * {@link ReaderContext} for composite {@link IndexReader} instance.
+ * {@link ReaderContext} for {@link CompositeIndexReader} instance.
* @lucene.experimental
*/
- public static final class CompositeReaderContext extends ReaderContext {
+ public static final class CompositeReaderContext extends ReaderContext<CompositeIndexReader> {
/** the composite readers immediate children */
- public final ReaderContext[] children;
+ public final ReaderContext<? extends IndexReader>[] children;
/** the composite readers leaf reader contexts if this is the top level reader in this context */
public final AtomicReaderContext[] leaves;
@@ -1092,22 +716,22 @@ public abstract class IndexReader implements Closeable {
* Creates a {@link CompositeReaderContext} for intermediate readers that aren't
* not top-level readers in the current context
*/
- public CompositeReaderContext(ReaderContext parent, IndexReader reader,
- int ordInParent, int docbaseInParent, ReaderContext[] children) {
+ public CompositeReaderContext(CompositeReaderContext parent, CompositeIndexReader reader,
+ int ordInParent, int docbaseInParent, ReaderContext<? extends IndexReader>[] children) {
this(parent, reader, ordInParent, docbaseInParent, children, null);
}
/**
* Creates a {@link CompositeReaderContext} for top-level readers with parent set to null
*/
- public CompositeReaderContext(IndexReader reader, ReaderContext[] children, AtomicReaderContext[] leaves) {
+ public CompositeReaderContext(CompositeIndexReader reader, ReaderContext<? extends IndexReader>[] children, AtomicReaderContext[] leaves) {
this(null, reader, 0, 0, children, leaves);
}
- private CompositeReaderContext(ReaderContext parent, IndexReader reader,
- int ordInParent, int docbaseInParent, ReaderContext[] children,
+ private CompositeReaderContext(CompositeReaderContext parent, CompositeIndexReader reader,
+ int ordInParent, int docbaseInParent, ReaderContext<? extends IndexReader>[] children,
AtomicReaderContext[] leaves) {
- super(parent, reader, false, ordInParent, docbaseInParent);
+ super(parent, reader, ordInParent, docbaseInParent);
this.children = children;
this.leaves = leaves;
}
@@ -1119,16 +743,16 @@ public abstract class IndexReader implements Closeable {
@Override
- public ReaderContext[] children() {
+ public ReaderContext<? extends IndexReader>[] children() {
return children;
}
}
/**
- * {@link ReaderContext} for atomic {@link IndexReader} instances
+ * {@link ReaderContext} for {@link AtomicIndexReader} instances
* @lucene.experimental
*/
- public static final class AtomicReaderContext extends ReaderContext {
+ public static final class AtomicReaderContext extends ReaderContext<AtomicIndexReader> {
/** The readers ord in the top-level's leaves array */
public final int ord;
/** The readers absolute doc base */
@@ -1136,10 +760,9 @@ public abstract class IndexReader implements Closeable {
/**
* Creates a new {@link AtomicReaderContext}
*/
- public AtomicReaderContext(ReaderContext parent, IndexReader reader,
+ public AtomicReaderContext(CompositeReaderContext parent, AtomicIndexReader reader,
int ord, int docBase, int leafOrd, int leafDocBase) {
- super(parent, reader, true, ord, docBase);
- assert reader.getSequentialSubReaders() == null : "Atomic readers must not have subreaders";
+ super(parent, reader, ord, docBase);
this.ord = leafOrd;
this.docBase = leafDocBase;
}
@@ -1148,7 +771,7 @@ public abstract class IndexReader implements Closeable {
* Creates a new {@link AtomicReaderContext} for a atomic reader without an immediate
* parent.
*/
- public AtomicReaderContext(IndexReader atomicReader) {
+ public AtomicReaderContext(AtomicIndexReader atomicReader) {
this(null, atomicReader, 0, 0, 0, 0);
}
}
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index 51d7ff868b7..72a1ddb3ff3 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -263,7 +263,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// The PayloadProcessorProvider to use when segments are merged
private PayloadProcessorProvider payloadProcessorProvider;
- IndexReader getReader() throws IOException {
+ DirectoryReader getReader() throws IOException {
return getReader(true);
}
@@ -326,7 +326,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @throws IOException
*/
- IndexReader getReader(boolean applyAllDeletes) throws IOException {
+ DirectoryReader getReader(boolean applyAllDeletes) throws IOException {
ensureOpen();
final long tStart = System.currentTimeMillis();
@@ -338,7 +338,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// obtained during this flush are pooled, the first time
// this method is called:
poolReaders = true;
- final IndexReader r;
+ final DirectoryReader r;
doBeforeFlush();
boolean anySegmentFlushed = false;
/*
diff --git a/lucene/src/java/org/apache/lucene/index/MergeState.java b/lucene/src/java/org/apache/lucene/index/MergeState.java
index ac17e3e24c2..76df57cbb49 100644
--- a/lucene/src/java/org/apache/lucene/index/MergeState.java
+++ b/lucene/src/java/org/apache/lucene/index/MergeState.java
@@ -31,10 +31,10 @@ import org.apache.lucene.util.InfoStream;
public class MergeState {
public static class IndexReaderAndLiveDocs {
- public final IndexReader reader;
+ public final AtomicIndexReader reader;
public final Bits liveDocs;
- public IndexReaderAndLiveDocs(IndexReader reader, Bits liveDocs) {
+ public IndexReaderAndLiveDocs(AtomicIndexReader reader, Bits liveDocs) {
this.reader = reader;
this.liveDocs = liveDocs;
}
diff --git a/lucene/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/src/java/org/apache/lucene/index/MultiDocValues.java
index 5ea61fbb000..a0e56feb4d7 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiDocValues.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiDocValues.java
@@ -33,7 +33,7 @@ import org.apache.lucene.util.ReaderUtil.Gather;
import org.apache.lucene.util.packed.PackedInts.Reader;
/**
- * A wrapper for compound IndexReader providing access to per segment
+ * A wrapper for CompositeIndexReader providing access to per segment
* {@link DocValues}
*
* @lucene.experimental
@@ -43,11 +43,11 @@ public class MultiDocValues extends DocValues {
private static DocValuesPuller DEFAULT_PULLER = new DocValuesPuller();
private static final DocValuesPuller NORMS_PULLER = new DocValuesPuller() {
- public DocValues pull(IndexReader reader, String field) throws IOException {
+ public DocValues pull(AtomicIndexReader reader, String field) throws IOException {
return reader.normValues(field);
}
- public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
+ public boolean stopLoadingOnNull(AtomicIndexReader reader, String field) throws IOException {
// for norms we drop all norms if one leaf reader has no norms and the field is present
FieldInfos fieldInfos = reader.getFieldInfos();
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@@ -69,11 +69,11 @@ public class MultiDocValues extends DocValues {
}
private static class DocValuesPuller {
- public DocValues pull(IndexReader reader, String field) throws IOException {
+ public DocValues pull(AtomicIndexReader reader, String field) throws IOException {
return reader.docValues(field);
}
- public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
+ public boolean stopLoadingOnNull(AtomicIndexReader reader, String field) throws IOException {
return false;
}
}
@@ -115,11 +115,13 @@ public class MultiDocValues extends DocValues {
private static DocValues getDocValues(IndexReader r, final String field, final DocValuesPuller puller) throws IOException {
- final IndexReader[] subs = r.getSequentialSubReaders();
- if (subs == null) {
+ if (r instanceof AtomicIndexReader) {
// already an atomic reader
- return puller.pull(r, field);
- } else if (subs.length == 0) {
+ return puller.pull((AtomicIndexReader) r, field);
+ }
+ assert r instanceof CompositeIndexReader;
+ final IndexReader[] subs = ((CompositeIndexReader) r).getSequentialSubReaders();
+ if (subs.length == 0) {
// no fields
return null;
} else if (subs.length == 1) {
@@ -136,7 +138,7 @@ public class MultiDocValues extends DocValues {
new ReaderUtil.Gather(r) {
boolean stop = false;
@Override
- protected void add(int base, IndexReader r) throws IOException {
+ protected void add(int base, AtomicIndexReader r) throws IOException {
if (stop) {
return;
}
diff --git a/lucene/src/java/org/apache/lucene/index/MultiFields.java b/lucene/src/java/org/apache/lucene/index/MultiFields.java
index 748fc28d129..8b7ddaf2d08 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiFields.java
@@ -59,59 +59,50 @@ public final class MultiFields extends Fields {
* Gather}) and iterate through them
* yourself. */
public static Fields getFields(IndexReader r) throws IOException {
- final IndexReader[] subs = r.getSequentialSubReaders();
- if (subs == null) {
+ if (r instanceof AtomicIndexReader) {
// already an atomic reader
- return r.fields();
- } else if (subs.length == 0) {
+ return ((AtomicIndexReader) r).fields();
+ }
+ assert r instanceof CompositeIndexReader;
+ final IndexReader[] subs = ((CompositeIndexReader) r).getSequentialSubReaders();
+ if (subs.length == 0) {
// no fields
return null;
- } else if (subs.length == 1) {
- return getFields(subs[0]);
} else {
+ final List fields = new ArrayList();
+ final List slices = new ArrayList();
- Fields currentFields = r.retrieveFields();
- if (currentFields == null) {
-
- final List fields = new ArrayList();
- final List slices = new ArrayList();
-
- new ReaderUtil.Gather(r) {
- @Override
- protected void add(int base, IndexReader r) throws IOException {
- final Fields f = r.fields();
- if (f != null) {
- fields.add(f);
- slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
- }
+ new ReaderUtil.Gather(r) {
+ @Override
+ protected void add(int base, AtomicIndexReader r) throws IOException {
+ final Fields f = r.fields();
+ if (f != null) {
+ fields.add(f);
+ slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
}
- }.run();
-
- if (fields.size() == 0) {
- return null;
- } else if (fields.size() == 1) {
- currentFields = fields.get(0);
- } else {
- currentFields = new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
- slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
}
- r.storeFields(currentFields);
+ }.run();
+
+ if (fields.isEmpty()) {
+ return null;
+ } else if (fields.size() == 1) {
+ return fields.get(0);
+ } else {
+ return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
+ slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
}
- return currentFields;
}
}
public static Bits getLiveDocs(IndexReader r) {
- Bits result;
if (r.hasDeletions()) {
-
final List liveDocs = new ArrayList();
final List starts = new ArrayList();
try {
final int maxDoc = new ReaderUtil.Gather(r) {
@Override
- protected void add(int base, IndexReader r) throws IOException {
+ protected void add(int base, AtomicIndexReader r) throws IOException {
// record all liveDocs, even if they are null
liveDocs.add(r.getLiveDocs());
starts.add(base);
@@ -126,16 +117,13 @@ public final class MultiFields extends Fields {
assert liveDocs.size() > 0;
if (liveDocs.size() == 1) {
// Only one actual sub reader -- optimize this case
- result = liveDocs.get(0);
+ return liveDocs.get(0);
} else {
- result = new MultiBits(liveDocs, starts, true);
+ return new MultiBits(liveDocs, starts, true);
}
-
} else {
- result = null;
+ return null;
}
-
- return result;
}
/** This method may return null if the field does not exist.*/
diff --git a/lucene/src/java/org/apache/lucene/index/MultiReader.java b/lucene/src/java/org/apache/lucene/index/MultiReader.java
index 6211cf667cd..be781b4fbc2 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiReader.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiReader.java
@@ -60,50 +60,9 @@ public class MultiReader extends BaseMultiReader {
}
@Override
- protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
- ensureOpen();
-
- boolean changed = false;
- IndexReader[] newSubReaders = new IndexReader[subReaders.length];
-
- boolean success = false;
- try {
- for (int i = 0; i < subReaders.length; i++) {
- final IndexReader newSubReader = IndexReader.openIfChanged(subReaders[i]);
- if (newSubReader != null) {
- newSubReaders[i] = newSubReader;
- changed = true;
- } else {
- newSubReaders[i] = subReaders[i];
- }
- }
- success = true;
- } finally {
- if (!success && changed) {
- for (int i = 0; i < newSubReaders.length; i++) {
- if (newSubReaders[i] != subReaders[i]) {
- try {
- newSubReaders[i].close();
- } catch (IOException ignore) {
- // keep going - we want to clean up as much as possible
- }
- }
- }
- }
- }
-
- if (changed) {
- boolean[] newDecrefOnClose = new boolean[subReaders.length];
- for (int i = 0; i < subReaders.length; i++) {
- if (newSubReaders[i] == subReaders[i]) {
- newSubReaders[i].incRef();
- newDecrefOnClose[i] = true;
- }
- }
- return new MultiReader(newSubReaders, newDecrefOnClose);
- } else {
- return null;
- }
+ protected synchronized CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
+ // nocommit: remove this method
+ return null;
}
@Override
@@ -128,7 +87,8 @@ public class MultiReader extends BaseMultiReader {
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
- if (!subReaders[i].isCurrent()) {
+ final IndexReader r = subReaders[i];
+ if (r instanceof CompositeIndexReader && !((CompositeIndexReader) r).isCurrent()) {
return false;
}
}
diff --git a/lucene/src/java/org/apache/lucene/index/ParallelReader.java b/lucene/src/java/org/apache/lucene/index/ParallelReader.java
index a9d67916f63..248d1ed6d55 100644
--- a/lucene/src/java/org/apache/lucene/index/ParallelReader.java
+++ b/lucene/src/java/org/apache/lucene/index/ParallelReader.java
@@ -25,7 +25,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
-/** An IndexReader which reads multiple, parallel indexes. Each index added
+/** An AtomicIndexReader which reads multiple, parallel indexes. Each index added
* must have the same number of documents, but typically each contains
* different fields. Each document contains the union of the fields of all
* documents with the same document number. When searching, matches for a
@@ -42,15 +42,15 @@ import org.apache.lucene.util.ReaderUtil;
* same order to the other indexes. Failure to do so will result in
* undefined behavior.
*/
-public class ParallelReader extends IndexReader {
- private List readers = new ArrayList();
+public class ParallelReader extends AtomicIndexReader {
+ private List readers = new ArrayList();
private List decrefOnClose = new ArrayList(); // remember which subreaders to decRef on close
boolean incRefReaders = false;
- private SortedMap fieldToReader = new TreeMap();
- private Map> readerToFields = new HashMap>();
- private List storedFieldReaders = new ArrayList();
+ private SortedMap fieldToReader = new TreeMap();
+ private Map> readerToFields = new HashMap>();
+ private List storedFieldReaders = new ArrayList();
private Map normsCache = new HashMap();
- private final ReaderContext topLevelReaderContext = new AtomicReaderContext(this);
+ private final AtomicReaderContext topLevelReaderContext = new AtomicReaderContext(this);
private int maxDoc;
private int numDocs;
private boolean hasDeletions;
@@ -77,7 +77,7 @@ public class ParallelReader extends IndexReader {
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder("ParallelReader(");
- final Iterator iter = readers.iterator();
+ final Iterator iter = readers.iterator();
if (iter.hasNext()) {
buffer.append(iter.next());
}
@@ -88,25 +88,25 @@ public class ParallelReader extends IndexReader {
return buffer.toString();
}
- /** Add an IndexReader.
+ /** Add an AtomicIndexReader.
* @throws IOException if there is a low-level IO error
*/
- public void add(IndexReader reader) throws IOException {
+ public void add(AtomicIndexReader reader) throws IOException {
ensureOpen();
add(reader, false);
}
- /** Add an IndexReader whose stored fields will not be returned. This can
+ /** Add an AtomicIndexReader whose stored fields will not be returned. This can
* accelerate search when stored fields are only needed from a subset of
* the IndexReaders.
*
* @throws IllegalArgumentException if not all indexes contain the same number
* of documents
* @throws IllegalArgumentException if not all indexes have the same value
- * of {@link IndexReader#maxDoc()}
+ * of {@link AtomicIndexReader#maxDoc()}
* @throws IOException if there is a low-level IO error
*/
- public void add(IndexReader reader, boolean ignoreStoredFields)
+ public void add(AtomicIndexReader reader, boolean ignoreStoredFields)
throws IOException {
ensureOpen();
@@ -129,7 +129,7 @@ public class ParallelReader extends IndexReader {
if (fieldToReader.get(fieldInfo.name) == null) {
fieldInfos.add(fieldInfo);
fieldToReader.put(fieldInfo.name, reader);
- this.fields.addField(fieldInfo.name, MultiFields.getFields(reader).terms(fieldInfo.name));
+ this.fields.addField(fieldInfo.name, reader.terms(fieldInfo.name));
}
}
@@ -205,7 +205,7 @@ public class ParallelReader extends IndexReader {
@Override
public Bits getLiveDocs() {
ensureOpen();
- return MultiFields.getLiveDocs(readers.get(0));
+ return readers.get(0).getLiveDocs();
}
@Override
@@ -214,88 +214,6 @@ public class ParallelReader extends IndexReader {
return fields;
}
- /**
- * Tries to reopen the subreaders.
- *
- * If one or more subreaders could be re-opened (i. e. subReader.reopen()
- * returned a new instance != subReader), then a new ParallelReader instance
- * is returned, otherwise null is returned.
- *
- * A re-opened instance might share one or more subreaders with the old
- * instance. Index modification operations result in undefined behavior
- * when performed before the old instance is closed.
- * (see {@link IndexReader#openIfChanged}).
- *
- * If subreaders are shared, then the reference count of those
- * readers is increased to ensure that the subreaders remain open
- * until the last referring reader is closed.
- *
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
- */
- @Override
- protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
- ensureOpen();
-
- boolean reopened = false;
- List newReaders = new ArrayList();
-
- boolean success = false;
-
- try {
- for (final IndexReader oldReader : readers) {
- IndexReader newReader = null;
- newReader = IndexReader.openIfChanged(oldReader);
- if (newReader != null) {
- reopened = true;
- } else {
- newReader = oldReader;
- }
- newReaders.add(newReader);
- }
- success = true;
- } finally {
- if (!success && reopened) {
- for (int i = 0; i < newReaders.size(); i++) {
- IndexReader r = newReaders.get(i);
- if (r != readers.get(i)) {
- try {
- r.close();
- } catch (IOException ignore) {
- // keep going - we want to clean up as much as possible
- }
- }
- }
- }
- }
-
- if (reopened) {
- List newDecrefOnClose = new ArrayList();
- // TODO: maybe add a special reopen-ctor for norm-copying?
- ParallelReader pr = new ParallelReader();
- for (int i = 0; i < readers.size(); i++) {
- IndexReader oldReader = readers.get(i);
- IndexReader newReader = newReaders.get(i);
- if (newReader == oldReader) {
- newDecrefOnClose.add(Boolean.TRUE);
- newReader.incRef();
- } else {
- // this is a new subreader instance, so on close() we don't
- // decRef but close it
- newDecrefOnClose.add(Boolean.FALSE);
- }
- pr.add(newReader, !storedFieldReaders.contains(oldReader));
- }
- pr.decrefOnClose = newDecrefOnClose;
- pr.incRefReaders = incRefReaders;
- return pr;
- } else {
- // No subreader was refreshed
- return null;
- }
- }
-
-
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
@@ -317,7 +235,7 @@ public class ParallelReader extends IndexReader {
@Override
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
ensureOpen();
- for (final IndexReader reader: storedFieldReaders) {
+ for (final AtomicIndexReader reader: storedFieldReaders) {
reader.document(docID, visitor);
}
}
@@ -327,7 +245,7 @@ public class ParallelReader extends IndexReader {
public Fields getTermVectors(int docID) throws IOException {
ensureOpen();
ParallelFields fields = new ParallelFields();
- for (Map.Entry ent : fieldToReader.entrySet()) {
+ for (Map.Entry ent : fieldToReader.entrySet()) {
String fieldName = ent.getKey();
Terms vector = ent.getValue().getTermVector(docID, fieldName);
if (vector != null) {
@@ -341,44 +259,20 @@ public class ParallelReader extends IndexReader {
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
- IndexReader reader = fieldToReader.get(field);
+ AtomicIndexReader reader = fieldToReader.get(field);
return reader==null ? false : reader.hasNorms(field);
}
@Override
public int docFreq(String field, BytesRef term) throws IOException {
ensureOpen();
- IndexReader reader = fieldToReader.get(field);
+ AtomicIndexReader reader = fieldToReader.get(field);
return reader == null? 0 : reader.docFreq(field, term);
}
- /**
- * Checks recursively if all subreaders are up to date.
- */
- @Override
- public boolean isCurrent() throws CorruptIndexException, IOException {
- ensureOpen();
- for (final IndexReader reader : readers) {
- if (!reader.isCurrent()) {
- return false;
- }
- }
-
- // all subreaders are up to date
- return true;
- }
-
- /** Not implemented.
- * @throws UnsupportedOperationException
- */
- @Override
- public long getVersion() {
- throw new UnsupportedOperationException("ParallelReader does not support this method.");
- }
-
// for testing
- IndexReader[] getSubReaders() {
- return readers.toArray(new IndexReader[readers.size()]);
+ AtomicIndexReader[] getSubReaders() {
+ return readers.toArray(new AtomicIndexReader[readers.size()]);
}
@Override
@@ -393,7 +287,7 @@ public class ParallelReader extends IndexReader {
}
@Override
- public ReaderContext getTopReaderContext() {
+ public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return topLevelReaderContext;
}
@@ -401,8 +295,8 @@ public class ParallelReader extends IndexReader {
// TODO: I suspect this is completely untested!!!!!
@Override
public DocValues docValues(String field) throws IOException {
- IndexReader reader = fieldToReader.get(field);
- return reader == null ? null : MultiDocValues.getDocValues(reader, field);
+ AtomicIndexReader reader = fieldToReader.get(field);
+ return reader == null ? null : reader.docValues(field);
}
// TODO: I suspect this is completely untested!!!!!
@@ -410,8 +304,8 @@ public class ParallelReader extends IndexReader {
public synchronized DocValues normValues(String field) throws IOException {
DocValues values = normsCache.get(field);
if (values == null) {
- IndexReader reader = fieldToReader.get(field);
- values = reader == null ? null : MultiDocValues.getNormDocValues(reader, field);
+ AtomicIndexReader reader = fieldToReader.get(field);
+ values = reader == null ? null : reader.normValues(field);
normsCache.put(field, values);
}
return values;
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
index 044ac3ebf58..ba6f9128ec9 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -76,7 +76,7 @@ final class SegmentMerger {
try {
new ReaderUtil.Gather(reader) {
@Override
- protected void add(int base, IndexReader r) {
+ protected void add(int base, AtomicIndexReader r) {
mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs()));
}
}.run();
@@ -205,7 +205,7 @@ final class SegmentMerger {
Map normValuesTypes = new HashMap();
for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : mergeState.readers) {
- final IndexReader reader = readerAndLiveDocs.reader;
+ final AtomicIndexReader reader = readerAndLiveDocs.reader;
FieldInfos readerFieldInfos = reader.getFieldInfos();
for (FieldInfo fi : readerFieldInfos) {
FieldInfo merged = mergeState.fieldInfos.add(fi);
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/src/java/org/apache/lucene/index/SegmentReader.java
index 7bda25bbba8..32b42d4fe61 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentReader.java
@@ -31,10 +31,10 @@ import org.apache.lucene.util.Bits;
/**
* @lucene.experimental
*/
-public final class SegmentReader extends IndexReader {
+public final class SegmentReader extends AtomicIndexReader {
private final SegmentInfo si;
- private final ReaderContext readerContext = new AtomicReaderContext(this);
+ private final AtomicReaderContext readerContext = new AtomicReaderContext(this);
private final BitVector liveDocs;
@@ -230,7 +230,7 @@ public final class SegmentReader extends IndexReader {
}
@Override
- public ReaderContext getTopReaderContext() {
+ public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return readerContext;
}
diff --git a/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java b/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
index 8fe9b9dfdfc..d95ce0ac635 100644
--- a/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
+++ b/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
@@ -50,14 +50,20 @@ import org.apache.lucene.index.MultiReader; // javadoc
* yourself.
*/
-public final class SlowMultiReaderWrapper extends FilterIndexReader {
+public final class SlowMultiReaderWrapper extends AtomicIndexReader {
- private final ReaderContext readerContext;
+ private final CompositeIndexReader in;
+ private final AtomicReaderContext readerContext;
private final Map normsCache = new HashMap();
+ private final Fields fields;
+ private final Bits liveDocs;
- public SlowMultiReaderWrapper(IndexReader other) {
- super(other);
- readerContext = new AtomicReaderContext(this); // emulate atomic reader!
+ public SlowMultiReaderWrapper(CompositeIndexReader other) throws IOException {
+ super();
+ in = other;
+ readerContext = new AtomicReaderContext(this);
+ fields = MultiFields.getFields(in);
+ liveDocs = MultiFields.getLiveDocs(in);
}
@Override
@@ -68,7 +74,7 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
@Override
public Fields fields() throws IOException {
ensureOpen();
- return MultiFields.getFields(in);
+ return fields;
}
@Override
@@ -87,25 +93,59 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
}
return values;
}
+
+ @Override
+ public Fields getTermVectors(int docID)
+ throws IOException {
+ ensureOpen();
+ return in.getTermVectors(docID);
+ }
+
+ @Override
+ public int numDocs() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.numDocs();
+ }
+
+ @Override
+ public int maxDoc() {
+ // Don't call ensureOpen() here (it could affect performance)
+ return in.maxDoc();
+ }
+
+ @Override
+ public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
+ ensureOpen();
+ in.document(docID, visitor);
+ }
+
@Override
public Bits getLiveDocs() {
ensureOpen();
- return MultiFields.getLiveDocs(in);
+ return liveDocs;
}
@Override
- public IndexReader[] getSequentialSubReaders() {
- return null;
- }
-
- @Override
- public ReaderContext getTopReaderContext() {
+ public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return readerContext;
}
@Override
public FieldInfos getFieldInfos() {
+ ensureOpen();
return ReaderUtil.getMergedFieldInfos(in);
}
+
+ @Override
+ public boolean hasDeletions() {
+ ensureOpen();
+ return liveDocs != null;
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ // TODO: as this is a wrapper, should we really close the delegate?
+ in.close();
+ }
}
diff --git a/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java b/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java
index d5d6dc73a65..b61cd366481 100644
--- a/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java
+++ b/lucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java
@@ -22,7 +22,7 @@ import java.util.Collections;
import java.util.Map;
import java.util.WeakHashMap;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
@@ -76,7 +76,7 @@ public class CachingWrapperFilter extends Filter {
* returns true, else it copies the {@link DocIdSetIterator} into
* a {@link FixedBitSet}.
*/
- protected DocIdSet docIdSetToCache(DocIdSet docIdSet, IndexReader reader) throws IOException {
+ protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicIndexReader reader) throws IOException {
if (docIdSet == null) {
// this is better than returning null, as the nonnull result can be cached
return DocIdSet.EMPTY_DOCIDSET;
@@ -102,7 +102,7 @@ public class CachingWrapperFilter extends Filter {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
- final IndexReader reader = context.reader;
+ final AtomicIndexReader reader = context.reader;
// Only cache if incoming acceptDocs is == live docs;
// if Lucene passes in more interesting acceptDocs in
diff --git a/lucene/src/java/org/apache/lucene/search/FieldCache.java b/lucene/src/java/org/apache/lucene/search/FieldCache.java
index 1c3bff1af07..882e7905797 100644
--- a/lucene/src/java/org/apache/lucene/search/FieldCache.java
+++ b/lucene/src/java/org/apache/lucene/search/FieldCache.java
@@ -24,7 +24,7 @@ import java.text.DecimalFormat;
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.index.DocTermOrds;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@@ -63,7 +63,7 @@ public interface FieldCache {
}
/** Interface to parse bytes from document fields.
- * @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser, boolean)
+ * @see FieldCache#getBytes(AtomicIndexReader, String, FieldCache.ByteParser, boolean)
*/
public interface ByteParser extends Parser {
/** Return a single Byte representation of this field's value. */
@@ -71,7 +71,7 @@ public interface FieldCache {
}
/** Interface to parse shorts from document fields.
- * @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser, boolean)
+ * @see FieldCache#getShorts(AtomicIndexReader, String, FieldCache.ShortParser, boolean)
*/
public interface ShortParser extends Parser {
/** Return a short representation of this field's value. */
@@ -79,7 +79,7 @@ public interface FieldCache {
}
/** Interface to parse ints from document fields.
- * @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser, boolean)
+ * @see FieldCache#getInts(AtomicIndexReader, String, FieldCache.IntParser, boolean)
*/
public interface IntParser extends Parser {
/** Return an integer representation of this field's value. */
@@ -87,7 +87,7 @@ public interface FieldCache {
}
/** Interface to parse floats from document fields.
- * @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser, boolean)
+ * @see FieldCache#getFloats(AtomicIndexReader, String, FieldCache.FloatParser, boolean)
*/
public interface FloatParser extends Parser {
/** Return an float representation of this field's value. */
@@ -95,7 +95,7 @@ public interface FieldCache {
}
/** Interface to parse long from document fields.
- * @see FieldCache#getLongs(IndexReader, String, FieldCache.LongParser, boolean)
+ * @see FieldCache#getLongs(AtomicIndexReader, String, FieldCache.LongParser, boolean)
*/
public interface LongParser extends Parser {
/** Return an long representation of this field's value. */
@@ -103,7 +103,7 @@ public interface FieldCache {
}
/** Interface to parse doubles from document fields.
- * @see FieldCache#getDoubles(IndexReader, String, FieldCache.DoubleParser, boolean)
+ * @see FieldCache#getDoubles(AtomicIndexReader, String, FieldCache.DoubleParser, boolean)
*/
public interface DoubleParser extends Parser {
/** Return an long representation of this field's value. */
@@ -303,7 +303,7 @@ public interface FieldCache {
* reader.maxDoc(), with turned on bits for each docid that
* does have a value for this field.
*/
- public Bits getDocsWithField(IndexReader reader, String field)
+ public Bits getDocsWithField(AtomicIndexReader reader, String field)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@@ -317,7 +317,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public byte[] getBytes (IndexReader reader, String field, boolean setDocsWithField)
+ public byte[] getBytes (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@@ -332,7 +332,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public byte[] getBytes (IndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
+ public byte[] getBytes (AtomicIndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@@ -346,7 +346,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public short[] getShorts (IndexReader reader, String field, boolean setDocsWithField)
+ public short[] getShorts (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@@ -361,7 +361,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public short[] getShorts (IndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
+ public short[] getShorts (AtomicIndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@@ -375,7 +375,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public int[] getInts (IndexReader reader, String field, boolean setDocsWithField)
+ public int[] getInts (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@@ -390,7 +390,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public int[] getInts (IndexReader reader, String field, IntParser parser, boolean setDocsWithField)
+ public int[] getInts (AtomicIndexReader reader, String field, IntParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if
@@ -404,7 +404,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public float[] getFloats (IndexReader reader, String field, boolean setDocsWithField)
+ public float[] getFloats (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if
@@ -419,7 +419,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public float[] getFloats (IndexReader reader, String field,
+ public float[] getFloats (AtomicIndexReader reader, String field,
FloatParser parser, boolean setDocsWithField) throws IOException;
/**
@@ -435,7 +435,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws java.io.IOException If any error occurs.
*/
- public long[] getLongs(IndexReader reader, String field, boolean setDocsWithField)
+ public long[] getLongs(AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/**
@@ -452,7 +452,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public long[] getLongs(IndexReader reader, String field, LongParser parser, boolean setDocsWithField)
+ public long[] getLongs(AtomicIndexReader reader, String field, LongParser parser, boolean setDocsWithField)
throws IOException;
/**
@@ -468,7 +468,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public double[] getDoubles(IndexReader reader, String field, boolean setDocsWithField)
+ public double[] getDoubles(AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/**
@@ -485,7 +485,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public double[] getDoubles(IndexReader reader, String field, DoubleParser parser, boolean setDocsWithField)
+ public double[] getDoubles(AtomicIndexReader reader, String field, DoubleParser parser, boolean setDocsWithField)
throws IOException;
/** Returned by {@link #getTerms} */
@@ -513,15 +513,15 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public DocTerms getTerms (IndexReader reader, String field)
+ public DocTerms getTerms (AtomicIndexReader reader, String field)
throws IOException;
- /** Expert: just like {@link #getTerms(IndexReader,String)},
+ /** Expert: just like {@link #getTerms(AtomicIndexReader,String)},
* but you can specify whether more RAM should be consumed in exchange for
* faster lookups (default is "true"). Note that the
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
- public DocTerms getTerms (IndexReader reader, String field, boolean fasterButMoreRAM)
+ public DocTerms getTerms (AtomicIndexReader reader, String field, boolean fasterButMoreRAM)
throws IOException;
/** Returned by {@link #getTermsIndex} */
@@ -589,16 +589,16 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
- public DocTermsIndex getTermsIndex (IndexReader reader, String field)
+ public DocTermsIndex getTermsIndex (AtomicIndexReader reader, String field)
throws IOException;
/** Expert: just like {@link
- * #getTermsIndex(IndexReader,String)}, but you can specify
+ * #getTermsIndex(AtomicIndexReader,String)}, but you can specify
* whether more RAM should be consumed in exchange for
* faster lookups (default is "true"). Note that the
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
- public DocTermsIndex getTermsIndex (IndexReader reader, String field, boolean fasterButMoreRAM)
+ public DocTermsIndex getTermsIndex (AtomicIndexReader reader, String field, boolean fasterButMoreRAM)
throws IOException;
/**
@@ -611,7 +611,7 @@ public interface FieldCache {
* @return a {@link DocTermOrds} instance
* @throws IOException If any error occurs.
*/
- public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException;
+ public DocTermOrds getDocTermOrds(AtomicIndexReader reader, String field) throws IOException;
/**
* EXPERT: A unique Identifier/Description for each item in the FieldCache.
@@ -677,7 +677,7 @@ public interface FieldCache {
* currently in the FieldCache.
*
* NOTE: These CacheEntry objects maintain a strong reference to the
- * Cached Values. Maintaining references to a CacheEntry the IndexReader
+ * Cached Values. Maintaining references to a CacheEntry the AtomicIndexReader
* associated with it has garbage collected will prevent the Value itself
* from being garbage collected when the Cache drops the WeakReference.
*
@@ -705,7 +705,7 @@ public interface FieldCache {
* top-level reader, it usually will have no effect as
* Lucene now caches at the segment reader level.
*/
- public abstract void purge(IndexReader r);
+ public abstract void purge(AtomicIndexReader r);
/**
* If non-null, FieldCacheImpl will warn whenever
diff --git a/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java
index 61f27c48e7d..0d08b5c097b 100644
--- a/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java
+++ b/lucene/src/java/org/apache/lucene/search/FieldCacheImpl.java
@@ -29,6 +29,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.SegmentReader;
@@ -48,8 +49,6 @@ import org.apache.lucene.util.packed.PackedInts;
* Expert: The default cache implementation, storing all values in memory.
* A WeakHashMap is used for storage.
*
- * Created: May 19, 2004 4:40:36 PM
- *
* @since lucene 1.4
*/
class FieldCacheImpl implements FieldCache {
@@ -76,7 +75,7 @@ class FieldCacheImpl implements FieldCache {
init();
}
- public synchronized void purge(IndexReader r) {
+ public synchronized void purge(AtomicIndexReader r) {
for(Cache c : caches.values()) {
c.purge(r);
}
@@ -155,24 +154,23 @@ class FieldCacheImpl implements FieldCache {
};
// composite/SlowMultiReaderWrapper fieldcaches don't purge until composite reader is closed.
- final IndexReader.ReaderClosedListener purgeReader = new IndexReader.ReaderClosedListener() {
+ final AtomicIndexReader.ReaderClosedListener purgeReader = new AtomicIndexReader.ReaderClosedListener() {
@Override
public void onClose(IndexReader owner) {
- FieldCacheImpl.this.purge(owner);
+ assert owner instanceof AtomicIndexReader;
+ FieldCacheImpl.this.purge((AtomicIndexReader) owner);
}
};
- private void initReader(IndexReader reader) {
+ private void initReader(AtomicIndexReader reader) {
if (reader instanceof SegmentReader) {
((SegmentReader) reader).addCoreClosedListener(purgeCore);
- } else if (reader.getSequentialSubReaders() != null) {
- throw new UnsupportedOperationException("Please use SlowMultiReaderWrapper, if you really need a top level FieldCache");
} else {
// we have a slow reader of some sort, try to register a purge event
// rather than relying on gc:
Object key = reader.getCoreCacheKey();
- if (key instanceof IndexReader) {
- ((IndexReader)key).addReaderClosedListener(purgeReader);
+ if (key instanceof AtomicIndexReader) {
+ ((AtomicIndexReader)key).addReaderClosedListener(purgeReader);
} else {
// last chance
reader.addReaderClosedListener(purgeReader);
@@ -191,11 +189,11 @@ class FieldCacheImpl implements FieldCache {
final Map