LUCENE-2858: First steps, still incomplete. No tests pass or even compile, but core code now compiles. See issue!

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2858@1234441 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2012-01-21 23:22:50 +00:00
parent 0e64939f81
commit 42b2c86116
33 changed files with 879 additions and 1013 deletions
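For orientation, the split this branch introduces looks roughly as follows; a sketch assembled from the diffs below, not a final API (the commit message itself flags the state as incomplete):

// Reader hierarchy after this commit (incomplete, per the commit message):
//
//   IndexReader                 abstract base: refCount, close(), static open() helpers
//   |- AtomicIndexReader        leaf readers: fields(), docValues(), normValues(), getLiveDocs()
//   |    e.g. SegmentReader, FilterIndexReader
//   |- CompositeIndexReader     multi readers: getSequentialSubReaders(), reopen machinery
//        e.g. BaseMultiReader -> DirectoryReader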

PerDocConsumer.java

@@ -20,7 +20,7 @@ import java.io.IOException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.DocValues.Type;
@@ -65,13 +65,13 @@ public abstract class PerDocConsumer implements Closeable {
/**
* Returns a {@link DocValues} instance for merging from the given reader for the given
* {@link FieldInfo}. This method is used for merging and uses
* {@link IndexReader#docValues(String)} by default.
* {@link AtomicIndexReader#docValues(String)} by default.
* <p>
* To enable {@link DocValues} merging for different {@link DocValues} than
* the default override this method accordingly.
* <p>
*/
protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info) throws IOException {
protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info) throws IOException {
return reader.docValues(info.name);
}
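For illustration, a hedged sketch of an override against the new signature (hypothetical subclass; PerDocConsumer's other abstract methods are elided), mirroring the norms formats changed below:

// Hypothetical consumer; only the merge hook is shown.
@Override
protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info)
    throws IOException {
  // The reader is statically atomic now, so per-field norms can be pulled
  // directly, without instanceof checks or wrappers.
  return reader.normValues(info.name);
}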

Lucene40NormsFormat.java

@@ -26,7 +26,7 @@ import org.apache.lucene.index.DocValues.Type;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState;
@@ -95,7 +95,7 @@ public class Lucene40NormsFormat extends NormsFormat {
}
@Override
protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info)
throws IOException {
return reader.normValues(info.name);
}

SimpleTextNormsConsumer.java

@@ -28,7 +28,7 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.store.Directory;
@@ -87,7 +87,7 @@ public class SimpleTextNormsConsumer extends PerDocConsumer {
}
@Override
protected DocValues getDocValuesForMerge(IndexReader reader, FieldInfo info)
protected DocValues getDocValuesForMerge(AtomicIndexReader reader, FieldInfo info)
throws IOException {
return reader.normValues(info.name);
}

AtomicIndexReader.java (new file)

@@ -0,0 +1,291 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.search.SearcherManager; // javadocs
import org.apache.lucene.store.*;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil; // for javadocs
/** IndexReader is an abstract class, providing an interface for accessing an
index. Search of an index is done entirely through this abstract interface,
so that any subclass which implements it is searchable.
<p> Concrete subclasses of IndexReader are usually constructed with a call to
one of the static <code>open()</code> methods, e.g. {@link
#open(Directory)}.
<p> For efficiency, in this API documents are often referred to via
<i>document numbers</i>, non-negative integers which each name a unique
document in the index. These document numbers are ephemeral--they may change
as documents are added to and deleted from an index. Clients should thus not
rely on a given document having the same number between sessions.
<p>
<b>NOTE</b>: for backwards API compatibility, several methods are not listed
as abstract, but have no useful implementations in this base class and
instead always throw UnsupportedOperationException. Subclasses are
strongly encouraged to override these methods, but in many cases may not
need to.
</p>
<p>
<a name="thread-safety"></a><p><b>NOTE</b>: {@link
IndexReader} instances are completely thread
safe, meaning multiple threads can call any of its methods,
concurrently. If your application requires external
synchronization, you should <b>not</b> synchronize on the
<code>IndexReader</code> instance; use your own
(non-Lucene) objects instead.
*/
public abstract class AtomicIndexReader extends IndexReader {
protected AtomicIndexReader() {
super();
}
@Override
public abstract AtomicReaderContext getTopReaderContext();
/** Returns true if there are norms stored for this field. */
public boolean hasNorms(String field) throws IOException {
// backward compatible implementation.
// SegmentReader has an efficient implementation.
ensureOpen();
return normValues(field) != null;
}
/**
* Returns {@link Fields} for this reader.
* This method may return null if the reader has no
* postings.
*
* <p><b>NOTE</b>: if this is a multi reader ({@link
* #getSequentialSubReaders} is not null) then this
* method will throw UnsupportedOperationException. If
* you really need a {@link Fields} for such a reader,
* use {@link MultiFields#getFields}. However, for
* performance reasons, it's best to get all sub-readers
* using {@link ReaderUtil#gatherSubReaders} and iterate
* through them yourself. */
public abstract Fields fields() throws IOException;
public final int docFreq(Term term) throws IOException {
return docFreq(term.field(), term.bytes());
}
/** Returns the number of documents containing the term
* <code>t</code>. This method returns 0 if the term or
* field does not exist. This method does not take into
* account deleted documents that have not yet been merged
* away. */
public int docFreq(String field, BytesRef term) throws IOException {
final Fields fields = fields();
if (fields == null) {
return 0;
}
final Terms terms = fields.terms(field);
if (terms == null) {
return 0;
}
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docFreq();
} else {
return 0;
}
}
/** Returns the total number of occurrences of the term
* <code>t</code> across all documents. This method returns 0 if the
* term or field does not exist. This method does not take into
* account deleted documents that have not yet been merged
* away. */
public final long totalTermFreq(String field, BytesRef term) throws IOException {
final Fields fields = fields();
if (fields == null) {
return 0;
}
final Terms terms = fields.terms(field);
if (terms == null) {
return 0;
}
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.totalTermFreq();
} else {
return 0;
}
}
/** This may return null if the field does not exist.*/
public final Terms terms(String field) throws IOException {
final Fields fields = fields();
if (fields == null) {
return null;
}
return fields.terms(field);
}
/** Returns {@link DocsEnum} for the specified field &
* term. This may return null, if either the field or
* term does not exist. */
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
assert field != null;
assert term != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docs(liveDocs, null, needsFreqs);
}
}
}
return null;
}
/** Returns {@link DocsAndPositionsEnum} for the specified
* field & term. This may return null, if either the
* field or term does not exist, or needsOffsets is
* true but offsets were not indexed for this field. */
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
assert field != null;
assert term != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
}
}
}
return null;
}
/**
* Returns {@link DocsEnum} for the specified field and
* {@link TermState}. This may return null, if either the field or the term
* does not exist or the {@link TermState} is invalid for the underlying
* implementation.*/
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
assert state != null;
assert field != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
termsEnum.seekExact(term, state);
return termsEnum.docs(liveDocs, null, needsFreqs);
}
}
return null;
}
/**
* Returns {@link DocsAndPositionsEnum} for the specified field and
* {@link TermState}. This may return null, if either the field or the term
* does not exist, the {@link TermState} is invalid for the underlying
* implementation, or needsOffsets is true but offsets
* were not indexed for this field. */
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
assert state != null;
assert field != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
termsEnum.seekExact(term, state);
return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
}
}
return null;
}
/** Returns the number of unique terms (across all fields)
* in this reader.
*
* @return number of unique terms or -1 if this count
* cannot be easily determined (eg Multi*Readers).
* Instead, you should call {@link
* #getSequentialSubReaders} and ask each sub reader for
* its unique term count. */
public final long getUniqueTermCount() throws IOException {
final Fields fields = fields();
if (fields == null) {
return 0;
}
return fields.getUniqueTermCount();
}
/**
* Returns {@link DocValues} for this field.
* This method may return null if the reader has no per-document
* values stored.
*
* <p><b>NOTE</b>: if this is a multi reader ({@link
* #getSequentialSubReaders} is not null) then this
* method will throw UnsupportedOperationException. If
* you really need {@link DocValues} for such a reader,
* use {@link MultiDocValues#getDocValues(IndexReader,String)}. However, for
* performance reasons, it's best to get all sub-readers
* using {@link ReaderUtil#gatherSubReaders} and iterate
* through them yourself. */
public abstract DocValues docValues(String field) throws IOException;
public abstract DocValues normValues(String field) throws IOException;
/**
* Get the {@link FieldInfos} describing all fields in
* this reader. NOTE: do not make any changes to the
* returned FieldInfos!
*
* @lucene.experimental
*/
public abstract FieldInfos getFieldInfos();
/** Returns the {@link Bits} representing live (not
* deleted) docs. A set bit indicates the doc ID has not
* been deleted. If this method returns null it means
* there are no deleted documents (all documents are
* live).
*
* The returned instance has been safely published for
* use by multiple threads without additional
* synchronization.
*/
public abstract Bits getLiveDocs();
}
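As a usage sketch of the leaf API above (the field name, the term, and the printPostings helper are hypothetical; imports shown for self-containment):

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

// Walk the postings of body:lucene on a single leaf (atomic) reader.
static void printPostings(AtomicIndexReader leaf) throws IOException {
  // termDocsEnum returns null if the field or the term is absent.
  DocsEnum docs = leaf.termDocsEnum(leaf.getLiveDocs(), "body",
      new BytesRef("lucene"), /*needsFreqs*/ true);
  if (docs == null) {
    return;
  }
  for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
    System.out.println("doc=" + doc + " freq=" + docs.freq());
  }
}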

BaseMultiReader.java

@@ -23,10 +23,10 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
abstract class BaseMultiReader<R extends IndexReader> extends CompositeIndexReader {
protected final R[] subReaders;
protected final int[] starts; // 1st docno for each segment
private final ReaderContext topLevelContext;
private final CompositeReaderContext topLevelContext;
private final int maxDoc;
private final int numDocs;
private final boolean hasDeletions;
@@ -49,26 +49,11 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
this.maxDoc = maxDoc;
this.numDocs = numDocs;
this.hasDeletions = hasDeletions;
topLevelContext = ReaderUtil.buildReaderContext(this);
}
@Override
public FieldInfos getFieldInfos() {
throw new UnsupportedOperationException("call getFieldInfos() on each sub reader, or use ReaderUtil.getMergedFieldInfos, instead");
topLevelContext = (CompositeReaderContext) ReaderUtil.buildReaderContext(this);
}
@Override
public Fields fields() throws IOException {
throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
}
@Override
protected abstract IndexReader doOpenIfChanged() throws CorruptIndexException, IOException;
@Override
public Bits getLiveDocs() {
throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
}
protected abstract CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException;
@Override
public Fields getTermVectors(int docID) throws IOException {
@@ -109,43 +94,14 @@ abstract class BaseMultiReader<R extends IndexReader> extends IndexReader {
}
return ReaderUtil.subIndex(docID, this.starts);
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
@Override
public int docFreq(String field, BytesRef t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++) {
total += subReaders[i].docFreq(field, t);
}
return total;
}
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
@Override
public ReaderContext getTopReaderContext() {
public CompositeReaderContext getTopReaderContext() {
return topLevelContext;
}
@Override
public DocValues docValues(String field) throws IOException {
throw new UnsupportedOperationException("please use MultiDocValues#getDocValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level DocValues");
}
@Override
public DocValues normValues(String field) throws IOException {
throw new UnsupportedOperationException("please use MultiDocValues#getNormValues, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Norm DocValues ");
}
}
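The bodies removed here all pointed, via their exception messages, at the multi-level helpers; roughly (given any composite reader and a hypothetical field "f"):

Fields topFields = MultiFields.getFields(reader);             // instead of reader.fields()
Bits liveDocs = MultiFields.getLiveDocs(reader);              // instead of reader.getLiveDocs()
DocValues values = MultiDocValues.getDocValues(reader, "f");  // instead of reader.docValues("f")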

BufferedDeletesStream.java

@@ -435,14 +435,14 @@ class BufferedDeletesStream {
}
// Delete by query
private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, IndexWriter.ReadersAndLiveDocs rld, SegmentReader reader) throws IOException {
private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, IndexWriter.ReadersAndLiveDocs rld, final SegmentReader reader) throws IOException {
long delCount = 0;
final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
final AtomicReaderContext readerContext = reader.getTopReaderContext();
boolean any = false;
for (QueryAndLimit ent : queriesIter) {
Query query = ent.query;
int limit = ent.limit;
final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, readerContext.reader.getLiveDocs());
final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext, reader.getLiveDocs());
if (docs != null) {
final DocIdSetIterator it = docs.iterator();
if (it != null) {

CompositeIndexReader.java (new file)

@@ -0,0 +1,267 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.search.SearcherManager; // javadocs
import org.apache.lucene.store.*;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil; // for javadocs
/** IndexReader is an abstract class, providing an interface for accessing an
index. Search of an index is done entirely through this abstract interface,
so that any subclass which implements it is searchable.
<p> Concrete subclasses of IndexReader are usually constructed with a call to
one of the static <code>open()</code> methods, e.g. {@link
#open(Directory)}.
<p> For efficiency, in this API documents are often referred to via
<i>document numbers</i>, non-negative integers which each name a unique
document in the index. These document numbers are ephemeral--they may change
as documents are added to and deleted from an index. Clients should thus not
rely on a given document having the same number between sessions.
<p>
<b>NOTE</b>: for backwards API compatibility, several methods are not listed
as abstract, but have no useful implementations in this base class and
instead always throw UnsupportedOperationException. Subclasses are
strongly encouraged to override these methods, but in many cases may not
need to.
</p>
<p>
<a name="thread-safety"></a><p><b>NOTE</b>: {@link
IndexReader} instances are completely thread
safe, meaning multiple threads can call any of its methods,
concurrently. If your application requires external
synchronization, you should <b>not</b> synchronize on the
<code>IndexReader</code> instance; use your own
(non-Lucene) objects instead.
*/
public abstract class CompositeIndexReader extends IndexReader {
protected CompositeIndexReader() {
super();
}
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
buffer.append(getClass().getSimpleName());
buffer.append('(');
final IndexReader[] subReaders = getSequentialSubReaders();
if ((subReaders != null) && (subReaders.length > 0)) {
buffer.append(subReaders[0]);
for (int i = 1; i < subReaders.length; ++i) {
buffer.append(" ").append(subReaders[i]);
}
}
buffer.append(')');
return buffer.toString();
}
@Override
public abstract CompositeReaderContext getTopReaderContext();
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader)
*/
protected CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
throw new UnsupportedOperationException("This reader does not support reopen().");
}
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader, IndexCommit)
*/
protected CompositeIndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
}
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader, IndexWriter, boolean)
*/
protected CompositeIndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
return writer.getReader(applyAllDeletes);
}
/**
* Version number when this IndexReader was opened. Not
* implemented in the IndexReader base class.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #openIfChanged} on
* a reader based on a Directory), then this method
* returns the version recorded in the commit that the
* reader opened. This version is advanced every time
* {@link IndexWriter#commit} is called.</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #openIfChanged}
* on a near real-time reader), then this method returns
* the version of the last commit done by the writer.
* Note that even as further changes are made with the
* writer, the version will not change until a commit is
* completed. Thus, you should not rely on this method to
* determine when a near real-time reader should be
* opened. Use {@link #isCurrent} instead.</p>
*/
public abstract long getVersion();
/**
* Check whether any new changes have occurred to the
* index since this reader was opened.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #openIfChanged} on
* a reader based on a Directory), then this method checks
* if any further commits (see {@link IndexWriter#commit})
* have occurred in that directory.</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #openIfChanged}
* on a near real-time reader), then this method checks if
* either a new commit has occurred, or any new
* uncommitted changes have taken place via the writer.
* Note that even if the writer has only performed
* merging, this method will still return false.</p>
*
* <p>In any event, if this returns false, you should call
* {@link #openIfChanged} to get a new reader that sees the
* changes.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @throws UnsupportedOperationException unless overridden in subclass
*/
public abstract boolean isCurrent() throws CorruptIndexException, IOException;
/**
* Returns the time the index in the named directory was last modified.
* Do not use this to check whether the reader is still up-to-date, use
* {@link #isCurrent()} instead.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static long lastModified(final Directory directory) throws CorruptIndexException, IOException {
return ((Long) new SegmentInfos.FindSegmentsFile(directory) {
@Override
public Object doBody(String segmentFileName) throws IOException {
return Long.valueOf(directory.fileModified(segmentFileName));
}
}.run()).longValue();
}
/**
* Reads version number from segments files. The version number is
* initialized with a timestamp and then increased by one for each change of
* the index.
*
* @param directory where the index resides.
* @return version number.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static long getCurrentVersion(Directory directory) throws CorruptIndexException, IOException {
return SegmentInfos.readCurrentVersion(directory);
}
/**
* Reads commitUserData, previously passed to {@link
* IndexWriter#commit(Map)}, from current index
* segments file. This will return null if {@link
* IndexWriter#commit(Map)} has never been called for
* this index.
*
* @param directory where the index resides.
* @return commit userData.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @see #getCommitUserData()
*/
public static Map<String, String> getCommitUserData(Directory directory) throws CorruptIndexException, IOException {
return SegmentInfos.readCurrentUserData(directory);
}
/**
* Retrieve the String userData optionally passed to
* IndexWriter#commit. This will return null if {@link
* IndexWriter#commit(Map)} has never been called for
* this index.
*
* @see #getCommitUserData(Directory)
*/
public Map<String,String> getCommitUserData() {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/**
* Expert: return the IndexCommit that this reader has
* opened. This method is only implemented by those
* readers that correspond to a Directory with its own
* segments_N file.
*
* @lucene.experimental
*/
public IndexCommit getIndexCommit() throws IOException {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/** Expert: returns the sequential sub readers that this
* reader is logically composed of. If this reader is not composed
* of sequential child readers, it should return null.
* If this method returns an empty array, that means this
* reader is a null reader (for example a MultiReader
* that has no sub readers).
*/
public abstract IndexReader[] getSequentialSubReaders();
/** For IndexReader implementations that use
* TermInfosReader to read terms, this returns the
* current indexDivisor as specified when the reader was
* opened.
*/
public int getTermInfosIndexDivisor() {
throw new UnsupportedOperationException("This reader does not support this method.");
}
}
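A hedged sketch of the reopen idiom at the composite level (the index path is hypothetical; per the IndexReader diff below, the static open() now returns a DirectoryReader, which is a CompositeIndexReader in this commit):

import java.io.File;
import org.apache.lucene.store.FSDirectory;

CompositeIndexReader reader = IndexReader.open(FSDirectory.open(new File("/tmp/idx")));
// ... later:
CompositeIndexReader newReader = IndexReader.openIfChanged(reader);
if (newReader != null) { // null means the index is unchanged
  reader.close();
  reader = newReader;
}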

DirectoryReader.java

@@ -32,8 +32,9 @@ import org.apache.lucene.util.IOUtils;
/**
* An IndexReader which reads indexes with multiple segments.
* To get an instance of this reader use {@link IndexReader#open(Directory)}.
*/
final class DirectoryReader extends BaseMultiReader<SegmentReader> {
public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
protected final Directory directory;
private final IndexWriter writer;
private final SegmentInfos segmentInfos;
@@ -50,9 +51,9 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
this.applyAllDeletes = applyAllDeletes;
}
static IndexReader open(final Directory directory, final IndexCommit commit,
static DirectoryReader open(final Directory directory, final IndexCommit commit,
final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
return (DirectoryReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
SegmentInfos sis = new SegmentInfos();
@@ -222,12 +223,12 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
}
@Override
protected final IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
protected final CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
return doOpenIfChanged(null);
}
@Override
protected final IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
protected final CompositeIndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
ensureOpen();
// If we were obtained by writer.getReader(), re-ask the
@@ -240,7 +241,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
}
@Override
protected final IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
protected final CompositeIndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
ensureOpen();
if (writer == this.writer && applyAllDeletes == this.applyAllDeletes) {
return doOpenFromWriter(null);
@@ -250,7 +251,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
}
}
private final IndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
private final CompositeIndexReader doOpenFromWriter(IndexCommit commit) throws CorruptIndexException, IOException {
if (commit != null) {
throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
}
@@ -259,7 +260,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
return null;
}
IndexReader reader = writer.getReader(applyAllDeletes);
CompositeIndexReader reader = writer.getReader(applyAllDeletes);
// If in fact no changes took place, return null:
if (reader.getVersion() == segmentInfos.getVersion()) {
@@ -270,7 +271,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
return reader;
}
private synchronized IndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
private synchronized CompositeIndexReader doOpenNoWriter(IndexCommit commit) throws CorruptIndexException, IOException {
if (commit == null) {
if (isCurrent()) {
@@ -285,7 +286,7 @@ final class DirectoryReader extends BaseMultiReader<SegmentReader> {
}
}
return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
return (CompositeIndexReader) new SegmentInfos.FindSegmentsFile(directory) {
@Override
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
final SegmentInfos infos = new SegmentInfos();

FilterIndexReader.java

@@ -39,7 +39,7 @@ import java.util.Comparator;
* To reopen, you have to first reopen the underlying reader
* and wrap it again with the custom filter.
*/
public class FilterIndexReader extends IndexReader {
public class FilterIndexReader extends AtomicIndexReader {
/** Base class for filtering {@link Fields}
* implementations. */
@@ -279,14 +279,14 @@ public class FilterIndexReader extends IndexReader {
}
}
protected IndexReader in;
protected AtomicIndexReader in;
/**
* <p>Construct a FilterIndexReader based on the specified base reader.
* <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
* @param in specified base reader.
*/
public FilterIndexReader(IndexReader in) {
public FilterIndexReader(AtomicIndexReader in) {
super();
this.in = in;
}
@@ -355,35 +355,13 @@ public class FilterIndexReader extends IndexReader {
protected void doClose() throws IOException {
in.close();
}
@Override
public long getVersion() {
ensureOpen();
return in.getVersion();
}
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
return in.isCurrent();
}
@Override
public IndexReader[] getSequentialSubReaders() {
return in.getSequentialSubReaders();
}
@Override
public ReaderContext getTopReaderContext() {
public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return in.getTopReaderContext();
}
@Override
public Map<String, String> getCommitUserData() {
return in.getCommitUserData();
}
@Override
public Fields fields() throws IOException {
ensureOpen();
@@ -428,11 +406,6 @@ public class FilterIndexReader extends IndexReader {
return in.normValues(field);
}
@Override
public IndexCommit getIndexCommit() throws IOException {
return in.getIndexCommit();
}
@Override
public int getTermInfosIndexDivisor() {
return in.getTermInfosIndexDivisor();
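A minimal sketch of a filter subclass under the new atomic base type (the class name and its behavior are hypothetical):

import java.io.IOException;

// Hypothetical filter that hides norms; everything else delegates to 'in'.
public class NoNormsReader extends FilterIndexReader {
  public NoNormsReader(AtomicIndexReader in) { // now wraps atomic readers only
    super(in);
  }
  @Override
  public DocValues normValues(String field) throws IOException {
    return null; // pretend the field has no norms
  }
}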

IndexReader.java

@@ -172,23 +172,6 @@ public abstract class IndexReader implements Closeable {
return false;
}
/** {@inheritDoc} */
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder();
buffer.append(getClass().getSimpleName());
buffer.append('(');
final IndexReader[] subReaders = getSequentialSubReaders();
if ((subReaders != null) && (subReaders.length > 0)) {
buffer.append(subReaders[0]);
for (int i = 1; i < subReaders.length; ++i) {
buffer.append(" ").append(subReaders[i]);
}
}
buffer.append(')');
return buffer.toString();
}
/**
* Expert: decreases the refCount of this IndexReader
* instance. If the refCount drops to 0, then this
@@ -238,7 +221,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static IndexReader open(final Directory directory) throws CorruptIndexException, IOException {
public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException {
return DirectoryReader.open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
}
@@ -258,7 +241,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static IndexReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return DirectoryReader.open(directory, null, termInfosIndexDivisor);
}
@@ -281,7 +264,7 @@ public abstract class IndexReader implements Closeable {
*
* @lucene.experimental
*/
public static IndexReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
return writer.getReader(applyAllDeletes);
}
@@ -291,7 +274,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static IndexReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
return DirectoryReader.open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
}
@@ -312,7 +295,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static IndexReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return DirectoryReader.open(commit.getDirectory(), commit, termInfosIndexDivisor);
}
@@ -340,8 +323,8 @@ public abstract class IndexReader implements Closeable {
* @return null if there are no changes; else, a new
* IndexReader instance which you must eventually close
*/
public static IndexReader openIfChanged(IndexReader oldReader) throws IOException {
final IndexReader newReader = oldReader.doOpenIfChanged();
public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader) throws IOException {
final CompositeIndexReader newReader = oldReader.doOpenIfChanged();
assert newReader != oldReader;
return newReader;
}
@@ -353,8 +336,8 @@ public abstract class IndexReader implements Closeable {
*
* @see #openIfChanged(IndexReader)
*/
public static IndexReader openIfChanged(IndexReader oldReader, IndexCommit commit) throws IOException {
final IndexReader newReader = oldReader.doOpenIfChanged(commit);
public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader, IndexCommit commit) throws IOException {
final CompositeIndexReader newReader = oldReader.doOpenIfChanged(commit);
assert newReader != oldReader;
return newReader;
}
@@ -420,43 +403,13 @@ public abstract class IndexReader implements Closeable {
*
* @lucene.experimental
*/
public static IndexReader openIfChanged(IndexReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
final IndexReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
public static CompositeIndexReader openIfChanged(CompositeIndexReader oldReader, IndexWriter writer, boolean applyAllDeletes) throws IOException {
final CompositeIndexReader newReader = oldReader.doOpenIfChanged(writer, applyAllDeletes);
assert newReader != oldReader;
return newReader;
}
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader)
*/
protected IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
throw new UnsupportedOperationException("This reader does not support reopen().");
}
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader, IndexCommit)
*/
protected IndexReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException {
throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
}
/**
* If the index has changed since it was opened, open and return a new reader;
* else, return {@code null}.
*
* @see #openIfChanged(IndexReader, IndexWriter, boolean)
*/
protected IndexReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
return writer.getReader(applyAllDeletes);
}
/**
* Returns the directory associated with this index. The Default
* implementation returns the directory specified by subclasses when
* delegating to the IndexReader(Directory) constructor, or throws an
@@ -516,78 +469,6 @@ public abstract class IndexReader implements Closeable {
return SegmentInfos.readCurrentUserData(directory);
}
/**
* Version number when this IndexReader was opened. Not
* implemented in the IndexReader base class.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #openIfChanged} on
* a reader based on a Directory), then this method
* returns the version recorded in the commit that the
* reader opened. This version is advanced every time
* {@link IndexWriter#commit} is called.</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #openIfChanged}
* on a near real-time reader), then this method returns
* the version of the last commit done by the writer.
* Note that even as further changes are made with the
* writer, the version will not change until a commit is
* completed. Thus, you should not rely on this method to
* determine when a near real-time reader should be
* opened. Use {@link #isCurrent} instead.</p>
*
* @throws UnsupportedOperationException unless overridden in subclass
*/
public long getVersion() {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/**
* Retrieve the String userData optionally passed to
* IndexWriter#commit. This will return null if {@link
* IndexWriter#commit(Map)} has never been called for
* this index.
*
* @see #getCommitUserData(Directory)
*/
public Map<String,String> getCommitUserData() {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/**
* Check whether any new changes have occurred to the
* index since this reader was opened.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #openIfChanged} on
* a reader based on a Directory), then this method checks
* if any further commits (see {@link IndexWriter#commit})
* have occurred in that directory.</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #openIfChanged}
* on a near real-time reader), then this method checks if
* either a new commit has occurred, or any new
* uncommitted changes have taken place via the writer.
* Note that even if the writer has only performed
* merging, this method will still return false.</p>
*
* <p>In any event, if this returns false, you should call
* {@link #openIfChanged} to get a new reader that sees the
* changes.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @throws UnsupportedOperationException unless overridden in subclass
*/
public boolean isCurrent() throws CorruptIndexException, IOException {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/** Retrieve term vectors for this document, or null if
* term vectors were not indexed. The returned Fields
* instance acts like a single-document inverted index
@@ -687,166 +568,6 @@ public abstract class IndexReader implements Closeable {
/** Returns true if any documents have been deleted */
public abstract boolean hasDeletions();
/** Returns true if there are norms stored for this field. */
public boolean hasNorms(String field) throws IOException {
// backward compatible implementation.
// SegmentReader has an efficient implementation.
ensureOpen();
return normValues(field) != null;
}
/**
* Returns {@link Fields} for this reader.
* This method may return null if the reader has no
* postings.
*
* <p><b>NOTE</b>: if this is a multi reader ({@link
* #getSequentialSubReaders} is not null) then this
* method will throw UnsupportedOperationException. If
* you really need a {@link Fields} for such a reader,
* use {@link MultiFields#getFields}. However, for
* performance reasons, it's best to get all sub-readers
* using {@link ReaderUtil#gatherSubReaders} and iterate
* through them yourself. */
public abstract Fields fields() throws IOException;
public final int docFreq(Term term) throws IOException {
return docFreq(term.field(), term.bytes());
}
/** Returns the number of documents containing the term
* <code>t</code>. This method returns 0 if the term or
* field does not exist. This method does not take into
* account deleted documents that have not yet been merged
* away. */
public int docFreq(String field, BytesRef term) throws IOException {
final Fields fields = fields();
if (fields == null) {
return 0;
}
final Terms terms = fields.terms(field);
if (terms == null) {
return 0;
}
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docFreq();
} else {
return 0;
}
}
/** Returns the total number of occurrences of the term
* <code>t</code> across all documents. This method returns 0 if the
* term or field does not exist. This method does not take into
* account deleted documents that have not yet been merged
* away. */
public final long totalTermFreq(String field, BytesRef term) throws IOException {
final Fields fields = fields();
if (fields == null) {
return 0;
}
final Terms terms = fields.terms(field);
if (terms == null) {
return 0;
}
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.totalTermFreq();
} else {
return 0;
}
}
/** This may return null if the field does not exist.*/
public final Terms terms(String field) throws IOException {
final Fields fields = fields();
if (fields == null) {
return null;
}
return fields.terms(field);
}
/** Returns {@link DocsEnum} for the specified field &
* term. This may return null, if either the field or
* term does not exist. */
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, boolean needsFreqs) throws IOException {
assert field != null;
assert term != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docs(liveDocs, null, needsFreqs);
}
}
}
return null;
}
/** Returns {@link DocsAndPositionsEnum} for the specified
* field & term. This may return null, if either the
* field or term does not exist, or needsOffsets is
* true but offsets were not indexed for this field. */
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, boolean needsOffsets) throws IOException {
assert field != null;
assert term != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term, true)) {
return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
}
}
}
return null;
}
/**
* Returns {@link DocsEnum} for the specified field and
* {@link TermState}. This may return null, if either the field or the term
* does not exist or the {@link TermState} is invalid for the underlying
* implementation.*/
public final DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsFreqs) throws IOException {
assert state != null;
assert field != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
termsEnum.seekExact(term, state);
return termsEnum.docs(liveDocs, null, needsFreqs);
}
}
return null;
}
/**
* Returns {@link DocsAndPositionsEnum} for the specified field and
* {@link TermState}. This may return null, if either the field or the term
* does not exist, the {@link TermState} is invalid for the underlying
* implementation, or needsOffsets is true but offsets
* were not indexed for this field. */
public final DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state, boolean needsOffsets) throws IOException {
assert state != null;
assert field != null;
final Fields fields = fields();
if (fields != null) {
final Terms terms = fields.terms(field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
termsEnum.seekExact(term, state);
return termsEnum.docsAndPositions(liveDocs, null, needsOffsets);
}
}
return null;
}
/**
* Closes files associated with this index.
* Also saves any new deletions to disk.
@@ -863,39 +584,6 @@ public abstract class IndexReader implements Closeable {
/** Implements close. */
protected abstract void doClose() throws IOException;
/**
* Get the {@link FieldInfos} describing all fields in
* this reader. NOTE: do not make any changes to the
* returned FieldInfos!
*
* @lucene.experimental
*/
public abstract FieldInfos getFieldInfos();
/** Returns the {@link Bits} representing live (not
* deleted) docs. A set bit indicates the doc ID has not
* been deleted. If this method returns null it means
* there are no deleted documents (all documents are
* live).
*
* The returned instance has been safely published for
* use by multiple threads without additional
* synchronization.
* @lucene.experimental */
public abstract Bits getLiveDocs();
/**
* Expert: return the IndexCommit that this reader has
* opened. This method is only implemented by those
* readers that correspond to a Directory with its own
* segments_N file.
*
* @lucene.experimental
*/
public IndexCommit getIndexCommit() throws IOException {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/** Returns all commit points that exist in the Directory.
* Normally, because the default is {@link
* KeepOnlyLastCommitDeletionPolicy}, there would be only
@@ -915,18 +603,6 @@ public abstract class IndexReader implements Closeable {
return DirectoryReader.listCommits(dir);
}
/** Expert: returns the sequential sub readers that this
* reader is logically composed of. If this reader is not composed
* of sequential child readers, it should return null.
* If this method returns an empty array, that means this
* reader is a null reader (for example a MultiReader
* that has no sub readers).
*/
public IndexReader[] getSequentialSubReaders() {
ensureOpen();
return null;
}
/**
* Expert: Returns the root {@link ReaderContext} for this
* {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
@@ -966,25 +642,6 @@ public abstract class IndexReader implements Closeable {
return this;
}
/** Returns the number of unique terms (across all fields)
* in this reader.
*
* @return number of unique terms or -1 if this count
* cannot be easily determined (eg Multi*Readers).
* Instead, you should call {@link
* #getSequentialSubReaders} and ask each sub reader for
* its unique term count. */
public final long getUniqueTermCount() throws IOException {
if (!getTopReaderContext().isAtomic) {
return -1;
}
final Fields fields = fields();
if (fields == null) {
return 0;
}
return fields.getUniqueTermCount();
}
/** For IndexReader implementations that use
* TermInfosReader to read terms, this returns the
* current indexDivisor as specified when the reader was
@@ -994,49 +651,17 @@ public abstract class IndexReader implements Closeable {
throw new UnsupportedOperationException("This reader does not support this method.");
}
/**
* Returns {@link DocValues} for this field.
* This method may return null if the reader has no per-document
* values stored.
*
* <p><b>NOTE</b>: if this is a multi reader ({@link
* #getSequentialSubReaders} is not null) then this
* method will throw UnsupportedOperationException. If
* you really need {@link DocValues} for such a reader,
* use {@link MultiDocValues#getDocValues(IndexReader,String)}. However, for
* performance reasons, it's best to get all sub-readers
* using {@link ReaderUtil#gatherSubReaders} and iterate
* through them yourself. */
public abstract DocValues docValues(String field) throws IOException;
public abstract DocValues normValues(String field) throws IOException;
private volatile Fields fields;
/** @lucene.internal */
void storeFields(Fields fields) {
ensureOpen();
this.fields = fields;
}
/** @lucene.internal */
Fields retrieveFields() {
ensureOpen();
return fields;
}
// nocommit: remove generics and add a typed (overloaded) getter method instead of instance fields with "R reader"
/**
* A struct like class that represents a hierarchical relationship between
* {@link IndexReader} instances.
* @lucene.experimental
*/
public static abstract class ReaderContext {
public static abstract class ReaderContext<R extends IndexReader> {
/** The reader context for this reader's immediate parent, or null if none */
public final ReaderContext parent;
public final CompositeReaderContext parent;
/** The actual reader */
public final IndexReader reader;
/** <code>true</code> iff the reader is an atomic reader */
public final boolean isAtomic;
public final R reader;
/** <code>true</code> if this context struct represents the top level reader within the hierarchical context */
public final boolean isTopLevel;
/** the doc base for this reader in the parent, <tt>0</tt> if parent is null */
@@ -1044,11 +669,10 @@ public abstract class IndexReader implements Closeable {
/** the ord for this reader in the parent, <tt>0</tt> if parent is null */
public final int ordInParent;
ReaderContext(ReaderContext parent, IndexReader reader,
boolean isAtomic, int ordInParent, int docBaseInParent) {
ReaderContext(CompositeReaderContext parent, R reader,
int ordInParent, int docBaseInParent) {
this.parent = parent;
this.reader = reader;
this.isAtomic = isAtomic;
this.docBaseInParent = docBaseInParent;
this.ordInParent = ordInParent;
this.isTopLevel = parent==null;
@@ -1073,18 +697,18 @@ public abstract class IndexReader implements Closeable {
* <code>instanceof</code> checks and type-casts to
* {@link CompositeReaderContext}.
*/
public ReaderContext[] children() {
public ReaderContext<? extends IndexReader>[] children() {
return null;
}
}
/**
* {@link ReaderContext} for composite {@link IndexReader} instance.
* {@link ReaderContext} for {@link CompositeIndexReader} instance.
* @lucene.experimental
*/
public static final class CompositeReaderContext extends ReaderContext {
public static final class CompositeReaderContext extends ReaderContext<CompositeIndexReader> {
/** the composite readers immediate children */
public final ReaderContext[] children;
public final ReaderContext<? extends IndexReader>[] children;
/** the composite readers leaf reader contexts if this is the top level reader in this context */
public final AtomicReaderContext[] leaves;
@@ -1092,22 +716,22 @@ public abstract class IndexReader implements Closeable {
* Creates a {@link CompositeReaderContext} for intermediate readers that are
* not top-level readers in the current context
*/
public CompositeReaderContext(ReaderContext parent, IndexReader reader,
int ordInParent, int docbaseInParent, ReaderContext[] children) {
public CompositeReaderContext(CompositeReaderContext parent, CompositeIndexReader reader,
int ordInParent, int docbaseInParent, ReaderContext<? extends IndexReader>[] children) {
this(parent, reader, ordInParent, docbaseInParent, children, null);
}
/**
* Creates a {@link CompositeReaderContext} for top-level readers with parent set to <code>null</code>
*/
public CompositeReaderContext(IndexReader reader, ReaderContext[] children, AtomicReaderContext[] leaves) {
public CompositeReaderContext(CompositeIndexReader reader, ReaderContext<? extends IndexReader>[] children, AtomicReaderContext[] leaves) {
this(null, reader, 0, 0, children, leaves);
}
private CompositeReaderContext(ReaderContext parent, IndexReader reader,
int ordInParent, int docbaseInParent, ReaderContext[] children,
private CompositeReaderContext(CompositeReaderContext parent, CompositeIndexReader reader,
int ordInParent, int docbaseInParent, ReaderContext<? extends IndexReader>[] children,
AtomicReaderContext[] leaves) {
super(parent, reader, false, ordInParent, docbaseInParent);
super(parent, reader, ordInParent, docbaseInParent);
this.children = children;
this.leaves = leaves;
}
@@ -1119,16 +743,16 @@ public abstract class IndexReader implements Closeable {
@Override
public ReaderContext[] children() {
public ReaderContext<? extends IndexReader>[] children() {
return children;
}
}
/**
* {@link ReaderContext} for atomic {@link IndexReader} instances
* {@link ReaderContext} for {@link AtomicIndexReader} instances
* @lucene.experimental
*/
public static final class AtomicReaderContext extends ReaderContext {
public static final class AtomicReaderContext extends ReaderContext<AtomicIndexReader> {
/** The readers ord in the top-level's leaves array */
public final int ord;
/** The readers absolute doc base */
@@ -1136,10 +760,9 @@ public abstract class IndexReader implements Closeable {
/**
* Creates a new {@link AtomicReaderContext}
*/
public AtomicReaderContext(ReaderContext parent, IndexReader reader,
public AtomicReaderContext(CompositeReaderContext parent, AtomicIndexReader reader,
int ord, int docBase, int leafOrd, int leafDocBase) {
super(parent, reader, true, ord, docBase);
assert reader.getSequentialSubReaders() == null : "Atomic readers must not have subreaders";
super(parent, reader, ord, docBase);
this.ord = leafOrd;
this.docBase = leafDocBase;
}
@@ -1148,7 +771,7 @@ public abstract class IndexReader implements Closeable {
* Creates a new {@link AtomicReaderContext} for an atomic reader without an immediate
* parent.
*/
public AtomicReaderContext(IndexReader atomicReader) {
public AtomicReaderContext(AtomicIndexReader atomicReader) {
this(null, atomicReader, 0, 0, 0, 0);
}
}
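A traversal sketch over the now-typed context structs (the 'reader' variable, and the exact generic return type of getTopReaderContext() on the base class, are assumptions; leaves is only pre-built on top-level contexts):

ReaderContext<? extends IndexReader> top = reader.getTopReaderContext();
if (top instanceof CompositeReaderContext) {
  for (AtomicReaderContext leaf : ((CompositeReaderContext) top).leaves) {
    // leaf.reader is now statically typed as AtomicIndexReader
    System.out.println("docBase=" + leaf.docBase + " maxDoc=" + leaf.reader.maxDoc());
  }
}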

IndexWriter.java

@@ -263,7 +263,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// The PayloadProcessorProvider to use when segments are merged
private PayloadProcessorProvider payloadProcessorProvider;
IndexReader getReader() throws IOException {
DirectoryReader getReader() throws IOException {
return getReader(true);
}
@@ -326,7 +326,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @throws IOException
*/
IndexReader getReader(boolean applyAllDeletes) throws IOException {
DirectoryReader getReader(boolean applyAllDeletes) throws IOException {
ensureOpen();
final long tStart = System.currentTimeMillis();
@@ -338,7 +338,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
// obtained during this flush are pooled, the first time
// this method is called:
poolReaders = true;
final IndexReader r;
final DirectoryReader r;
doBeforeFlush();
boolean anySegmentFlushed = false;
/*
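A hedged near-real-time sketch against the narrowed return type ('dir', 'conf' and 'doc' are assumed to exist; getReader() itself stays package-private, so callers go through IndexReader.open(writer, applyAllDeletes)):

IndexWriter writer = new IndexWriter(dir, conf);
writer.addDocument(doc);
DirectoryReader nrt = IndexReader.open(writer, true); // sees uncommitted changes
try {
  // search over the NRT view here
} finally {
  nrt.close();
}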

MergeState.java

@@ -31,10 +31,10 @@ import org.apache.lucene.util.InfoStream;
public class MergeState {
public static class IndexReaderAndLiveDocs {
public final IndexReader reader;
public final AtomicIndexReader reader;
public final Bits liveDocs;
public IndexReaderAndLiveDocs(IndexReader reader, Bits liveDocs) {
public IndexReaderAndLiveDocs(AtomicIndexReader reader, Bits liveDocs) {
this.reader = reader;
this.liveDocs = liveDocs;
}

View File

@ -33,7 +33,7 @@ import org.apache.lucene.util.ReaderUtil.Gather;
import org.apache.lucene.util.packed.PackedInts.Reader;
/**
* A wrapper for compound IndexReader providing access to per segment
* A wrapper for CompositeIndexReader providing access to per segment
* {@link DocValues}
*
* @lucene.experimental
@ -43,11 +43,11 @@ public class MultiDocValues extends DocValues {
private static DocValuesPuller DEFAULT_PULLER = new DocValuesPuller();
private static final DocValuesPuller NORMS_PULLER = new DocValuesPuller() {
public DocValues pull(IndexReader reader, String field) throws IOException {
public DocValues pull(AtomicIndexReader reader, String field) throws IOException {
return reader.normValues(field);
}
public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
public boolean stopLoadingOnNull(AtomicIndexReader reader, String field) throws IOException {
// for norms we drop all norms if one leaf reader has no norms and the field is present
FieldInfos fieldInfos = reader.getFieldInfos();
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@ -69,11 +69,11 @@ public class MultiDocValues extends DocValues {
}
private static class DocValuesPuller {
public DocValues pull(IndexReader reader, String field) throws IOException {
public DocValues pull(AtomicIndexReader reader, String field) throws IOException {
return reader.docValues(field);
}
public boolean stopLoadingOnNull(IndexReader reader, String field) throws IOException {
public boolean stopLoadingOnNull(AtomicIndexReader reader, String field) throws IOException {
return false;
}
}
@ -115,11 +115,13 @@ public class MultiDocValues extends DocValues {
private static DocValues getDocValues(IndexReader r, final String field, final DocValuesPuller puller) throws IOException {
final IndexReader[] subs = r.getSequentialSubReaders();
if (subs == null) {
if (r instanceof AtomicIndexReader) {
// already an atomic reader
return puller.pull(r, field);
} else if (subs.length == 0) {
return puller.pull((AtomicIndexReader) r, field);
}
assert r instanceof CompositeIndexReader;
final IndexReader[] subs = ((CompositeIndexReader) r).getSequentialSubReaders();
if (subs.length == 0) {
// no fields
return null;
} else if (subs.length == 1) {
@ -136,7 +138,7 @@ public class MultiDocValues extends DocValues {
new ReaderUtil.Gather(r) {
boolean stop = false;
@Override
protected void add(int base, IndexReader r) throws IOException {
protected void add(int base, AtomicIndexReader r) throws IOException {
if (stop) {
return;
}
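
As a usage sketch, pulling merged values over a composite reader; compositeReader is a placeholder and DocValues.getSource() is assumed from the 4.0 DocValues API:

    // Sketch only: merged DocValues across a composite reader's leaves.
    DocValues dv = MultiDocValues.getDocValues(compositeReader, "price");
    if (dv != null) {                // null if no sub-reader has the field
      DocValues.Source source = dv.getSource();
      // per-document access through the merged source
    }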

View File

@ -59,59 +59,50 @@ public final class MultiFields extends Fields {
* Gather}) and iterate through them
* yourself. */
public static Fields getFields(IndexReader r) throws IOException {
final IndexReader[] subs = r.getSequentialSubReaders();
if (subs == null) {
if (r instanceof AtomicIndexReader) {
// already an atomic reader
return r.fields();
} else if (subs.length == 0) {
return ((AtomicIndexReader) r).fields();
}
assert r instanceof CompositeIndexReader;
final IndexReader[] subs = ((CompositeIndexReader) r).getSequentialSubReaders();
if (subs.length == 0) {
// no fields
return null;
} else if (subs.length == 1) {
return getFields(subs[0]);
} else {
final List<Fields> fields = new ArrayList<Fields>();
final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
Fields currentFields = r.retrieveFields();
if (currentFields == null) {
final List<Fields> fields = new ArrayList<Fields>();
final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
new ReaderUtil.Gather(r) {
@Override
protected void add(int base, IndexReader r) throws IOException {
final Fields f = r.fields();
if (f != null) {
fields.add(f);
slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
}
new ReaderUtil.Gather(r) {
@Override
protected void add(int base, AtomicIndexReader r) throws IOException {
final Fields f = r.fields();
if (f != null) {
fields.add(f);
slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
}
}.run();
if (fields.size() == 0) {
return null;
} else if (fields.size() == 1) {
currentFields = fields.get(0);
} else {
currentFields = new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
}
r.storeFields(currentFields);
}.run();
if (fields.isEmpty()) {
return null;
} else if (fields.size() == 1) {
return fields.get(0);
} else {
return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
}
return currentFields;
}
}
public static Bits getLiveDocs(IndexReader r) {
Bits result;
if (r.hasDeletions()) {
final List<Bits> liveDocs = new ArrayList<Bits>();
final List<Integer> starts = new ArrayList<Integer>();
try {
final int maxDoc = new ReaderUtil.Gather(r) {
@Override
protected void add(int base, IndexReader r) throws IOException {
protected void add(int base, AtomicIndexReader r) throws IOException {
// record all liveDocs, even if they are null
liveDocs.add(r.getLiveDocs());
starts.add(base);
@ -126,16 +117,13 @@ public final class MultiFields extends Fields {
assert liveDocs.size() > 0;
if (liveDocs.size() == 1) {
// Only one actual sub reader -- optimize this case
result = liveDocs.get(0);
return liveDocs.get(0);
} else {
result = new MultiBits(liveDocs, starts, true);
return new MultiBits(liveDocs, starts, true);
}
} else {
result = null;
return null;
}
return result;
}
/** This method may return null if the field does not exist.*/
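
The net effect for callers is a single entry point that works for both reader flavors; a small sketch (reader is any IndexReader, "body" is an illustrative field name):

    // Sketch only: type-agnostic access; MultiFields dispatches internally.
    Fields fields = MultiFields.getFields(reader);    // null if no fields
    Bits liveDocs = MultiFields.getLiveDocs(reader);  // null if no deletions
    if (fields != null) {
      Terms terms = fields.terms("body");
      // ...
    }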

View File

@ -60,50 +60,9 @@ public class MultiReader extends BaseMultiReader<IndexReader> {
}
@Override
protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
ensureOpen();
boolean changed = false;
IndexReader[] newSubReaders = new IndexReader[subReaders.length];
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++) {
final IndexReader newSubReader = IndexReader.openIfChanged(subReaders[i]);
if (newSubReader != null) {
newSubReaders[i] = newSubReader;
changed = true;
} else {
newSubReaders[i] = subReaders[i];
}
}
success = true;
} finally {
if (!success && changed) {
for (int i = 0; i < newSubReaders.length; i++) {
if (newSubReaders[i] != subReaders[i]) {
try {
newSubReaders[i].close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (changed) {
boolean[] newDecrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
if (newSubReaders[i] == subReaders[i]) {
newSubReaders[i].incRef();
newDecrefOnClose[i] = true;
}
}
return new MultiReader(newSubReaders, newDecrefOnClose);
} else {
return null;
}
protected synchronized CompositeIndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
// nocommit: remove this method
return null;
}
@Override
@ -128,7 +87,8 @@ public class MultiReader extends BaseMultiReader<IndexReader> {
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
final IndexReader r = subReaders[i];
if (r instanceof CompositeIndexReader && !((CompositeIndexReader) r).isCurrent()) {
return false;
}
}

View File

@ -25,7 +25,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;
/** An IndexReader which reads multiple, parallel indexes. Each index added
/** An AtomicIndexReader which reads multiple, parallel indexes. Each index added
* must have the same number of documents, but typically each contains
* different fields. Each document contains the union of the fields of all
* documents with the same document number. When searching, matches for a
@ -42,15 +42,15 @@ import org.apache.lucene.util.ReaderUtil;
* same order to the other indexes. <em>Failure to do so will result in
* undefined behavior</em>.
*/
public class ParallelReader extends IndexReader {
private List<IndexReader> readers = new ArrayList<IndexReader>();
public class ParallelReader extends AtomicIndexReader {
private List<AtomicIndexReader> readers = new ArrayList<AtomicIndexReader>();
private List<Boolean> decrefOnClose = new ArrayList<Boolean>(); // remember which subreaders to decRef on close
boolean incRefReaders = false;
private SortedMap<String,IndexReader> fieldToReader = new TreeMap<String,IndexReader>();
private Map<IndexReader,Collection<String>> readerToFields = new HashMap<IndexReader,Collection<String>>();
private List<IndexReader> storedFieldReaders = new ArrayList<IndexReader>();
private SortedMap<String,AtomicIndexReader> fieldToReader = new TreeMap<String,AtomicIndexReader>();
private Map<AtomicIndexReader,Collection<String>> readerToFields = new HashMap<AtomicIndexReader,Collection<String>>();
private List<AtomicIndexReader> storedFieldReaders = new ArrayList<AtomicIndexReader>();
private Map<String, DocValues> normsCache = new HashMap<String,DocValues>();
private final ReaderContext topLevelReaderContext = new AtomicReaderContext(this);
private final AtomicReaderContext topLevelReaderContext = new AtomicReaderContext(this);
private int maxDoc;
private int numDocs;
private boolean hasDeletions;
@ -77,7 +77,7 @@ public class ParallelReader extends IndexReader {
@Override
public String toString() {
final StringBuilder buffer = new StringBuilder("ParallelReader(");
final Iterator<IndexReader> iter = readers.iterator();
final Iterator<AtomicIndexReader> iter = readers.iterator();
if (iter.hasNext()) {
buffer.append(iter.next());
}
@ -88,25 +88,25 @@ public class ParallelReader extends IndexReader {
return buffer.toString();
}
/** Add an IndexReader.
/** Add an AtomicIndexReader.
* @throws IOException if there is a low-level IO error
*/
public void add(IndexReader reader) throws IOException {
public void add(AtomicIndexReader reader) throws IOException {
ensureOpen();
add(reader, false);
}
/** Add an IndexReader whose stored fields will not be returned. This can
/** Add an AtomicIndexReader whose stored fields will not be returned. This can
* accelerate search when stored fields are only needed from a subset of
* the IndexReaders.
*
* @throws IllegalArgumentException if not all indexes contain the same number
* of documents
* @throws IllegalArgumentException if not all indexes have the same value
* of {@link IndexReader#maxDoc()}
* of {@link AtomicIndexReader#maxDoc()}
* @throws IOException if there is a low-level IO error
*/
public void add(IndexReader reader, boolean ignoreStoredFields)
public void add(AtomicIndexReader reader, boolean ignoreStoredFields)
throws IOException {
ensureOpen();
@ -129,7 +129,7 @@ public class ParallelReader extends IndexReader {
if (fieldToReader.get(fieldInfo.name) == null) {
fieldInfos.add(fieldInfo);
fieldToReader.put(fieldInfo.name, reader);
this.fields.addField(fieldInfo.name, MultiFields.getFields(reader).terms(fieldInfo.name));
this.fields.addField(fieldInfo.name, reader.terms(fieldInfo.name));
}
}
@ -205,7 +205,7 @@ public class ParallelReader extends IndexReader {
@Override
public Bits getLiveDocs() {
ensureOpen();
return MultiFields.getLiveDocs(readers.get(0));
return readers.get(0).getLiveDocs();
}
@Override
@ -214,88 +214,6 @@ public class ParallelReader extends IndexReader {
return fields;
}
/**
* Tries to reopen the subreaders.
* <br>
* If one or more subreaders could be re-opened (i.e. subReader.reopen()
* returned a new instance != subReader), then a new ParallelReader instance
* is returned, otherwise null is returned.
* <p>
* A re-opened instance might share one or more subreaders with the old
* instance. Index modification operations result in undefined behavior
* when performed before the old instance is closed.
* (see {@link IndexReader#openIfChanged}).
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
@Override
protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
ensureOpen();
boolean reopened = false;
List<IndexReader> newReaders = new ArrayList<IndexReader>();
boolean success = false;
try {
for (final IndexReader oldReader : readers) {
IndexReader newReader = null;
newReader = IndexReader.openIfChanged(oldReader);
if (newReader != null) {
reopened = true;
} else {
newReader = oldReader;
}
newReaders.add(newReader);
}
success = true;
} finally {
if (!success && reopened) {
for (int i = 0; i < newReaders.size(); i++) {
IndexReader r = newReaders.get(i);
if (r != readers.get(i)) {
try {
r.close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (reopened) {
List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
// TODO: maybe add a special reopen-ctor for norm-copying?
ParallelReader pr = new ParallelReader();
for (int i = 0; i < readers.size(); i++) {
IndexReader oldReader = readers.get(i);
IndexReader newReader = newReaders.get(i);
if (newReader == oldReader) {
newDecrefOnClose.add(Boolean.TRUE);
newReader.incRef();
} else {
// this is a new subreader instance, so on close() we don't
// decRef but close it
newDecrefOnClose.add(Boolean.FALSE);
}
pr.add(newReader, !storedFieldReaders.contains(oldReader));
}
pr.decrefOnClose = newDecrefOnClose;
pr.incRefReaders = incRefReaders;
return pr;
} else {
// No subreader was refreshed
return null;
}
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
@ -317,7 +235,7 @@ public class ParallelReader extends IndexReader {
@Override
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
ensureOpen();
for (final IndexReader reader: storedFieldReaders) {
for (final AtomicIndexReader reader: storedFieldReaders) {
reader.document(docID, visitor);
}
}
@ -327,7 +245,7 @@ public class ParallelReader extends IndexReader {
public Fields getTermVectors(int docID) throws IOException {
ensureOpen();
ParallelFields fields = new ParallelFields();
for (Map.Entry<String,IndexReader> ent : fieldToReader.entrySet()) {
for (Map.Entry<String,AtomicIndexReader> ent : fieldToReader.entrySet()) {
String fieldName = ent.getKey();
Terms vector = ent.getValue().getTermVector(docID, fieldName);
if (vector != null) {
@ -341,44 +259,20 @@ public class ParallelReader extends IndexReader {
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
AtomicIndexReader reader = fieldToReader.get(field);
return reader==null ? false : reader.hasNorms(field);
}
@Override
public int docFreq(String field, BytesRef term) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
AtomicIndexReader reader = fieldToReader.get(field);
return reader == null? 0 : reader.docFreq(field, term);
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
for (final IndexReader reader : readers) {
if (!reader.isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("ParallelReader does not support this method.");
}
// for testing
IndexReader[] getSubReaders() {
return readers.toArray(new IndexReader[readers.size()]);
AtomicIndexReader[] getSubReaders() {
return readers.toArray(new AtomicIndexReader[readers.size()]);
}
@Override
@ -393,7 +287,7 @@ public class ParallelReader extends IndexReader {
}
@Override
public ReaderContext getTopReaderContext() {
public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return topLevelReaderContext;
}
@ -401,8 +295,8 @@ public class ParallelReader extends IndexReader {
// TODO: I suspect this is completely untested!!!!!
@Override
public DocValues docValues(String field) throws IOException {
IndexReader reader = fieldToReader.get(field);
return reader == null ? null : MultiDocValues.getDocValues(reader, field);
AtomicIndexReader reader = fieldToReader.get(field);
return reader == null ? null : reader.docValues(field);
}
// TODO: I suspect this is completely untested!!!!!
@ -410,8 +304,8 @@ public class ParallelReader extends IndexReader {
public synchronized DocValues normValues(String field) throws IOException {
DocValues values = normsCache.get(field);
if (values == null) {
IndexReader reader = fieldToReader.get(field);
values = reader == null ? null : MultiDocValues.getNormDocValues(reader, field);
AtomicIndexReader reader = fieldToReader.get(field);
values = reader == null ? null : reader.normValues(field);
normsCache.put(field, values);
}
return values;
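
With ParallelReader now atomic-only, composition looks roughly like this; atomicReaderA/atomicReaderB are placeholders (e.g. SegmentReaders or SlowMultiReaderWrapper instances) and must share doc numbering:

    // Sketch only: parallel atomic readers contributing different fields.
    ParallelReader pr = new ParallelReader();
    pr.add(atomicReaderA);        // supplies fields and stored fields
    pr.add(atomicReaderB, true);  // fields only; stored fields ignored
    // doc n now exposes the union of A's and B's fields for doc n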

View File

@ -76,7 +76,7 @@ final class SegmentMerger {
try {
new ReaderUtil.Gather(reader) {
@Override
protected void add(int base, IndexReader r) {
protected void add(int base, AtomicIndexReader r) {
mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs()));
}
}.run();
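
The same Gather idiom serves any consumer that needs the atomic leaves plus their doc bases; a sketch (topReader is a placeholder, and run() can throw IOException):

    // Sketch only: collecting atomic leaves with their starting docIDs.
    final List<AtomicIndexReader> leaves = new ArrayList<AtomicIndexReader>();
    new ReaderUtil.Gather(topReader) {
      @Override
      protected void add(int base, AtomicIndexReader r) {
        leaves.add(r);   // base is r's first docID within the composite
      }
    }.run();
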
@ -205,7 +205,7 @@ final class SegmentMerger {
Map<FieldInfo,TypePromoter> normValuesTypes = new HashMap<FieldInfo,TypePromoter>();
for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : mergeState.readers) {
final IndexReader reader = readerAndLiveDocs.reader;
final AtomicIndexReader reader = readerAndLiveDocs.reader;
FieldInfos readerFieldInfos = reader.getFieldInfos();
for (FieldInfo fi : readerFieldInfos) {
FieldInfo merged = mergeState.fieldInfos.add(fi);

View File

@ -31,10 +31,10 @@ import org.apache.lucene.util.Bits;
/**
* @lucene.experimental
*/
public final class SegmentReader extends IndexReader {
public final class SegmentReader extends AtomicIndexReader {
private final SegmentInfo si;
private final ReaderContext readerContext = new AtomicReaderContext(this);
private final AtomicReaderContext readerContext = new AtomicReaderContext(this);
private final BitVector liveDocs;
@ -230,7 +230,7 @@ public final class SegmentReader extends IndexReader {
}
@Override
public ReaderContext getTopReaderContext() {
public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return readerContext;
}

View File

@ -50,14 +50,20 @@ import org.apache.lucene.index.MultiReader; // javadoc
* yourself.</p>
*/
public final class SlowMultiReaderWrapper extends FilterIndexReader {
public final class SlowMultiReaderWrapper extends AtomicIndexReader {
private final ReaderContext readerContext;
private final CompositeIndexReader in;
private final AtomicReaderContext readerContext;
private final Map<String, DocValues> normsCache = new HashMap<String, DocValues>();
private final Fields fields;
private final Bits liveDocs;
public SlowMultiReaderWrapper(IndexReader other) {
super(other);
readerContext = new AtomicReaderContext(this); // emulate atomic reader!
public SlowMultiReaderWrapper(CompositeIndexReader other) throws IOException {
super();
in = other;
readerContext = new AtomicReaderContext(this);
fields = MultiFields.getFields(in);
liveDocs = MultiFields.getLiveDocs(in);
}
@Override
@ -68,7 +74,7 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
@Override
public Fields fields() throws IOException {
ensureOpen();
return MultiFields.getFields(in);
return fields;
}
@Override
@ -87,25 +93,59 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
}
return values;
}
@Override
public Fields getTermVectors(int docID)
throws IOException {
ensureOpen();
return in.getTermVectors(docID);
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return in.numDocs();
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return in.maxDoc();
}
@Override
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
ensureOpen();
in.document(docID, visitor);
}
@Override
public Bits getLiveDocs() {
ensureOpen();
return MultiFields.getLiveDocs(in);
return liveDocs;
}
@Override
public IndexReader[] getSequentialSubReaders() {
return null;
}
@Override
public ReaderContext getTopReaderContext() {
public AtomicReaderContext getTopReaderContext() {
ensureOpen();
return readerContext;
}
@Override
public FieldInfos getFieldInfos() {
ensureOpen();
return ReaderUtil.getMergedFieldInfos(in);
}
@Override
public boolean hasDeletions() {
ensureOpen();
return liveDocs != null;
}
@Override
protected void doClose() throws IOException {
// TODO: as this is a wrapper, should we really close the delegate?
in.close();
}
}
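
A usage sketch; compositeReader is a placeholder, and note the constructor now throws IOException because fields and live docs are merged eagerly:

    // Sketch only: an atomic view over a composite reader, e.g. for a
    // top-level FieldCache (see the FieldCacheImpl guard further down).
    AtomicIndexReader slow = new SlowMultiReaderWrapper(compositeReader);
    Fields fields = slow.fields();       // merged once in the constructor
    Bits liveDocs = slow.getLiveDocs();  // likewise computed up front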

View File

@ -22,7 +22,7 @@ import java.util.Collections;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
@ -76,7 +76,7 @@ public class CachingWrapperFilter extends Filter {
* returns <code>true</code>, else it copies the {@link DocIdSetIterator} into
* a {@link FixedBitSet}.
*/
protected DocIdSet docIdSetToCache(DocIdSet docIdSet, IndexReader reader) throws IOException {
protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicIndexReader reader) throws IOException {
if (docIdSet == null) {
// this is better than returning null, as the nonnull result can be cached
return DocIdSet.EMPTY_DOCIDSET;
@ -102,7 +102,7 @@ public class CachingWrapperFilter extends Filter {
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
final IndexReader reader = context.reader;
final AtomicIndexReader reader = context.reader;
// Only cache if incoming acceptDocs is == live docs;
// if Lucene passes in more interesting acceptDocs in
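
Because entries are keyed per atomic reader, a reopened composite only re-fills caches for newly appeared segments; a sketch with illustrative term values:

    // Sketch only: cache a slow filter per segment.
    Filter slow = new QueryWrapperFilter(new TermQuery(new Term("type", "x")));
    Filter cached = new CachingWrapperFilter(slow);
    // each AtomicReaderContext gets its own cached DocIdSet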

View File

@ -24,7 +24,7 @@ import java.text.DecimalFormat;
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@ -63,7 +63,7 @@ public interface FieldCache {
}
/** Interface to parse bytes from document fields.
* @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser, boolean)
* @see FieldCache#getBytes(AtomicIndexReader, String, FieldCache.ByteParser, boolean)
*/
public interface ByteParser extends Parser {
/** Return a single Byte representation of this field's value. */
@ -71,7 +71,7 @@ public interface FieldCache {
}
/** Interface to parse shorts from document fields.
* @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser, boolean)
* @see FieldCache#getShorts(AtomicIndexReader, String, FieldCache.ShortParser, boolean)
*/
public interface ShortParser extends Parser {
/** Return a short representation of this field's value. */
@ -79,7 +79,7 @@ public interface FieldCache {
}
/** Interface to parse ints from document fields.
* @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser, boolean)
* @see FieldCache#getInts(AtomicIndexReader, String, FieldCache.IntParser, boolean)
*/
public interface IntParser extends Parser {
/** Return an integer representation of this field's value. */
@ -87,7 +87,7 @@ public interface FieldCache {
}
/** Interface to parse floats from document fields.
* @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser, boolean)
* @see FieldCache#getFloats(AtomicIndexReader, String, FieldCache.FloatParser, boolean)
*/
public interface FloatParser extends Parser {
/** Return a float representation of this field's value. */
@ -95,7 +95,7 @@ public interface FieldCache {
}
/** Interface to parse long from document fields.
* @see FieldCache#getLongs(IndexReader, String, FieldCache.LongParser, boolean)
* @see FieldCache#getLongs(AtomicIndexReader, String, FieldCache.LongParser, boolean)
*/
public interface LongParser extends Parser {
/** Return a long representation of this field's value. */
@ -103,7 +103,7 @@ public interface FieldCache {
}
/** Interface to parse doubles from document fields.
* @see FieldCache#getDoubles(IndexReader, String, FieldCache.DoubleParser, boolean)
* @see FieldCache#getDoubles(AtomicIndexReader, String, FieldCache.DoubleParser, boolean)
*/
public interface DoubleParser extends Parser {
/** Return a double representation of this field's value. */
@ -303,7 +303,7 @@ public interface FieldCache {
* <code>reader.maxDoc()</code>, with bits turned on for each docid that
* does have a value for this field.
*/
public Bits getDocsWithField(IndexReader reader, String field)
public Bits getDocsWithField(AtomicIndexReader reader, String field)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@ -317,7 +317,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public byte[] getBytes (IndexReader reader, String field, boolean setDocsWithField)
public byte[] getBytes (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@ -332,7 +332,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public byte[] getBytes (IndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
public byte[] getBytes (AtomicIndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@ -346,7 +346,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public short[] getShorts (IndexReader reader, String field, boolean setDocsWithField)
public short[] getShorts (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@ -361,7 +361,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public short[] getShorts (IndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
public short[] getShorts (AtomicIndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is
@ -375,7 +375,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public int[] getInts (IndexReader reader, String field, boolean setDocsWithField)
public int[] getInts (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if none is found,
@ -390,7 +390,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public int[] getInts (IndexReader reader, String field, IntParser parser, boolean setDocsWithField)
public int[] getInts (AtomicIndexReader reader, String field, IntParser parser, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if
@ -404,7 +404,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public float[] getFloats (IndexReader reader, String field, boolean setDocsWithField)
public float[] getFloats (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/** Checks the internal cache for an appropriate entry, and if
@ -419,7 +419,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public float[] getFloats (IndexReader reader, String field,
public float[] getFloats (AtomicIndexReader reader, String field,
FloatParser parser, boolean setDocsWithField) throws IOException;
/**
@ -435,7 +435,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws java.io.IOException If any error occurs.
*/
public long[] getLongs(IndexReader reader, String field, boolean setDocsWithField)
public long[] getLongs(AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/**
@ -452,7 +452,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public long[] getLongs(IndexReader reader, String field, LongParser parser, boolean setDocsWithField)
public long[] getLongs(AtomicIndexReader reader, String field, LongParser parser, boolean setDocsWithField)
throws IOException;
/**
@ -468,7 +468,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public double[] getDoubles(IndexReader reader, String field, boolean setDocsWithField)
public double[] getDoubles(AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException;
/**
@ -485,7 +485,7 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public double[] getDoubles(IndexReader reader, String field, DoubleParser parser, boolean setDocsWithField)
public double[] getDoubles(AtomicIndexReader reader, String field, DoubleParser parser, boolean setDocsWithField)
throws IOException;
/** Returned by {@link #getTerms} */
@ -513,15 +513,15 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public DocTerms getTerms (IndexReader reader, String field)
public DocTerms getTerms (AtomicIndexReader reader, String field)
throws IOException;
/** Expert: just like {@link #getTerms(IndexReader,String)},
/** Expert: just like {@link #getTerms(AtomicIndexReader,String)},
* but you can specify whether more RAM should be consumed in exchange for
* faster lookups (default is "true"). Note that the
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
public DocTerms getTerms (IndexReader reader, String field, boolean fasterButMoreRAM)
public DocTerms getTerms (AtomicIndexReader reader, String field, boolean fasterButMoreRAM)
throws IOException;
/** Returned by {@link #getTermsIndex} */
@ -589,16 +589,16 @@ public interface FieldCache {
* @return The values in the given field for each document.
* @throws IOException If any error occurs.
*/
public DocTermsIndex getTermsIndex (IndexReader reader, String field)
public DocTermsIndex getTermsIndex (AtomicIndexReader reader, String field)
throws IOException;
/** Expert: just like {@link
* #getTermsIndex(IndexReader,String)}, but you can specify
* #getTermsIndex(AtomicIndexReader,String)}, but you can specify
* whether more RAM should be consumed in exchange for
* faster lookups (default is "true"). Note that the
* first call for a given reader and field "wins",
* subsequent calls will share the same cache entry. */
public DocTermsIndex getTermsIndex (IndexReader reader, String field, boolean fasterButMoreRAM)
public DocTermsIndex getTermsIndex (AtomicIndexReader reader, String field, boolean fasterButMoreRAM)
throws IOException;
/**
@ -611,7 +611,7 @@ public interface FieldCache {
* @return a {@link DocTermOrds} instance
* @throws IOException If any error occurs.
*/
public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException;
public DocTermOrds getDocTermOrds(AtomicIndexReader reader, String field) throws IOException;
/**
* EXPERT: A unique Identifier/Description for each item in the FieldCache.
@ -677,7 +677,7 @@ public interface FieldCache {
* currently in the FieldCache.
* <p>
* NOTE: These CacheEntry objects maintain a strong reference to the
* Cached Values. Maintaining references to a CacheEntry the IndexReader
* Cached Values. Maintaining references to a CacheEntry after the AtomicIndexReader
* associated with it has been garbage collected will prevent the Value itself
* from being garbage collected when the Cache drops the WeakReference.
* </p>
@ -705,7 +705,7 @@ public interface FieldCache {
* top-level reader, it usually will have no effect as
* Lucene now caches at the segment reader level.
*/
public abstract void purge(IndexReader r);
public abstract void purge(AtomicIndexReader r);
/**
* If non-null, FieldCacheImpl will warn whenever
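
The practical consequence of these signature changes is that callers populate the cache leaf by leaf; a sketch (a leaves() accessor on the top-level context is assumed, "price" is an illustrative field):

    // Sketch only: per-segment FieldCache population.
    for (AtomicReaderContext leaf : top.leaves()) {
      int[] prices = FieldCache.DEFAULT.getInts(leaf.reader, "price", false);
      Bits hasPrice = FieldCache.DEFAULT.getDocsWithField(leaf.reader, "price");
      // ...
    }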

View File

@ -29,6 +29,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.SegmentReader;
@ -48,8 +49,6 @@ import org.apache.lucene.util.packed.PackedInts;
* Expert: The default cache implementation, storing all values in memory.
* A WeakHashMap is used for storage.
*
* <p>Created: May 19, 2004 4:40:36 PM
*
* @since lucene 1.4
*/
class FieldCacheImpl implements FieldCache {
@ -76,7 +75,7 @@ class FieldCacheImpl implements FieldCache {
init();
}
public synchronized void purge(IndexReader r) {
public synchronized void purge(AtomicIndexReader r) {
for(Cache c : caches.values()) {
c.purge(r);
}
@ -155,24 +154,23 @@ class FieldCacheImpl implements FieldCache {
};
// composite/SlowMultiReaderWrapper fieldcaches don't purge until composite reader is closed.
final IndexReader.ReaderClosedListener purgeReader = new IndexReader.ReaderClosedListener() {
final AtomicIndexReader.ReaderClosedListener purgeReader = new AtomicIndexReader.ReaderClosedListener() {
@Override
public void onClose(IndexReader owner) {
FieldCacheImpl.this.purge(owner);
assert owner instanceof AtomicIndexReader;
FieldCacheImpl.this.purge((AtomicIndexReader) owner);
}
};
private void initReader(IndexReader reader) {
private void initReader(AtomicIndexReader reader) {
if (reader instanceof SegmentReader) {
((SegmentReader) reader).addCoreClosedListener(purgeCore);
} else if (reader.getSequentialSubReaders() != null) {
throw new UnsupportedOperationException("Please use SlowMultiReaderWrapper, if you really need a top level FieldCache");
} else {
// we have a slow reader of some sort, try to register a purge event
// rather than relying on gc:
Object key = reader.getCoreCacheKey();
if (key instanceof IndexReader) {
((IndexReader)key).addReaderClosedListener(purgeReader);
if (key instanceof AtomicIndexReader) {
((AtomicIndexReader)key).addReaderClosedListener(purgeReader);
} else {
// last chance
reader.addReaderClosedListener(purgeReader);
@ -191,11 +189,11 @@ class FieldCacheImpl implements FieldCache {
final Map<Object,Map<Entry,Object>> readerCache = new WeakHashMap<Object,Map<Entry,Object>>();
protected abstract Object createValue(IndexReader reader, Entry key, boolean setDocsWithField)
protected abstract Object createValue(AtomicIndexReader reader, Entry key, boolean setDocsWithField)
throws IOException;
/** Remove this reader from the cache, if present. */
public void purge(IndexReader r) {
public void purge(AtomicIndexReader r) {
Object readerKey = r.getCoreCacheKey();
synchronized(readerCache) {
readerCache.remove(readerKey);
@ -204,7 +202,7 @@ class FieldCacheImpl implements FieldCache {
/** Sets the key to the value for the provided reader;
* if the key is already set then this doesn't change it. */
public void put(IndexReader reader, Entry key, Object value) {
public void put(AtomicIndexReader reader, Entry key, Object value) {
final Object readerKey = reader.getCoreCacheKey();
synchronized (readerCache) {
Map<Entry,Object> innerCache = readerCache.get(readerKey);
@ -223,7 +221,7 @@ class FieldCacheImpl implements FieldCache {
}
}
public Object get(IndexReader reader, Entry key, boolean setDocsWithField) throws IOException {
public Object get(AtomicIndexReader reader, Entry key, boolean setDocsWithField) throws IOException {
Map<Entry,Object> innerCache;
Object value;
final Object readerKey = reader.getCoreCacheKey();
@ -321,12 +319,12 @@ class FieldCacheImpl implements FieldCache {
}
// inherit javadocs
public byte[] getBytes (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
public byte[] getBytes (AtomicIndexReader reader, String field, boolean setDocsWithField) throws IOException {
return getBytes(reader, field, null, setDocsWithField);
}
// inherit javadocs
public byte[] getBytes(IndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
public byte[] getBytes(AtomicIndexReader reader, String field, ByteParser parser, boolean setDocsWithField)
throws IOException {
return (byte[]) caches.get(Byte.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@ -336,7 +334,7 @@ class FieldCacheImpl implements FieldCache {
super(wrapper);
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
ByteParser parser = (ByteParser) entryKey.custom;
@ -393,12 +391,12 @@ class FieldCacheImpl implements FieldCache {
}
// inherit javadocs
public short[] getShorts (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
public short[] getShorts (AtomicIndexReader reader, String field, boolean setDocsWithField) throws IOException {
return getShorts(reader, field, null, setDocsWithField);
}
// inherit javadocs
public short[] getShorts(IndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
public short[] getShorts(AtomicIndexReader reader, String field, ShortParser parser, boolean setDocsWithField)
throws IOException {
return (short[]) caches.get(Short.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@ -409,7 +407,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
ShortParser parser = (ShortParser) entryKey.custom;
@ -466,7 +464,7 @@ class FieldCacheImpl implements FieldCache {
}
// null Bits means no docs matched
void setDocsWithField(IndexReader reader, String field, Bits docsWithField) {
void setDocsWithField(AtomicIndexReader reader, String field, Bits docsWithField) {
final int maxDoc = reader.maxDoc();
final Bits bits;
if (docsWithField == null) {
@ -487,12 +485,12 @@ class FieldCacheImpl implements FieldCache {
}
// inherit javadocs
public int[] getInts (IndexReader reader, String field, boolean setDocsWithField) throws IOException {
public int[] getInts (AtomicIndexReader reader, String field, boolean setDocsWithField) throws IOException {
return getInts(reader, field, null, setDocsWithField);
}
// inherit javadocs
public int[] getInts(IndexReader reader, String field, IntParser parser, boolean setDocsWithField)
public int[] getInts(AtomicIndexReader reader, String field, IntParser parser, boolean setDocsWithField)
throws IOException {
return (int[]) caches.get(Integer.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@ -503,7 +501,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
IntParser parser = (IntParser) entryKey.custom;
@ -574,7 +572,7 @@ class FieldCacheImpl implements FieldCache {
}
}
public Bits getDocsWithField(IndexReader reader, String field)
public Bits getDocsWithField(AtomicIndexReader reader, String field)
throws IOException {
return (Bits) caches.get(DocsWithFieldCache.class).get(reader, new Entry(field, null), false);
}
@ -585,7 +583,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
final String field = entryKey.field;
FixedBitSet res = null;
@ -635,13 +633,13 @@ class FieldCacheImpl implements FieldCache {
}
// inherit javadocs
public float[] getFloats (IndexReader reader, String field, boolean setDocsWithField)
public float[] getFloats (AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException {
return getFloats(reader, field, null, setDocsWithField);
}
// inherit javadocs
public float[] getFloats(IndexReader reader, String field, FloatParser parser, boolean setDocsWithField)
public float[] getFloats(AtomicIndexReader reader, String field, FloatParser parser, boolean setDocsWithField)
throws IOException {
return (float[]) caches.get(Float.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
@ -653,7 +651,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FloatParser parser = (FloatParser) entryKey.custom;
@ -725,12 +723,12 @@ class FieldCacheImpl implements FieldCache {
}
public long[] getLongs(IndexReader reader, String field, boolean setDocsWithField) throws IOException {
public long[] getLongs(AtomicIndexReader reader, String field, boolean setDocsWithField) throws IOException {
return getLongs(reader, field, null, setDocsWithField);
}
// inherit javadocs
public long[] getLongs(IndexReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
public long[] getLongs(AtomicIndexReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
throws IOException {
return (long[]) caches.get(Long.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@ -741,7 +739,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FieldCache.LongParser parser = (FieldCache.LongParser) entryKey.custom;
@ -813,13 +811,13 @@ class FieldCacheImpl implements FieldCache {
}
// inherit javadocs
public double[] getDoubles(IndexReader reader, String field, boolean setDocsWithField)
public double[] getDoubles(AtomicIndexReader reader, String field, boolean setDocsWithField)
throws IOException {
return getDoubles(reader, field, null, setDocsWithField);
}
// inherit javadocs
public double[] getDoubles(IndexReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
public double[] getDoubles(AtomicIndexReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
throws IOException {
return (double[]) caches.get(Double.TYPE).get(reader, new Entry(field, parser), setDocsWithField);
}
@ -830,7 +828,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField)
throws IOException {
String field = entryKey.field;
FieldCache.DoubleParser parser = (FieldCache.DoubleParser) entryKey.custom;
@ -1075,11 +1073,11 @@ class FieldCacheImpl implements FieldCache {
private static boolean DEFAULT_FASTER_BUT_MORE_RAM = true;
public DocTermsIndex getTermsIndex(IndexReader reader, String field) throws IOException {
public DocTermsIndex getTermsIndex(AtomicIndexReader reader, String field) throws IOException {
return getTermsIndex(reader, field, DEFAULT_FASTER_BUT_MORE_RAM);
}
public DocTermsIndex getTermsIndex(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
public DocTermsIndex getTermsIndex(AtomicIndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
return (DocTermsIndex) caches.get(DocTermsIndex.class).get(reader, new Entry(field, Boolean.valueOf(fasterButMoreRAM)), false);
}
@ -1089,7 +1087,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
Terms terms = reader.terms(entryKey.field);
@ -1220,11 +1218,11 @@ class FieldCacheImpl implements FieldCache {
// TODO: this if DocTermsIndex was already created, we
// should share it...
public DocTerms getTerms(IndexReader reader, String field) throws IOException {
public DocTerms getTerms(AtomicIndexReader reader, String field) throws IOException {
return getTerms(reader, field, DEFAULT_FASTER_BUT_MORE_RAM);
}
public DocTerms getTerms(IndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
public DocTerms getTerms(AtomicIndexReader reader, String field, boolean fasterButMoreRAM) throws IOException {
return (DocTerms) caches.get(DocTerms.class).get(reader, new Entry(field, Boolean.valueOf(fasterButMoreRAM)), false);
}
@ -1234,7 +1232,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
Terms terms = reader.terms(entryKey.field);
@ -1308,7 +1306,7 @@ class FieldCacheImpl implements FieldCache {
}
}
public DocTermOrds getDocTermOrds(IndexReader reader, String field) throws IOException {
public DocTermOrds getDocTermOrds(AtomicIndexReader reader, String field) throws IOException {
return (DocTermOrds) caches.get(DocTermOrds.class).get(reader, new Entry(field, null), false);
}
@ -1318,7 +1316,7 @@ class FieldCacheImpl implements FieldCache {
}
@Override
protected Object createValue(IndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
protected Object createValue(AtomicIndexReader reader, Entry entryKey, boolean setDocsWithField /* ignored */)
throws IOException {
return new DocTermOrds(reader, entryKey.field);
}
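
Entries are keyed by the atomic reader's core cache key and dropped via the close listeners above, or explicitly (atomicReader is a placeholder):

    // Sketch only: explicit eviction of one atomic reader's entries.
    FieldCache.DEFAULT.purge(atomicReader);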

View File

@ -22,6 +22,7 @@ import java.util.*;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexReader;
@ -177,7 +178,7 @@ public class MultiPhraseQuery extends Query {
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
assert !termArrays.isEmpty();
final IndexReader reader = context.reader;
final AtomicIndexReader reader = context.reader;
final Bits liveDocs = acceptDocs;
PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()];

View File

@ -22,7 +22,7 @@ import java.io.IOException;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.FixedBitSet;
@ -83,7 +83,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
*/
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
final IndexReader reader = context.reader;
final AtomicIndexReader reader = context.reader;
final Fields fields = reader.fields();
if (fields == null) {
// reader has no fields
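
A custom filter follows the same shape; the class below is hypothetical and only illustrates the typed context (it matches nothing until the elided DocIdSet construction is filled in):

    // Sketch only: the typed context gives direct per-segment access,
    // so no MultiFields indirection is needed at search time.
    public class FieldPresentFilter extends Filter {
      private final String field;
      public FieldPresentFilter(String field) { this.field = field; }
      @Override
      public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        final AtomicIndexReader reader = context.reader;
        if (reader.terms(field) == null) {
          return DocIdSet.EMPTY_DOCIDSET;  // segment has no such field
        }
        // ... build and return a real DocIdSet here
        return DocIdSet.EMPTY_DOCIDSET;
      }
    }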

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
@ -219,7 +220,7 @@ public class PhraseQuery extends Query {
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
assert !terms.isEmpty();
final IndexReader reader = context.reader;
final AtomicIndexReader reader = context.reader;
final Bits liveDocs = acceptDocs;
PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];
@ -270,7 +271,7 @@ public class PhraseQuery extends Query {
}
// only called from assert
private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
private boolean termNotInReader(AtomicIndexReader reader, String field, BytesRef bytes) throws IOException {
return reader.docFreq(field, bytes) == 0;
}

View File

@ -50,7 +50,6 @@ public class QueryWrapperFilter extends Filter {
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
// get a private context that is used to rewrite, createWeight and score eventually
assert context.reader.getTopReaderContext().isAtomic;
final AtomicReaderContext privateContext = (AtomicReaderContext) context.reader.getTopReaderContext();
final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
return new DocIdSet() {

View File

@ -121,7 +121,7 @@ public abstract class ScoringRewrite<Q extends Query> extends TermCollectingRewr
for (int i = 0; i < size; i++) {
final int pos = sort[i];
final Term term = new Term(query.getField(), col.terms.get(pos, new BytesRef()));
assert reader.docFreq(term) == termStates[pos].docFreq();
// nocommit: reenable this: assert reader.docFreq(term) == termStates[pos].docFreq();
addClause(result, term, termStates[pos].docFreq(), query.getBoost() * boost[pos], termStates[pos]);
}
}

View File

@ -25,6 +25,7 @@ import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.search.NRTManager; // javadocs
import org.apache.lucene.index.CompositeIndexReader; // javadocs
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.IOUtils;
@ -109,7 +110,8 @@ public class SearcherLifetimeManager implements Closeable {
public SearcherTracker(IndexSearcher searcher) {
this.searcher = searcher;
version = searcher.getIndexReader().getVersion();
// nocommit: fix getVersion() usage for atomic readers
version = ((CompositeIndexReader) searcher.getIndexReader()).getVersion();
searcher.getIndexReader().incRef();
// Use nanoTime not currentTimeMillis since it [in
// theory] reduces risk from clock shift
@ -168,7 +170,8 @@ public class SearcherLifetimeManager implements Closeable {
// TODO: we don't have to use IR.getVersion to track;
// could be risky (if it's buggy); we could get better
// bug isolation if we assign our own private ID:
final long version = searcher.getIndexReader().getVersion();
// nocommit: fix getVersion() usage for atomic readers
final long version = ((CompositeIndexReader) searcher.getIndexReader()).getVersion();
SearcherTracker tracker = searchers.get(version);
if (tracker == null) {
//System.out.println("RECORD version=" + version + " ms=" + System.currentTimeMillis());

View File

@ -23,6 +23,7 @@ import java.util.concurrent.Semaphore;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.CompositeIndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.NRTManager; // javadocs
import org.apache.lucene.search.IndexSearcher; // javadocs
@ -144,7 +145,10 @@ public final class SearcherManager implements Closeable {
final IndexReader newReader;
final IndexSearcher searcherToReopen = acquire();
try {
newReader = IndexReader.openIfChanged(searcherToReopen.getIndexReader());
final IndexReader r = searcherToReopen.getIndexReader();
newReader = (r instanceof CompositeIndexReader) ?
IndexReader.openIfChanged((CompositeIndexReader) r) :
null;
} finally {
release(searcherToReopen);
}
@ -172,13 +176,16 @@ public final class SearcherManager implements Closeable {
/**
* Returns <code>true</code> if no changes have occurred since this searcher
* (i.e. its reader) was opened, otherwise <code>false</code>.
* @see IndexReader#isCurrent()
* @see CompositeIndexReader#isCurrent()
*/
public boolean isSearcherCurrent() throws CorruptIndexException,
IOException {
final IndexSearcher searcher = acquire();
try {
return searcher.getIndexReader().isCurrent();
final IndexReader r = searcher.getIndexReader();
return r instanceof CompositeIndexReader ?
((CompositeIndexReader) r).isCurrent() :
true;
} finally {
release(searcher);
}
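
Callers keep the usual acquire/try/finally discipline around every search (searcherManager and query are placeholders):

    // Sketch only: standard SearcherManager usage.
    IndexSearcher s = searcherManager.acquire();
    try {
      TopDocs hits = s.search(query, 10);
      // use hits while the searcher is held
    } finally {
      searcherManager.release(s);
    }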

View File

@ -23,7 +23,7 @@ import java.util.Set;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
@ -117,7 +117,7 @@ public class TermQuery extends Query {
return termsEnum;
}
private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
private boolean termNotInReader(AtomicIndexReader reader, String field, BytesRef bytes) throws IOException {
// only called from assert
//System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
return reader.docFreq(field, bytes) == 0;

View File

@ -160,7 +160,7 @@ public abstract class TopTermsRewrite<Q extends Query> extends TermCollectingRew
for (final ScoreTerm st : scoreTerms) {
final Term term = new Term(query.field, st.bytes);
assert reader.docFreq(term) == st.termState.docFreq() : "reader DF is " + reader.docFreq(term) + " vs " + st.termState.docFreq() + " term=" + term;
//nocommit: reenable this: assert reader.docFreq(term) == st.termState.docFreq() : "reader DF is " + reader.docFreq(term) + " vs " + st.termState.docFreq() + " term=" + term;
addClause(q, term, st.termState.docFreq(), query.getBoost() * st.boost, st.termState); // add to query
}
return q;

View File

@ -23,7 +23,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
@ -146,9 +146,6 @@ public final class FieldCacheSanityChecker {
insanity.addAll(checkValueMismatch(valIdToItems,
readerFieldToValIds,
valMismatchKeys));
insanity.addAll(checkSubreaders(valIdToItems,
readerFieldToValIds));
return insanity.toArray(new Insanity[insanity.size()]);
}
@ -189,107 +186,6 @@ public final class FieldCacheSanityChecker {
return insanity;
}
/**
* Internal helper method used by check that iterates over
* the keys of readerFieldToValIds and generates a Collection
* of Insanity instances whenever two (or more) ReaderField instances are
* found that have an ancestry relationship.
*
* @see InsanityType#SUBREADER
*/
private Collection<Insanity> checkSubreaders( MapOfSets<Integer, CacheEntry> valIdToItems,
MapOfSets<ReaderField, Integer> readerFieldToValIds) {
final List<Insanity> insanity = new ArrayList<Insanity>(23);
Map<ReaderField, Set<ReaderField>> badChildren = new HashMap<ReaderField, Set<ReaderField>>(17);
MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();
Set<ReaderField> seen = new HashSet<ReaderField>(17);
Set<ReaderField> readerFields = rfToValIdSets.keySet();
for (final ReaderField rf : readerFields) {
if (seen.contains(rf)) continue;
List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
for (Object kidKey : kids) {
ReaderField kid = new ReaderField(kidKey, rf.fieldName);
if (badChildren.containsKey(kid)) {
// we've already processed this kid as RF and found other problems
// track those problems as our own
badKids.put(rf, kid);
badKids.putAll(rf, badChildren.get(kid));
badChildren.remove(kid);
} else if (rfToValIdSets.containsKey(kid)) {
// we have cache entries for the kid
badKids.put(rf, kid);
}
seen.add(kid);
}
seen.add(rf);
}
// every mapping in badKids represents an Insanity
for (final ReaderField parent : badChildren.keySet()) {
Set<ReaderField> kids = badChildren.get(parent);
List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);
// put parent entr(ies) in first
{
for (final Integer value : rfToValIdSets.get(parent)) {
badEntries.addAll(viToItemSets.get(value));
}
}
// now the entries for the descendants
for (final ReaderField kid : kids) {
for (final Integer value : rfToValIdSets.get(kid)) {
badEntries.addAll(viToItemSets.get(value));
}
}
CacheEntry[] badness = new CacheEntry[badEntries.size()];
badness = badEntries.toArray(badness);
insanity.add(new Insanity(InsanityType.SUBREADER,
"Found caches for descendants of " +
parent.toString(),
badness));
}
return insanity;
}
/**
* Checks if the seed is an IndexReader, and if so walks
* the hierarchy of subReaders building up a list of the objects
* returned by obj.getCoreCacheKey()
*/
private List<Object> getAllDescendantReaderKeys(Object seed) {
List<Object> all = new ArrayList<Object>(17); // will grow as we iterate
all.add(seed);
for (int i = 0; i < all.size(); i++) {
Object obj = all.get(i);
if (obj instanceof IndexReader) {
IndexReader[] subs = ((IndexReader)obj).getSequentialSubReaders();
for (int j = 0; (null != subs) && (j < subs.length); j++) {
all.add(subs[j].getCoreCacheKey());
}
}
}
// need to skip the first, because it was the seed
return all.subList(1, all.size());
}
/**
* Simple pair object for using "readerKey + fieldName" as a Map key
*/

View File

@@ -25,6 +25,8 @@ import java.io.IOException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.AtomicIndexReader;
import org.apache.lucene.index.CompositeIndexReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader.CompositeReaderContext;
@@ -68,11 +70,11 @@ public final class ReaderUtil {
* @param reader
*/
public static void gatherSubReaders(final List<IndexReader> allSubReaders, IndexReader reader) {
public static void gatherSubReaders(final List<AtomicIndexReader> allSubReaders, IndexReader reader) {
try {
new Gather(reader) {
@Override
protected void add(int base, IndexReader r) {
protected void add(int base, AtomicIndexReader r) {
allSubReaders.add(r);
}
}.run();
@@ -103,13 +105,13 @@ public final class ReaderUtil {
}
private int run(int base, IndexReader reader) throws IOException {
IndexReader[] subReaders = reader.getSequentialSubReaders();
if (subReaders == null) {
if (reader instanceof AtomicIndexReader) {
// atomic reader
add(base, reader);
add(base, (AtomicIndexReader) reader);
base += reader.maxDoc();
} else {
// composite reader
assert reader instanceof CompositeIndexReader : "must be a composite reader";
IndexReader[] subReaders = ((CompositeIndexReader) reader).getSequentialSubReaders();
for (int i = 0; i < subReaders.length; i++) {
base = run(base, subReaders[i]);
}
@@ -118,46 +120,10 @@ public final class ReaderUtil {
return base;
}
protected abstract void add(int base, IndexReader r) throws IOException;
protected abstract void add(int base, AtomicIndexReader r) throws IOException;
}
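A usage sketch of the retyped callback, assuming a top-level reader named topReader and an enclosing method that throws IOException; the subclass body is illustrative only:

    // Hedged sketch: collect every atomic leaf of a possibly-composite
    // reader via the new AtomicIndexReader-typed add() callback.
    final List<AtomicIndexReader> leaves = new ArrayList<AtomicIndexReader>();
    new ReaderUtil.Gather(topReader) {
      @Override
      protected void add(int base, AtomicIndexReader r) {
        leaves.add(r); // base is r's first docID within the composite
      }
    }.run();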
/**
* Returns the sub IndexReader that contains the given document id.
*
* @param doc id of document
* @param reader parent reader
* @return sub reader of parent which contains the specified doc id
*/
public static IndexReader subReader(int doc, IndexReader reader) {
List<IndexReader> subReadersList = new ArrayList<IndexReader>();
ReaderUtil.gatherSubReaders(subReadersList, reader);
IndexReader[] subReaders = subReadersList
.toArray(new IndexReader[subReadersList.size()]);
int[] docStarts = new int[subReaders.length];
int maxDoc = 0;
for (int i = 0; i < subReaders.length; i++) {
docStarts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc();
}
return subReaders[subIndex(doc, docStarts)];
}
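The removed helper relies on ReaderUtil.subIndex and the usual docStarts mapping; a hedged sketch of that lookup (the method itself is not shown in this hunk):

    // Hedged sketch: docStarts[i] is the first global docID of sub-reader i,
    // so e.g. starts {0, 10, 15} map doc 12 to sub-reader 1. On a miss,
    // binarySearch returns -(insertionPoint) - 1, hence the -idx - 2 below.
    static int subIndex(int doc, int[] docStarts) {
      final int idx = java.util.Arrays.binarySearch(docStarts, doc);
      return idx >= 0 ? idx : -idx - 2; // insertion point minus one
    }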
/**
* Returns sub-reader subIndex from reader.
*
* @param reader parent reader
* @param subIndex index of desired sub reader
* @return the subreader at subIndex
*/
public static IndexReader subReader(IndexReader reader, int subIndex) {
List<IndexReader> subReadersList = new ArrayList<IndexReader>();
ReaderUtil.gatherSubReaders(subReadersList, reader);
IndexReader[] subReaders = subReadersList
.toArray(new IndexReader[subReadersList.size()]);
return subReaders[subIndex];
}
public static ReaderContext buildReaderContext(IndexReader reader) {
public static ReaderContext<? extends IndexReader> buildReaderContext(IndexReader reader) {
return new ReaderContextBuilder(reader).build();
}
@@ -171,24 +137,25 @@ public final class ReaderUtil {
leaves = new AtomicReaderContext[numLeaves(reader)];
}
public ReaderContext build() {
public ReaderContext<? extends IndexReader> build() {
return build(null, reader, 0, 0);
}
private ReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
IndexReader[] sequentialSubReaders = reader.getSequentialSubReaders();
if (sequentialSubReaders == null) {
AtomicReaderContext atomic = new AtomicReaderContext(parent, reader, ord, docBase, leafOrd, leafDocBase);
private ReaderContext<? extends IndexReader> build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
if (reader instanceof AtomicIndexReader) {
AtomicReaderContext atomic = new AtomicReaderContext(parent, (AtomicIndexReader) reader, ord, docBase, leafOrd, leafDocBase);
leaves[leafOrd++] = atomic;
leafDocBase += reader.maxDoc();
return atomic;
} else {
ReaderContext[] children = new ReaderContext[sequentialSubReaders.length];
CompositeIndexReader cr = (CompositeIndexReader) reader;
IndexReader[] sequentialSubReaders = cr.getSequentialSubReaders();
@SuppressWarnings({"unchecked","rawtypes"}) ReaderContext<? extends IndexReader>[] children = new ReaderContext[sequentialSubReaders.length];
final CompositeReaderContext newParent;
if (parent == null) {
newParent = new CompositeReaderContext(reader, children, leaves);
newParent = new CompositeReaderContext(cr, children, leaves);
} else {
newParent = new CompositeReaderContext(parent, reader, ord, docBase, children);
newParent = new CompositeReaderContext(parent, cr, ord, docBase, children);
}
int newDocBase = 0;
@@ -205,7 +172,7 @@ public final class ReaderUtil {
try {
new Gather(reader) {
@Override
protected void add(int base, IndexReader r) {
protected void add(int base, AtomicIndexReader r) {
numLeaves[0]++;
}
}.run();
@@ -224,11 +191,10 @@ public final class ReaderUtil {
* <code>null</code> the given context must be an instance of
* {@link AtomicReaderContext}
*/
public static AtomicReaderContext[] leaves(ReaderContext context) {
public static AtomicReaderContext[] leaves(ReaderContext<? extends IndexReader> context) {
assert context != null && context.isTopLevel : "context must be non-null & top-level";
final AtomicReaderContext[] leaves = context.leaves();
if (leaves == null) {
assert context.isAtomic : "top-level context without leaves must be atomic";
return new AtomicReaderContext[] { (AtomicReaderContext) context };
}
return leaves;
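A hedged usage sketch tying the generified context methods together; the context fields used (docBase) are assumptions based on this branch's API:

    // Hedged sketch: build the context tree once, then visit each atomic
    // leaf; docBase offsets the leaf's local docIDs into the composite.
    ReaderContext<? extends IndexReader> top = ReaderUtil.buildReaderContext(topReader);
    for (AtomicReaderContext leaf : ReaderUtil.leaves(top)) {
      System.out.println("leaf docBase=" + leaf.docBase);
    }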
@@ -238,7 +204,7 @@ public final class ReaderUtil {
* Walks up the reader tree and returns the given context's top level reader
* context, or in other words the reader tree's root context.
*/
public static ReaderContext getTopLevelContext(ReaderContext context) {
public static ReaderContext getTopLevelContext(ReaderContext<? extends IndexReader> context) {
while (context.parent != null) {
context = context.parent;
}
@@ -310,10 +276,10 @@ public final class ReaderUtil {
/** Call this to get the (merged) FieldInfos for a
* composite reader */
public static FieldInfos getMergedFieldInfos(IndexReader reader) {
final List<IndexReader> subReaders = new ArrayList<IndexReader>();
final List<AtomicIndexReader> subReaders = new ArrayList<AtomicIndexReader>();
ReaderUtil.gatherSubReaders(subReaders, reader);
final FieldInfos fieldInfos = new FieldInfos();
for(IndexReader subReader : subReaders) {
for(AtomicIndexReader subReader : subReaders) {
fieldInfos.add(subReader.getFieldInfos());
}
return fieldInfos;
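A short usage sketch (topReader is an assumed open reader; size() keeps the example free of iteration-API assumptions):

    // Hedged sketch: merge per-segment FieldInfos across all atomic leaves.
    final FieldInfos merged = ReaderUtil.getMergedFieldInfos(topReader);
    System.out.println("distinct fields across segments: " + merged.size());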