mirror of https://github.com/apache/lucene.git
LUCENE-3633: Remove code duplication in MultiReader and DirectoryReader; make everything finally final :-)
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1212844 13f79535-47bb-0310-9956-ffa450edef68
parent 00e5695f20
commit 764059147f

BaseMultiReader.java (new file)
@@ -0,0 +1,184 @@
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.codecs.PerDocValues;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ReaderUtil;

abstract class BaseMultiReader<R extends IndexReader> extends IndexReader implements Cloneable {
  protected final R[] subReaders;
  protected final int[] starts;           // 1st docno for each segment
  private final ReaderContext topLevelContext;
  private final int maxDoc;
  private final int numDocs;
  private final boolean hasDeletions;

  protected BaseMultiReader(R[] subReaders) throws IOException {
    this.subReaders = subReaders;
    starts = new int[subReaders.length + 1];    // build starts array
    int maxDoc = 0, numDocs = 0;
    boolean hasDeletions = false;
    for (int i = 0; i < subReaders.length; i++) {
      starts[i] = maxDoc;
      maxDoc += subReaders[i].maxDoc();      // compute maxDocs
      numDocs += subReaders[i].numDocs();    // compute numDocs

      if (subReaders[i].hasDeletions()) {
        hasDeletions = true;
      }
    }
    starts[subReaders.length] = maxDoc;
    this.maxDoc = maxDoc;
    this.numDocs = numDocs;
    this.hasDeletions = hasDeletions;
    topLevelContext = ReaderUtil.buildReaderContext(this);
  }

  @Override
  public Fields fields() throws IOException {
    throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
  }

  @Override
  protected abstract IndexReader doOpenIfChanged() throws CorruptIndexException, IOException;

  @Override
  public abstract Object clone();

  @Override
  public Bits getLiveDocs() {
    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
  }

  @Override
  public Fields getTermVectors(int docID) throws IOException {
    ensureOpen();
    int i = readerIndex(docID);        // find segment num
    return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to segment
  }

  @Override
  public int numDocs() {
    // Don't call ensureOpen() here (it could affect performance)
    return numDocs;
  }

  @Override
  public int maxDoc() {
    // Don't call ensureOpen() here (it could affect performance)
    return maxDoc;
  }

  @Override
  public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
    ensureOpen();
    int i = readerIndex(docID);                          // find segment num
    subReaders[i].document(docID - starts[i], visitor);  // dispatch to segment reader
  }

  @Override
  public boolean hasDeletions() {
    ensureOpen();
    return hasDeletions;
  }

  /** Helper method for subclasses to get the corresponding reader for a doc ID */
  protected final int readerIndex(int n) {    // find reader for doc n:
    return readerIndex(n, this.starts, this.subReaders.length);
  }

  final static int readerIndex(int n, int[] starts, int numSubReaders) {  // find reader for doc n:
    int lo = 0;                      // search starts array
    int hi = numSubReaders - 1;      // for first element less

    while (hi >= lo) {
      int mid = (lo + hi) >>> 1;
      int midValue = starts[mid];
      if (n < midValue)
        hi = mid - 1;
      else if (n > midValue)
        lo = mid + 1;
      else {                         // found a match
        while (mid+1 < numSubReaders && starts[mid+1] == midValue) {
          mid++;                     // scan to last match
        }
        return mid;
      }
    }
    return hi;
  }

  @Override
  public boolean hasNorms(String field) throws IOException {
    ensureOpen();
    for (int i = 0; i < subReaders.length; i++) {
      if (subReaders[i].hasNorms(field)) return true;
    }
    return false;
  }

  @Override
  public synchronized byte[] norms(String field) throws IOException {
    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
  }

  @Override
  public int docFreq(String field, BytesRef t) throws IOException {
    ensureOpen();
    int total = 0;          // sum freqs in segments
    for (int i = 0; i < subReaders.length; i++) {
      total += subReaders[i].docFreq(field, t);
    }
    return total;
  }

  @Override
  public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
    ensureOpen();
    // maintain a unique set of field names
    Set<String> fieldSet = new HashSet<String>();
    for (IndexReader reader : subReaders) {
      Collection<String> names = reader.getFieldNames(fieldNames);
      fieldSet.addAll(names);
    }
    return fieldSet;
  }

  @Override
  public IndexReader[] getSequentialSubReaders() {
    return subReaders;
  }

  @Override
  public ReaderContext getTopReaderContext() {
    ensureOpen();
    return topLevelContext;
  }

  @Override
  public PerDocValues perDocValues() throws IOException {
    throw new UnsupportedOperationException("please use MultiPerDocValues#getPerDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
  }
}
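For context: the whole composite-reader design hangs on the starts array and the readerIndex binary search above. starts[i] holds the first composite docID of sub-reader i (and starts[numSubReaders] the total maxDoc), so resolving a composite docID to a (sub-reader, local docID) pair is a binary search over starts. The standalone sketch below is not part of the patch (class name and values are made up) but shows the same arithmetic end to end:

public class StartsDemo {
  // Same contract as BaseMultiReader.readerIndex: starts[i] is the first
  // composite docID of sub-reader i, starts[numSubReaders] is total maxDoc.
  static int readerIndex(int n, int[] starts, int numSubReaders) {
    int lo = 0, hi = numSubReaders - 1;
    while (hi >= lo) {
      int mid = (lo + hi) >>> 1;
      if (n < starts[mid]) hi = mid - 1;
      else if (n > starts[mid]) lo = mid + 1;
      else {
        // equal adjacent start values mean empty sub-readers; take the last
        while (mid + 1 < numSubReaders && starts[mid + 1] == starts[mid]) mid++;
        return mid;
      }
    }
    return hi; // n falls strictly inside sub-reader hi
  }

  public static void main(String[] args) {
    int[] starts = {0, 100, 250, 250, 400}; // sub-reader 2 is empty
    for (int docID : new int[] {0, 99, 100, 249, 250, 399}) {
      int i = readerIndex(docID, starts, 4);
      System.out.println("docID " + docID + " -> sub-reader " + i
          + ", local docID " + (docID - starts[i]));
    }
  }
}

Empty segments produce equal adjacent entries in starts, which is why the "scan to last match" step in the real code exists.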
DirectoryReader.java
@@ -23,99 +23,76 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.MapBackedSet;
 
 /**
  * An IndexReader which reads indexes with multiple segments.
  */
-class DirectoryReader extends IndexReader implements Cloneable {
-  protected Directory directory;
-
-  IndexWriter writer;
-
+final class DirectoryReader extends BaseMultiReader<SegmentReader> {
+  protected final Directory directory;
+  private final IndexWriter writer;
   private final SegmentInfos segmentInfos;
   private final int termInfosIndexDivisor;
-
-  private SegmentReader[] subReaders;
-  private ReaderContext topLevelReaderContext;
-  private int[] starts;                           // 1st docno for each segment
-  private int maxDoc = 0;
-  private int numDocs = -1;
-  private boolean hasDeletions = false;
-
   private final boolean applyAllDeletes;
 
+  DirectoryReader(SegmentReader[] readers, Directory directory, IndexWriter writer,
+      SegmentInfos sis, int termInfosIndexDivisor, boolean applyAllDeletes,
+      Collection<ReaderFinishedListener> readerFinishedListeners
+      ) throws IOException {
+    super(readers);
+    this.directory = directory;
+    this.writer = writer;
+    this.segmentInfos = sis;
+    this.termInfosIndexDivisor = termInfosIndexDivisor;
+    this.readerFinishedListeners = readerFinishedListeners;
+    this.applyAllDeletes = applyAllDeletes;
+  }
+
   static IndexReader open(final Directory directory, final IndexCommit commit,
       final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
     return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
       @Override
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
-        SegmentInfos infos = new SegmentInfos();
-        infos.read(directory, segmentFileName);
-        return new DirectoryReader(directory, infos, termInfosIndexDivisor);
-      }
-    }.run(commit);
-  }
-
-  /** Construct reading the named set of readers. */
-  DirectoryReader(Directory directory, SegmentInfos sis, int termInfosIndexDivisor) throws IOException {
-    this.directory = directory;
-    this.segmentInfos = sis;
-    this.termInfosIndexDivisor = termInfosIndexDivisor;
-    readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
-    applyAllDeletes = false;
-
-    // To reduce the chance of hitting FileNotFound
-    // (and having to retry), we open segments in
-    // reverse because IndexWriter merges & deletes
-    // the newest segments first.
-
-    SegmentReader[] readers = new SegmentReader[sis.size()];
-    for (int i = sis.size()-1; i >= 0; i--) {
-      boolean success = false;
-      try {
-        readers[i] = SegmentReader.get(sis.info(i), termInfosIndexDivisor, IOContext.READ);
-        readers[i].readerFinishedListeners = readerFinishedListeners;
-        success = true;
-      } finally {
-        if (!success) {
-          // Close all readers we had opened:
-          for(i++;i<sis.size();i++) {
-            try {
-              readers[i].close();
-            } catch (Throwable ignore) {
-              // keep going - we want to clean up as much as possible
+        final Collection<ReaderFinishedListener> readerFinishedListeners =
+          new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
+        SegmentInfos sis = new SegmentInfos();
+        sis.read(directory, segmentFileName);
+        final SegmentReader[] readers = new SegmentReader[sis.size()];
+        for (int i = sis.size()-1; i >= 0; i--) {
+          boolean success = false;
+          try {
+            readers[i] = SegmentReader.get(sis.info(i), termInfosIndexDivisor, IOContext.READ);
+            readers[i].readerFinishedListeners = readerFinishedListeners;
+            success = true;
+          } finally {
+            if (!success) {
+              // Close all readers we had opened:
+              for(i++;i<sis.size();i++) {
+                try {
+                  readers[i].close();
+                } catch (Throwable ignore) {
+                  // keep going - we want to clean up as much as possible
+                }
+              }
             }
           }
         }
+        return new DirectoryReader(readers, directory, null, sis, termInfosIndexDivisor,
+            false, readerFinishedListeners);
       }
-    }
-
-    initialize(readers);
+    }.run(commit);
   }
 
   // Used by near real-time search
-  DirectoryReader(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes) throws IOException {
-    this.directory = writer.getDirectory();
-    this.applyAllDeletes = applyAllDeletes;       // saved for reopen
-
-    this.termInfosIndexDivisor = writer.getConfig().getReaderTermsIndexDivisor();
-    readerFinishedListeners = writer.getReaderFinishedListeners();
-
+  static DirectoryReader open(IndexWriter writer, SegmentInfos infos, boolean applyAllDeletes) throws IOException {
     // IndexWriter synchronizes externally before calling
     // us, which ensures infos will not change; so there's
    // no need to process segments in reverse order
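A detail worth calling out in the doBody block above: segments are opened in reverse index order, and if any open fails, the finally block closes the readers already opened (those at indices above i), swallowing secondary exceptions so the original one propagates. The same pattern in isolation, as a minimal sketch (OpenAll and Factory are illustrative names, not Lucene API):

import java.io.Closeable;
import java.io.IOException;

final class OpenAll {
  interface Factory { Closeable open(int i) throws IOException; }

  // Opens resources[n-1..0]; on failure, closes the ones already opened
  // (indices above i) and lets the original exception propagate, exactly
  // like the reverse-order loop in DirectoryReader.open's doBody.
  static Closeable[] openAll(int n, Factory f) throws IOException {
    Closeable[] opened = new Closeable[n];
    for (int i = n - 1; i >= 0; i--) {
      boolean success = false;
      try {
        opened[i] = f.open(i);
        success = true;
      } finally {
        if (!success) {
          for (i++; i < n; i++) {
            try {
              opened[i].close();
            } catch (Throwable ignore) {
              // keep going - clean up as much as possible
            }
          }
        }
      }
    }
    return opened;
  }
}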
@@ -124,7 +101,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
     List<SegmentReader> readers = new ArrayList<SegmentReader>();
     final Directory dir = writer.getDirectory();
 
-    segmentInfos = (SegmentInfos) infos.clone();
+    final SegmentInfos segmentInfos = (SegmentInfos) infos.clone();
     int infosUpto = 0;
     for (int i=0;i<numSegments;i++) {
       boolean success = false;
@@ -133,7 +110,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
       assert info.dir == dir;
       final SegmentReader reader = writer.readerPool.getReadOnlyClone(info, IOContext.READ);
       if (reader.numDocs() > 0 || writer.getKeepFullyDeletedSegments()) {
-        reader.readerFinishedListeners = readerFinishedListeners;
+        reader.readerFinishedListeners = writer.getReaderFinishedListeners();
         readers.add(reader);
         infosUpto++;
       } else {
@@ -154,24 +131,18 @@ class DirectoryReader extends IndexReader implements Cloneable {
         }
       }
     }
-
-    this.writer = writer;
 
-    initialize(readers.toArray(new SegmentReader[readers.size()]));
+    return new DirectoryReader(readers.toArray(new SegmentReader[readers.size()]),
+        dir, writer, segmentInfos, writer.getConfig().getReaderTermsIndexDivisor(),
+        applyAllDeletes, writer.getReaderFinishedListeners());
   }
 
   /** This constructor is only used for {@link #doOpenIfChanged()} */
-  DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders,
-      boolean doClone, int termInfosIndexDivisor, Collection<ReaderFinishedListener> readerFinishedListeners) throws IOException {
-    this.directory = directory;
-    this.segmentInfos = infos;
-    this.termInfosIndexDivisor = termInfosIndexDivisor;
-    this.readerFinishedListeners = readerFinishedListeners;
-    applyAllDeletes = false;
-
+  static DirectoryReader open(Directory directory, IndexWriter writer, SegmentInfos infos, SegmentReader[] oldReaders,
+      boolean doClone, int termInfosIndexDivisor, Collection<ReaderFinishedListener> readerFinishedListeners
+      ) throws IOException {
     // we put the old SegmentReaders in a map, that allows us
     // to lookup a reader using its segment name
-    Map<String,Integer> segmentReaders = new HashMap<String,Integer>();
+    final Map<String,Integer> segmentReaders = new HashMap<String,Integer>();
 
     if (oldReaders != null) {
       // create a Map SegmentName->SegmentReader
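The shape of the whole refactor is visible here: work that previously ran inside constructors plus a mutable initialize() now runs in static open(...) factories that finish by calling the one real constructor, which lets every field be declared final (the "finally final" of the commit message). The idiom in miniature, with illustrative names:

final class Holder {
  private final int[] starts; // every field can be final now

  private Holder(int[] starts) {
    this.starts = starts;
  }

  // the "constructor work" moves here, before the object exists
  static Holder open(int[] subSizes) {
    int[] starts = new int[subSizes.length + 1];
    for (int i = 0; i < subSizes.length; i++) {
      starts[i + 1] = starts[i] + subSizes[i];
    }
    return new Holder(starts);
  }
}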
@@ -247,9 +218,9 @@ class DirectoryReader extends IndexReader implements Cloneable {
       }
     }
   }
 
-    // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
-    initialize(newReaders);
+    return new DirectoryReader(newReaders,
+        directory, writer, infos, termInfosIndexDivisor,
+        false, readerFinishedListeners);
   }
 
   /** {@inheritDoc} */
@@ -273,41 +244,10 @@ class DirectoryReader extends IndexReader implements Cloneable {
     return buffer.toString();
   }
 
-  private void initialize(SegmentReader[] subReaders) throws IOException {
-    this.subReaders = subReaders;
-    starts = new int[subReaders.length + 1];    // build starts array
-    final AtomicReaderContext[] subReaderCtx = new AtomicReaderContext[subReaders.length];
-    topLevelReaderContext = new CompositeReaderContext(this, subReaderCtx, subReaderCtx);
-    final List<Fields> subFields = new ArrayList<Fields>();
-
-    for (int i = 0; i < subReaders.length; i++) {
-      starts[i] = maxDoc;
-      subReaderCtx[i] = new AtomicReaderContext(topLevelReaderContext, subReaders[i], i, maxDoc, i, maxDoc);
-      maxDoc += subReaders[i].maxDoc();          // compute maxDocs
-
-      if (subReaders[i].hasDeletions()) {
-        hasDeletions = true;
-      }
-
-      final Fields f = subReaders[i].fields();
-      if (f != null) {
-        subFields.add(f);
-      }
-    }
-    starts[subReaders.length] = maxDoc;
-  }
-
-  @Override
-  public Bits getLiveDocs() {
-    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
-  }
-
   @Override
   public final synchronized Object clone() {
     try {
-      DirectoryReader newReader = doOpenIfChanged((SegmentInfos) segmentInfos.clone(), true);
-      newReader.writer = writer;
-      newReader.hasDeletions = hasDeletions;
+      DirectoryReader newReader = doOpenIfChanged((SegmentInfos) segmentInfos.clone(), true, writer);
       assert newReader.readerFinishedListeners != null;
       return newReader;
     } catch (Exception ex) {
@@ -385,13 +325,13 @@ class DirectoryReader extends IndexReader implements Cloneable {
       protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
         final SegmentInfos infos = new SegmentInfos();
         infos.read(directory, segmentFileName);
-        return doOpenIfChanged(infos, false);
+        return doOpenIfChanged(infos, false, null);
       }
     }.run(commit);
   }
 
-  private synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException {
-    return new DirectoryReader(directory, infos, subReaders, doClone, termInfosIndexDivisor, readerFinishedListeners);
+  private synchronized DirectoryReader doOpenIfChanged(SegmentInfos infos, boolean doClone, IndexWriter writer) throws CorruptIndexException, IOException {
+    return DirectoryReader.open(directory, writer, infos, subReaders, doClone, termInfosIndexDivisor, readerFinishedListeners);
   }
 
   /** Version number when this IndexReader was opened. */
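doOpenIfChanged here is the engine behind the public IndexReader.openIfChanged(reader), which returns null when the index is unchanged. The usual caller pattern in this era of the API looks roughly like this (a sketch, not from the patch):

import org.apache.lucene.index.IndexReader;

final class RefreshSketch {
  // Returns the freshest reader, closing the stale one when a new commit exists.
  static IndexReader refresh(IndexReader reader) throws Exception {
    IndexReader newReader = IndexReader.openIfChanged(reader);
    if (newReader == null) {
      return reader;        // nothing changed; keep using the old reader
    }
    reader.close();         // release the stale reader
    return newReader;
  }
}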
@@ -401,118 +341,12 @@ class DirectoryReader extends IndexReader implements Cloneable {
     return segmentInfos.getVersion();
   }
 
-  @Override
-  public Fields getTermVectors(int docID) throws IOException {
-    ensureOpen();
-    int i = readerIndex(docID);        // find segment num
-    return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to segment
-  }
-
-  @Override
-  public int numDocs() {
-    // Don't call ensureOpen() here (it could affect performance)
-
-    // NOTE: multiple threads may wind up init'ing
-    // numDocs... but that's harmless
-    if (numDocs == -1) {        // check cache
-      int n = 0;                // cache miss--recompute
-      for (int i = 0; i < subReaders.length; i++)
-        n += subReaders[i].numDocs();      // sum from readers
-      numDocs = n;
-    }
-    return numDocs;
-  }
-
-  @Override
-  public int maxDoc() {
-    // Don't call ensureOpen() here (it could affect performance)
-    return maxDoc;
-  }
-
-  @Override
-  public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
-    ensureOpen();
-    int i = readerIndex(docID);                          // find segment num
-    subReaders[i].document(docID - starts[i], visitor);  // dispatch to segment reader
-  }
-
-  @Override
-  public boolean hasDeletions() {
-    ensureOpen();
-    return hasDeletions;
-  }
-
-  private int readerIndex(int n) {    // find reader for doc n:
-    return readerIndex(n, this.starts, this.subReaders.length);
-  }
-
-  final static int readerIndex(int n, int[] starts, int numSubReaders) {  // find reader for doc n:
-    int lo = 0;                      // search starts array
-    int hi = numSubReaders - 1;      // for first element less
-
-    while (hi >= lo) {
-      int mid = (lo + hi) >>> 1;
-      int midValue = starts[mid];
-      if (n < midValue)
-        hi = mid - 1;
-      else if (n > midValue)
-        lo = mid + 1;
-      else {                         // found a match
-        while (mid+1 < numSubReaders && starts[mid+1] == midValue) {
-          mid++;                     // scan to last match
-        }
-        return mid;
-      }
-    }
-    return hi;
-  }
-
-  @Override
-  public boolean hasNorms(String field) throws IOException {
-    ensureOpen();
-    for (int i = 0; i < subReaders.length; i++) {
-      if (subReaders[i].hasNorms(field)) return true;
-    }
-    return false;
-  }
-
-  @Override
-  public synchronized byte[] norms(String field) throws IOException {
-    ensureOpen();
-    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
-  }
-
-  @Override
-  public int docFreq(String field, BytesRef term) throws IOException {
-    ensureOpen();
-    int total = 0;          // sum freqs in segments
-    for (int i = 0; i < subReaders.length; i++) {
-      total += subReaders[i].docFreq(field, term);
-    }
-    return total;
-  }
-
-  @Override
-  public Fields fields() throws IOException {
-    throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
-  }
-
   @Override
   public Map<String,String> getCommitUserData() {
     ensureOpen();
     return segmentInfos.getUserData();
   }
 
   /**
    * Check whether this IndexReader is still using the current (i.e., most recently committed) version of the index.  If
    * a writer has committed any changes to the index since this reader was opened, this will return <code>false</code>,
    * in which case you must open a new IndexReader in order
    * to see the changes.  Use {@link IndexWriter#commit} to
    * commit changes to the index.
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
   @Override
   public boolean isCurrent() throws CorruptIndexException, IOException {
     ensureOpen();
@@ -546,33 +380,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
     if (ioe != null) throw ioe;
   }
 
-  @Override
-  public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
-    ensureOpen();
-    return getFieldNames(fieldNames, this.subReaders);
-  }
-
-  static Collection<String> getFieldNames (IndexReader.FieldOption fieldNames, IndexReader[] subReaders) {
-    // maintain a unique set of field names
-    Set<String> fieldSet = new HashSet<String>();
-    for (IndexReader reader : subReaders) {
-      Collection<String> names = reader.getFieldNames(fieldNames);
-      fieldSet.addAll(names);
-    }
-    return fieldSet;
-  }
-
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return topLevelReaderContext;
-  }
-
-  @Override
-  public IndexReader[] getSequentialSubReaders() {
-    return subReaders;
-  }
-
   /** Returns the directory this index resides in. */
   @Override
   public Directory directory() {
@@ -715,9 +522,4 @@ class DirectoryReader extends IndexReader implements Cloneable {
       throw new UnsupportedOperationException("This IndexCommit does not support deletions");
     }
   }
-
-  @Override
-  public PerDocValues perDocValues() throws IOException {
-    throw new UnsupportedOperationException("please use MultiPerDocValues#getPerDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
-  }
 }
IndexWriter.java
@@ -362,7 +362,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
         // just like we do when loading segments_N
         synchronized(this) {
           maybeApplyDeletes(applyAllDeletes);
-          r = new DirectoryReader(this, segmentInfos, applyAllDeletes);
+          r = DirectoryReader.open(this, segmentInfos, applyAllDeletes);
           if (infoStream.isEnabled("IW")) {
             infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
           }
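This call sits on the near-real-time path: the reader is built from the writer's in-flight SegmentInfos rather than from a committed segments_N. A rough usage sketch against the API of this tree (signatures may differ slightly in other versions; NrtSearchSketch is an illustrative name):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;

final class NrtSearchSketch {
  // writer is an already-configured, open IndexWriter
  static void searchLatest(IndexWriter writer) throws Exception {
    IndexReader reader = IndexReader.open(writer, true); // true = apply deletes
    try {
      // search over everything indexed so far, without waiting for a commit
    } finally {
      reader.close();
    }
  }
}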
MultiReader.java
@@ -18,26 +18,16 @@ package org.apache.lucene.index;
  */
 
 import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.lucene.index.codecs.PerDocValues;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.MapBackedSet;
 
 /** An IndexReader which reads multiple indexes, appending
  *  their content. */
-public class MultiReader extends IndexReader implements Cloneable {
-  protected IndexReader[] subReaders;
-  private final ReaderContext topLevelContext;
-  private int[] starts;                           // 1st docno for each segment
-  private boolean[] decrefOnClose;                // remember which subreaders to decRef on close
-  private int maxDoc = 0;
-  private int numDocs = -1;
-  private boolean hasDeletions = false;
+public class MultiReader extends BaseMultiReader<IndexReader> {
+  private boolean[] decrefOnClose; // remember which subreaders to decRef on close
 
   /**
    * <p>Construct a MultiReader aggregating the named set of (sub)readers.
@@ -45,81 +35,34 @@ public class MultiReader extends IndexReader implements Cloneable {
    * @param subReaders set of (sub)readers
    */
   public MultiReader(IndexReader... subReaders) throws IOException {
-    topLevelContext = initialize(subReaders, true);
+    this(subReaders, true);
   }
 
   /**
    * <p>Construct a MultiReader aggregating the named set of (sub)readers.
-   * @param subReaders set of (sub)readers
    * @param closeSubReaders indicates whether the subreaders should be closed
    * when this MultiReader is closed
+   * @param subReaders set of (sub)readers
    */
   public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
-    topLevelContext = initialize(subReaders, closeSubReaders);
-  }
-
-  private ReaderContext initialize(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
-    this.subReaders = subReaders.clone();
-    starts = new int[subReaders.length + 1];    // build starts array
+    super(subReaders.clone());
+    this.readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
     decrefOnClose = new boolean[subReaders.length];
     for (int i = 0; i < subReaders.length; i++) {
-      starts[i] = maxDoc;
-      maxDoc += subReaders[i].maxDoc();      // compute maxDocs
-
       if (!closeSubReaders) {
         subReaders[i].incRef();
         decrefOnClose[i] = true;
       } else {
         decrefOnClose[i] = false;
       }
-
-      if (subReaders[i].hasDeletions()) {
-        hasDeletions = true;
-      }
     }
-    starts[subReaders.length] = maxDoc;
-    readerFinishedListeners = new MapBackedSet<ReaderFinishedListener>(new ConcurrentHashMap<ReaderFinishedListener,Boolean>());
-    return ReaderUtil.buildReaderContext(this);
   }
 
-  @Override
-  public Fields fields() throws IOException {
-    throw new UnsupportedOperationException("please use MultiFields.getFields, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
-  }
-
   /**
    * Tries to reopen the subreaders.
    * <br>
    * If one or more subreaders could be re-opened (i. e. IndexReader.openIfChanged(subReader)
    * returned a new instance), then a new MultiReader instance
    * is returned, otherwise this instance is returned.
    * <p>
    * A re-opened instance might share one or more subreaders with the old
    * instance. Index modification operations result in undefined behavior
    * when performed before the old instance is closed.
    * (see {@link IndexReader#openIfChanged}).
    * <p>
    * If subreaders are shared, then the reference count of those
    * readers is increased to ensure that the subreaders remain open
    * until the last referring reader is closed.
    *
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
   @Override
   protected synchronized IndexReader doOpenIfChanged() throws CorruptIndexException, IOException {
     return doReopen(false);
   }
 
   /**
    * Clones the subreaders.
    * (see {@link IndexReader#clone()}).
    * <br>
    * <p>
    * If subreaders are shared, then the reference count of those
    * readers is increased to ensure that the subreaders remain open
    * until the last referring reader is closed.
    */
   @Override
   public synchronized Object clone() {
     try {
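Ownership here is handled by reference counting: with closeSubReaders == false, the constructor incRef()s each sub-reader and remembers, via decrefOnClose, to decRef() it on close, so a shared sub-reader stays open until its last referrer is closed. A usage sketch (r1 and r2 stand for readers opened elsewhere; SharedSubReadersSketch is an illustrative name):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;

final class SharedSubReadersSketch {
  // r1 and r2 remain usable by the caller after the MultiReader is closed.
  static void searchBoth(IndexReader r1, IndexReader r2) throws Exception {
    MultiReader multi = new MultiReader(new IndexReader[] {r1, r2}, false);
    try {
      // search across both indexes through `multi`
    } finally {
      multi.close(); // decRef()s r1 and r2; they stay open for the caller
    }
  }
}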
@@ -128,19 +71,7 @@ public class MultiReader extends IndexReader implements Cloneable {
       throw new RuntimeException(ex);
     }
   }
 
-  @Override
-  public Bits getLiveDocs() {
-    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
-  }
-
   /**
    * If clone is true then we clone each of the subreaders
    * @param doClone
    * @return New IndexReader, or null if open/clone is not necessary
    * @throws CorruptIndexException
    * @throws IOException
    */
   private IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
     ensureOpen();
@@ -194,74 +125,6 @@ public class MultiReader extends IndexReader implements Cloneable {
     }
   }
 
-  @Override
-  public Fields getTermVectors(int docID) throws IOException {
-    ensureOpen();
-    int i = readerIndex(docID);        // find segment num
-    return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to segment
-  }
-
-  @Override
-  public int numDocs() {
-    // Don't call ensureOpen() here (it could affect performance)
-    // NOTE: multiple threads may wind up init'ing
-    // numDocs... but that's harmless
-    if (numDocs == -1) {        // check cache
-      int n = 0;                // cache miss--recompute
-      for (int i = 0; i < subReaders.length; i++)
-        n += subReaders[i].numDocs();      // sum from readers
-      numDocs = n;
-    }
-    return numDocs;
-  }
-
-  @Override
-  public int maxDoc() {
-    // Don't call ensureOpen() here (it could affect performance)
-    return maxDoc;
-  }
-
-  @Override
-  public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
-    ensureOpen();
-    int i = readerIndex(docID);                          // find segment num
-    subReaders[i].document(docID - starts[i], visitor);  // dispatch to segment reader
-  }
-
-  @Override
-  public boolean hasDeletions() {
-    ensureOpen();
-    return hasDeletions;
-  }
-
-  private int readerIndex(int n) {    // find reader for doc n:
-    return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
-  }
-
-  @Override
-  public boolean hasNorms(String field) throws IOException {
-    ensureOpen();
-    for (int i = 0; i < subReaders.length; i++) {
-      if (subReaders[i].hasNorms(field)) return true;
-    }
-    return false;
-  }
-
-  @Override
-  public synchronized byte[] norms(String field) throws IOException {
-    throw new UnsupportedOperationException("please use MultiNorms.norms, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level norms");
-  }
-
-  @Override
-  public int docFreq(String field, BytesRef t) throws IOException {
-    ensureOpen();
-    int total = 0;          // sum freqs in segments
-    for (int i = 0; i < subReaders.length; i++) {
-      total += subReaders[i].docFreq(field, t);
-    }
-    return total;
-  }
-
   @Override
   protected synchronized void doClose() throws IOException {
     for (int i = 0; i < subReaders.length; i++) {
@@ -273,15 +136,6 @@ public class MultiReader extends IndexReader implements Cloneable {
     }
   }
 
-  @Override
-  public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
-    ensureOpen();
-    return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
-  }
-
   /**
    * Checks recursively if all subreaders are up to date.
    */
   @Override
   public boolean isCurrent() throws CorruptIndexException, IOException {
     ensureOpen();
@@ -302,17 +156,6 @@ public class MultiReader extends IndexReader implements Cloneable {
   public long getVersion() {
     throw new UnsupportedOperationException("MultiReader does not support this method.");
   }
 
-  @Override
-  public IndexReader[] getSequentialSubReaders() {
-    return subReaders;
-  }
-
-  @Override
-  public ReaderContext getTopReaderContext() {
-    ensureOpen();
-    return topLevelContext;
-  }
-
   @Override
   public void addReaderFinishedListener(ReaderFinishedListener listener) {
@@ -330,8 +173,4 @@ public class MultiReader extends IndexReader implements Cloneable {
     }
   }
 
-  @Override
-  public PerDocValues perDocValues() throws IOException {
-    throw new UnsupportedOperationException("please use MultiPerDocValues#getPerDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Fields");
-  }
 }
TestDocSet.java
@@ -337,13 +337,18 @@ public class TestDocSet extends LuceneTestCase {
   ***/
 
   public IndexReader dummyIndexReader(final int maxDoc) {
+    // TODO FIXME: THIS IS HEAVY BROKEN AND ILLEGAL TO DO (null delegate):
     IndexReader r = new FilterIndexReader(null) {
       @Override
       public int maxDoc() {
         return maxDoc;
       }
 
       @Override
       public int numDocs() {
         return maxDoc;
       }
 
+      @Override
+      public boolean hasDeletions() {
+        return false;