mirror of https://github.com/apache/lucene.git
LUCENE-2600: remove IR.isDeleted in favor of getDeletedDocs(); don't cache MultiBits in IR
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@987961 13f79535-47bb-0310-9956-ffa450edef68
parent 1601382d0c
commit 6a4bfc796f
@@ -105,6 +105,9 @@ Changes in backwards compatibility policy
calling setCalibrateSizeByDeletes(false) on the merge policy. (Mike
McCandless)

* LUCENE-2600: Remove IndexReader.isDeleted in favor of
IndexReader.getDeletedDocs(). (Mike McCandless)

API Changes

* LUCENE-2302, LUCENE-1458, LUCENE-2111, LUCENE-2514: Terms are no longer
@@ -266,3 +266,15 @@ LUCENE-1458, LUCENE-2111: Flexible Indexing
Likewise for DocsAndPositionsEnum.

* LUCENE-2600: remove IndexReader.isDeleted

Instead of IndexReader.isDeleted, do this:

import org.apache.lucene.util.Bits;
import org.apache.lucene.index.MultiFields;

Bits delDocs = MultiFields.getDeletedDocs(indexReader);
if (delDocs.get(docID)) {
// document is deleted...
}
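Note that MultiFields.getDeletedDocs(...) returns null when the reader has no deletions, so the call sites changed in this commit guard with a null check (for example "delDocs == null || !delDocs.get(j)" in CheckIndex below). A minimal null-safe sketch of the same migration; the helper class and method names are illustrative, not part of the commit:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits;

final class DeletedDocsUtil {
  // Null-safe port of the old reader.isDeleted(docID) idiom:
  // a null Bits simply means "no document is deleted".
  static boolean isDeleted(IndexReader reader, int docID) {
    final Bits delDocs = MultiFields.getDeletedDocs(reader);
    return delDocs != null && delDocs.get(docID);
  }
}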
@@ -41,6 +41,7 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;

/**

@@ -95,8 +96,9 @@ public abstract class ReadTask extends PerfTask {
// optionally warm and add num docs traversed to count
if (withWarm()) {
Document doc = null;
Bits delDocs = reader.getDeletedDocs();
for (int m = 0; m < reader.maxDoc(); m++) {
if (!reader.isDeleted(m)) {
if (!delDocs.get(m)) {
doc = reader.document(m);
res += (doc == null ? 0 : 1);
}
@@ -39,6 +39,7 @@ import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

/**

@@ -182,8 +183,9 @@ public class InstantiatedIndex
}

// create documents
final Bits delDocs = MultiFields.getDeletedDocs(sourceIndexReader);
for (int i = 0; i < sourceIndexReader.maxDoc(); i++) {
if (sourceIndexReader.hasDeletions() && sourceIndexReader.isDeleted(i)) {
if (delDocs != null && delDocs.get(i)) {
deletedDocuments.set(i);
} else {
InstantiatedDocument document = new InstantiatedDocument();
@@ -152,14 +152,6 @@ public class InstantiatedIndexReader extends IndexReader {
return index.getDeletedDocuments() != null || uncommittedDeletedDocuments != null;
}

@Override
public boolean isDeleted(int n) {
return (index.getDeletedDocuments() != null && index.getDeletedDocuments().get(n))
|| (uncommittedDeletedDocuments != null && uncommittedDeletedDocuments.get(n));
}

@Override
protected void doDelete(int docNum) throws IOException {

@@ -313,7 +305,7 @@ public class InstantiatedIndexReader extends IndexReader {
@Override
public Document document(int n) throws IOException {
return isDeleted(n) ? null : getIndex().getDocumentsByNumber()[n].getDocument();
return getIndex().getDocumentsByNumber()[n].getDocument();
}

/**
@@ -31,7 +31,6 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Payload;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsEnum;

@@ -46,6 +45,7 @@ import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;

/**
* Asserts equality of content and behaviour of two index readers.

@@ -303,8 +303,14 @@ public class TestIndicesEquals extends LuceneTestCase {
assertEquals(air.numDocs(), tir.numDocs());
assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());

for (int d =0; d<air.maxDoc(); d++) {
assertEquals(air.isDeleted(d), tir.isDeleted(d));
final Bits aDelDocs = MultiFields.getDeletedDocs(air);
final Bits tDelDocs = MultiFields.getDeletedDocs(tir);
assertTrue((aDelDocs != null && tDelDocs != null) ||
(aDelDocs == null && tDelDocs == null));
if (aDelDocs != null) {
for (int d =0; d<air.maxDoc(); d++) {
assertEquals(aDelDocs.get(d), tDelDocs.get(d));
}
}

air.close();

@@ -378,11 +384,16 @@ public class TestIndicesEquals extends LuceneTestCase {
assertEquals("norms does not equals for field " + field + " in document " + i, aprioriNorms[i], testNorms[i]);
}
}

}

for (int docIndex = 0; docIndex < aprioriReader.numDocs(); docIndex++) {
assertEquals(aprioriReader.isDeleted(docIndex), testReader.isDeleted(docIndex));
final Bits apDelDocs = MultiFields.getDeletedDocs(aprioriReader);
final Bits testDelDocs = MultiFields.getDeletedDocs(testReader);
assertTrue((apDelDocs != null && testDelDocs != null) ||
(apDelDocs == null && testDelDocs == null));
if (apDelDocs != null) {
for (int docIndex = 0; docIndex < aprioriReader.numDocs(); docIndex++) {
assertEquals(apDelDocs.get(docIndex), testDelDocs.get(docIndex));
}
}

// compare term enumeration stepping
@@ -1197,12 +1197,6 @@ public class MemoryIndex implements Serializable {
return new Document(); // there are no stored fields
}

@Override
public boolean isDeleted(int n) {
if (DEBUG) System.err.println("MemoryIndexReader.isDeleted");
return false;
}

@Override
public boolean hasDeletions() {
if (DEBUG) System.err.println("MemoryIndexReader.hasDeletions");
@@ -183,8 +183,10 @@ public class MultiPassIndexSplitter {
dels = new OpenBitSet(in.maxDoc());
if (in.hasDeletions()) {
oldDels = new OpenBitSet(in.maxDoc());
final Bits oldDelBits = MultiFields.getDeletedDocs(in);
assert oldDelBits != null;
for (int i = 0; i < in.maxDoc(); i++) {
if (in.isDeleted(i)) oldDels.set(i);
if (oldDelBits.get(i)) oldDels.set(i);
}
dels.or(oldDels);
}

@@ -205,7 +207,6 @@ public class MultiPassIndexSplitter {
if (oldDels != null) {
dels.or(oldDels);
}
storeDelDocs(null);
}

@Override

@@ -227,10 +228,5 @@ public class MultiPassIndexSplitter {
public Bits getDeletedDocs() {
return dels;
}

@Override
public boolean isDeleted(int n) {
return dels.get(n);
}
}
}
@@ -131,7 +131,7 @@ public class LengthNormModifier {
}

for (int d = 0; d < termCounts.length; d++) {
if (! reader.isDeleted(d)) {
if (!delDocs.get(d)) {
byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
reader.setNorm(d, fieldName, norm);
}
@@ -733,8 +733,9 @@ public class CheckIndex {
}

// Scan stored fields for all documents
final Bits delDocs = reader.getDeletedDocs();
for (int j = 0; j < info.docCount; ++j) {
if (!reader.isDeleted(j)) {
if (delDocs == null || !delDocs.get(j)) {
status.docCount++;
Document doc = reader.document(j);
status.totFields += doc.getFields().size();

@@ -770,8 +771,9 @@ public class CheckIndex {
infoStream.print(" test: term vectors........");
}

final Bits delDocs = reader.getDeletedDocs();
for (int j = 0; j < info.docCount; ++j) {
if (!reader.isDeleted(j)) {
if (delDocs == null || !delDocs.get(j)) {
status.docCount++;
TermFreqVector[] tfv = reader.getTermFreqVectors(j);
if (tfv != null) {
@@ -93,10 +93,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(directory, segmentFileName, codecs2);
if (readOnly)
return new ReadOnlyDirectoryReader(directory, infos, deletionPolicy, termInfosIndexDivisor, codecs2);
else
return new DirectoryReader(directory, infos, deletionPolicy, false, termInfosIndexDivisor, codecs2);
return new DirectoryReader(directory, infos, deletionPolicy, readOnly, termInfosIndexDivisor, codecs2);
}
}.run(commit);
}

@@ -503,11 +500,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
DirectoryReader reader;
if (openReadOnly) {
reader = new ReadOnlyDirectoryReader(directory, infos, subReaders, starts, normsCache, doClone, termInfosIndexDivisor, null);
} else {
reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, false, doClone, termInfosIndexDivisor, null);
}
reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, openReadOnly, doClone, termInfosIndexDivisor, null);
return reader;
}

@@ -587,13 +580,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}

@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
final int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}

@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)

@@ -735,7 +721,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
// NOTE: we should not reach this code w/ the core
// IndexReader classes; however, an external subclass
// of IndexReader could reach this.
ReadOnlySegmentReader.noWrite();
throw new UnsupportedOperationException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
}

if (segmentInfos != null) {
@@ -273,7 +273,7 @@ public class FilterIndexReader extends IndexReader {
}

@Override
public Bits getDeletedDocs() throws IOException {
public Bits getDeletedDocs() {
return MultiFields.getDeletedDocs(in);
}

@@ -323,12 +323,6 @@ public class FilterIndexReader extends IndexReader {
return in.document(n, fieldSelector);
}

@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
return in.isDeleted(n);
}

@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
@@ -788,9 +788,6 @@ public abstract class IndexReader implements Cloneable,Closeable {
// TODO (1.5): When we convert to JDK 1.5 make this Set<String>
public abstract Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException;

/** Returns true if document <i>n</i> has been deleted */
public abstract boolean isDeleted(int n);

/** Returns true if any documents have been deleted */
public abstract boolean hasDeletions();

@@ -1120,7 +1117,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
* docs.
*
* @lucene.experimental */
public abstract Bits getDeletedDocs() throws IOException;
public abstract Bits getDeletedDocs();

/**
* Expert: return the IndexCommit that this reader has

@@ -1304,16 +1301,4 @@ public abstract class IndexReader implements Cloneable,Closeable {
Fields retrieveFields() {
return fields;
}

private Bits storedDelDocs;

/** @lucene.internal */
void storeDelDocs(Bits delDocs) {
this.storedDelDocs = delDocs;
}

/** @lucene.internal */
Bits retrieveDelDocs() {
return storedDelDocs;
}
}
@@ -32,6 +32,7 @@ import org.apache.lucene.util.Constants;
import org.apache.lucene.index.codecs.CodecProvider;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.Bits;

import java.io.IOException;
import java.io.Closeable;

@@ -418,7 +419,7 @@ public class IndexWriter implements Closeable {
// just like we do when loading segments_N
synchronized(this) {
applyDeletes();
final IndexReader r = new ReadOnlyDirectoryReader(this, segmentInfos, termInfosIndexDivisor, codecs);
final IndexReader r = new DirectoryReader(this, segmentInfos, termInfosIndexDivisor, codecs);
if (infoStream != null) {
message("return reader version=" + r.getVersion() + " reader=" + r);
}

@@ -3464,7 +3465,9 @@ public class IndexWriter implements Closeable {
SegmentInfo info = sourceSegments.info(i);
int docCount = info.docCount;
SegmentReader previousReader = merge.readersClone[i];
final Bits prevDelDocs = previousReader.getDeletedDocs();
SegmentReader currentReader = merge.readers[i];
final Bits currentDelDocs = currentReader.getDeletedDocs();
if (previousReader.hasDeletions()) {

// There were deletes on this segment when the merge

@@ -3479,10 +3482,10 @@ public class IndexWriter implements Closeable {
// committed since we started the merge, so we
// must merge them:
for(int j=0;j<docCount;j++) {
if (previousReader.isDeleted(j))
assert currentReader.isDeleted(j);
if (prevDelDocs.get(j))
assert currentDelDocs.get(j);
else {
if (currentReader.isDeleted(j)) {
if (currentDelDocs.get(j)) {
mergeReader.doDelete(docUpto);
delCount++;
}

@@ -3496,7 +3499,7 @@ public class IndexWriter implements Closeable {
// This segment had no deletes before but now it
// does:
for(int j=0; j<docCount; j++) {
if (currentReader.isDeleted(j)) {
if (currentDelDocs.get(j)) {
mergeReader.doDelete(docUpto);
delCount++;
}
@@ -26,7 +26,6 @@ import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.ReaderUtil.Gather; // for javadocs
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.MultiBits;

/**
* Exposes flex API, merged from flex API of sub-segments.

@@ -99,35 +98,78 @@ public final class MultiFields extends Fields {
}
}

public static Bits getDeletedDocs(IndexReader r) throws IOException {
private static class MultiReaderBits implements Bits {
private final int[] starts;
private final IndexReader[] readers;
private final Bits[] delDocs;

public MultiReaderBits(int[] starts, IndexReader[] readers) {
assert readers.length == starts.length-1;
this.starts = starts;
this.readers = readers;
delDocs = new Bits[readers.length];
for(int i=0;i<readers.length;i++) {
delDocs[i] = readers[i].getDeletedDocs();
}
}

public boolean get(int doc) {
final int sub = ReaderUtil.subIndex(doc, starts);
Bits dels = delDocs[sub];
if (dels == null) {
// NOTE: this is not sync'd but multiple threads can
// come through here; I think this is OK -- worst
// case is more than 1 thread ends up filling in the
// sub Bits
dels = readers[sub].getDeletedDocs();
if (dels == null) {
return false;
} else {
delDocs[sub] = dels;
}
}
return dels.get(doc-starts[sub]);
}

public int length() {
return starts[starts.length-1];
}
}

public static Bits getDeletedDocs(IndexReader r) {
Bits result;
if (r.hasDeletions()) {

result = r.retrieveDelDocs();
if (result == null) {

final List<Bits> bits = new ArrayList<Bits>();
final List<Integer> starts = new ArrayList<Integer>();
final List<IndexReader> readers = new ArrayList<IndexReader>();
final List<Integer> starts = new ArrayList<Integer>();

try {
final int maxDoc = new ReaderUtil.Gather(r) {
@Override
protected void add(int base, IndexReader r) throws IOException {
// record all delDocs, even if they are null
bits.add(r.getDeletedDocs());
starts.add(base);
}
}.run();
@Override
protected void add(int base, IndexReader r) throws IOException {
// record all delDocs, even if they are null
readers.add(r);
starts.add(base);
}
}.run();
starts.add(maxDoc);

assert bits.size() > 0;
if (bits.size() == 1) {
// Only one actual sub reader -- optimize this case
result = bits.get(0);
} else {
result = new MultiBits(bits, starts);
}
r.storeDelDocs(result);
} catch (IOException ioe) {
// should not happen
throw new RuntimeException(ioe);
}

assert readers.size() > 0;
if (readers.size() == 1) {
// Only one actual sub reader -- optimize this case
result = readers.get(0).getDeletedDocs();
} else {
int[] startsArray = new int[starts.size()];
for(int i=0;i<startsArray.length;i++) {
startsArray[i] = starts.get(i);
}
result = new MultiReaderBits(startsArray, readers.toArray(new IndexReader[readers.size()]));
}

} else {
result = null;
}
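MultiReaderBits above maps a top-level docID to its owning sub-reader with ReaderUtil.subIndex(doc, starts), where starts holds each sub-reader's base docID plus the total maxDoc as a final sentinel, and then consults that sub-reader's own deleted docs. A rough, illustrative sketch of that lookup; the real helper lives in org.apache.lucene.util.ReaderUtil, and this standalone version is only an assumption about its behaviour:

// Returns i such that starts[i] <= docID < starts[i+1].
static int subIndex(int docID, int[] starts) {
  int lo = 0, hi = starts.length - 2; // last valid sub-reader index
  while (lo <= hi) {
    int mid = (lo + hi) >>> 1;
    if (docID < starts[mid]) {
      hi = mid - 1;
    } else if (docID >= starts[mid + 1]) {
      lo = mid + 1;
    } else {
      return mid;
    }
  }
  return hi; // docID at or past the last start
}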
@@ -153,7 +153,7 @@ public class MultiReader extends IndexReader implements Cloneable {
}

@Override
public Bits getDeletedDocs() throws IOException {
public Bits getDeletedDocs() {
throw new UnsupportedOperationException("please use MultiFields.getDeletedDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits deletedDocs");
}

@@ -278,13 +278,6 @@ public class MultiReader extends IndexReader implements Cloneable {
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}

@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}

@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
@@ -195,7 +195,7 @@ public class ParallelReader extends IndexReader {
}

@Override
public Bits getDeletedDocs() throws IOException {
public Bits getDeletedDocs() {
return MultiFields.getDeletedDocs(readers.get(0));
}

@@ -320,15 +320,6 @@ public class ParallelReader extends IndexReader {
return hasDeletions;
}

// check first reader
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
if (readers.size() > 0)
return readers.get(0).isDeleted(n);
return false;
}

// delete in all readers
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
@@ -1,44 +0,0 @@
package org.apache.lucene.index;

/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import org.apache.lucene.store.Directory;
import org.apache.lucene.index.codecs.CodecProvider;

import java.io.IOException;
import java.util.Map;

class ReadOnlyDirectoryReader extends DirectoryReader {
ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor, CodecProvider codecs) throws IOException {
super(directory, sis, deletionPolicy, true, termInfosIndexDivisor, codecs);
}

ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, Map<String,byte[]> oldNormsCache, boolean doClone,
int termInfosIndexDivisor, CodecProvider codecs) throws IOException {
super(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor, codecs);
}

ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor, CodecProvider codecs) throws IOException {
super(writer, infos, termInfosIndexDivisor, codecs);
}

@Override
protected void acquireWriteLock() {
ReadOnlySegmentReader.noWrite();
}
}
@@ -1,36 +0,0 @@
package org.apache.lucene.index;

/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

class ReadOnlySegmentReader extends SegmentReader {

static void noWrite() {
throw new UnsupportedOperationException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
}

@Override
protected void acquireWriteLock() {
noWrite();
}

// Not synchronized
@Override
public boolean isDeleted(int n) {
return deletedDocs != null && deletedDocs.get(n);
}
}
@@ -367,10 +367,11 @@ final class SegmentMerger {
throws IOException, MergeAbortedException, CorruptIndexException {
int docCount = 0;
final int maxDoc = reader.maxDoc();
final Bits delDocs = MultiFields.getDeletedDocs(reader);
if (matchingFieldsReader != null) {
// We can bulk-copy because the fieldInfos are "congruent"
for (int j = 0; j < maxDoc;) {
if (reader.isDeleted(j)) {
if (delDocs.get(j)) {
// skip deleted docs
++j;
continue;

@@ -382,7 +383,7 @@ final class SegmentMerger {
j++;
numDocs++;
if (j >= maxDoc) break;
if (reader.isDeleted(j)) {
if (delDocs.get(j)) {
j++;
break;
}

@@ -395,7 +396,7 @@ final class SegmentMerger {
}
} else {
for (int j = 0; j < maxDoc; j++) {
if (reader.isDeleted(j)) {
if (delDocs.get(j)) {
// skip deleted docs
continue;
}

@@ -485,10 +486,11 @@ final class SegmentMerger {
final IndexReader reader)
throws IOException, MergeAbortedException {
final int maxDoc = reader.maxDoc();
final Bits delDocs = MultiFields.getDeletedDocs(reader);
if (matchingVectorsReader != null) {
// We can bulk-copy because the fieldInfos are "congruent"
for (int docNum = 0; docNum < maxDoc;) {
if (reader.isDeleted(docNum)) {
if (delDocs.get(docNum)) {
// skip deleted docs
++docNum;
continue;

@@ -500,7 +502,7 @@ final class SegmentMerger {
docNum++;
numDocs++;
if (docNum >= maxDoc) break;
if (reader.isDeleted(docNum)) {
if (delDocs.get(docNum)) {
docNum++;
break;
}

@@ -512,7 +514,7 @@ final class SegmentMerger {
}
} else {
for (int docNum = 0; docNum < maxDoc; docNum++) {
if (reader.isDeleted(docNum)) {
if (delDocs.get(docNum)) {
// skip deleted docs
continue;
}

@@ -621,12 +623,13 @@ final class SegmentMerger {
inputDocBase += reader.maxDoc();
if (mergeState.delCounts[i] != 0) {
int delCount = 0;
Bits deletedDocs = reader.getDeletedDocs();
final Bits delDocs = MultiFields.getDeletedDocs(reader);
assert delDocs != null;
final int maxDoc = reader.maxDoc();
final int[] docMap = mergeState.docMaps[i] = new int[maxDoc];
int newDocID = 0;
for(int j=0;j<maxDoc;j++) {
if (deletedDocs.get(j)) {
if (delDocs.get(j)) {
docMap[j] = -1;
delCount++; // only for assert
} else {

@@ -647,10 +650,7 @@ final class SegmentMerger {
// NOTE: this is silly, yet, necessary -- we create a
// MultiBits as our skip docs only to have it broken
// apart when we step through the docs enums in
// MultidDcsEnum.... this only matters when we are
// interacting with a non-core IR subclass, because
// LegacyFieldsEnum.LegacyDocs[AndPositions]Enum checks
// that the skipDocs matches the delDocs for the reader
// MultiDocsEnum.
mergeState.multiDeletedDocs = new MultiBits(bits, bitsStarts);

try {

@@ -686,6 +686,7 @@ final class SegmentMerger {
}
for ( IndexReader reader : readers) {
int maxDoc = reader.maxDoc();
final Bits delDocs = MultiFields.getDeletedDocs(reader);
if (normBuffer == null || normBuffer.length < maxDoc) {
// the buffer is too small for the current segment
normBuffer = new byte[maxDoc];

@@ -698,7 +699,7 @@ final class SegmentMerger {
// this segment has deleted docs, so we have to
// check for every doc if it is deleted or not
for (int k = 0; k < maxDoc; k++) {
if (!reader.isDeleted(k)) {
if (!delDocs.get(k)) {
output.writeByte(normBuffer[k]);
}
}
@@ -56,7 +56,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();

BitVector deletedDocs = null;
volatile BitVector deletedDocs;
AtomicInteger deletedDocsRef = null;
private boolean deletedDocsDirty = false;
private boolean normsDirty = false;

@@ -525,7 +525,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
codecs = CodecProvider.getDefault();
}

SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
SegmentReader instance = new SegmentReader();
instance.readOnly = readOnly;
instance.si = si;
instance.readBufferSize = readBufferSize;

@@ -559,7 +559,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
}

@Override
public synchronized Bits getDeletedDocs() {
public Bits getDeletedDocs() {
return deletedDocs;
}

@@ -663,7 +663,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
assert !doClone || (normsUpToDate && deletionsUpToDate);

// clone reader
SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();
SegmentReader clone = new SegmentReader();

boolean success = false;
try {

@@ -882,11 +882,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
return getFieldsReader().doc(n, fieldSelector);
}

@Override
public synchronized boolean isDeleted(int n) {
return (deletedDocs != null && deletedDocs.get(n));
}

@Override
public Fields fields() throws IOException {
return core.fields;
@@ -48,7 +48,7 @@ import org.apache.lucene.util.ReaderUtil;
public final class SlowMultiReaderWrapper extends FilterIndexReader {
/** This method may return the reader back, if the
* incoming reader is already atomic. */
public static IndexReader wrap(IndexReader reader) {
public static IndexReader wrap(IndexReader reader) throws IOException {
final List<IndexReader> subs = new ArrayList<IndexReader>();
ReaderUtil.gatherSubReaders(subs, reader);
if (subs == null) {

@@ -61,7 +61,7 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
}
}

private SlowMultiReaderWrapper(IndexReader other) {
private SlowMultiReaderWrapper(IndexReader other) throws IOException {
super(other);
}

@@ -71,7 +71,7 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
}

@Override
public Bits getDeletedDocs() throws IOException {
public Bits getDeletedDocs() {
return MultiFields.getDeletedDocs(in);
}
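MultiReader now refuses to hand out a merged top-level deleted-docs Bits directly; as its exception message above says, callers should either use MultiFields.getDeletedDocs or wrap the reader with SlowMultiReaderWrapper, which is exactly what the new testMultiReaderDeletes below exercises. A hedged usage sketch, assuming an existing Directory named dir and an int docID (wrap declares throws IOException, so this belongs in a method that can throw):

IndexReader reader = IndexReader.open(dir, false);
IndexReader atomicView = SlowMultiReaderWrapper.wrap(reader);
Bits delDocs = atomicView.getDeletedDocs(); // null until something is deleted
if (delDocs != null && delDocs.get(docID)) {
  // document docID is deleted
}
reader.close();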
@@ -46,10 +46,10 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;

/*
Verify we can read the pre-4.0 file format, do searches

@@ -310,8 +310,10 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
_TestUtil.checkIndex(dir);

final Bits delDocs = MultiFields.getDeletedDocs(reader);

for(int i=0;i<35;i++) {
if (!reader.isDeleted(i)) {
if (!delDocs.get(i)) {
Document d = reader.document(i);
List<Fieldable> fields = d.getFields();
if (d.getField("content3") == null) {
@@ -1149,6 +1149,36 @@ public class TestIndexReader extends LuceneTestCase
dir.close();
}

public void testMultiReaderDeletes() throws Exception {
Directory dir = new MockRAMDirectory();
RandomIndexWriter w = new RandomIndexWriter(random, dir);
Document doc = new Document();
doc.add(new Field("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
doc = new Document();
w.commit();
doc.add(new Field("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexReader wr = SlowMultiReaderWrapper.wrap(r);
w.close();

assertNull(wr.getDeletedDocs());
r.close();

r = IndexReader.open(dir, false);
wr = SlowMultiReaderWrapper.wrap(r);

assertNull(wr.getDeletedDocs());
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
assertNotNull(wr.getDeletedDocs());
assertTrue(wr.getDeletedDocs().get(0));
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
assertTrue(wr.getDeletedDocs().get(1));
r.close();
dir.close();
}

private void deleteReaderReaderConflict(boolean optimize) throws IOException {
Directory dir = getDirectory();

@@ -1250,7 +1280,6 @@ public class TestIndexReader extends LuceneTestCase
dir.close();
}

private void addDocumentWithFields(IndexWriter writer) throws IOException
{
Document doc = new Document();

@@ -1333,13 +1362,17 @@ public class TestIndexReader extends LuceneTestCase
}

// check deletions
final Bits delDocs1 = MultiFields.getDeletedDocs(index1);
final Bits delDocs2 = MultiFields.getDeletedDocs(index2);
for (int i = 0; i < index1.maxDoc(); i++) {
assertEquals("Doc " + i + " only deleted in one index.", index1.isDeleted(i), index2.isDeleted(i));
assertEquals("Doc " + i + " only deleted in one index.",
delDocs1 == null || delDocs1.get(i),
delDocs2 == null || delDocs2.get(i));
}

// check stored fields
for (int i = 0; i < index1.maxDoc(); i++) {
if (!index1.isDeleted(i)) {
if (delDocs1 == null || !delDocs1.get(i)) {
Document doc1 = index1.document(i);
Document doc2 = index2.document(i);
List<Fieldable> fieldable1 = doc1.getFields();

@@ -1670,7 +1703,7 @@ public class TestIndexReader extends LuceneTestCase
// Reopen to readonly w/ no chnages
IndexReader r3 = r.reopen(true);
assertTrue(r3 instanceof ReadOnlyDirectoryReader);
assertTrue(((DirectoryReader) r3).readOnly);
r3.close();

// Add new segment

@@ -1680,13 +1713,13 @@ public class TestIndexReader extends LuceneTestCase
// Reopen reader1 --> reader2
IndexReader r2 = r.reopen(true);
r.close();
assertTrue(r2 instanceof ReadOnlyDirectoryReader);
assertTrue(((DirectoryReader) r2).readOnly);
IndexReader[] subs = r2.getSequentialSubReaders();
final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
r2.close();

assertTrue(subs[0] instanceof ReadOnlySegmentReader);
assertTrue(subs[1] instanceof ReadOnlySegmentReader);
assertTrue(((SegmentReader) subs[0]).readOnly);
assertTrue(((SegmentReader) subs[1]).readOnly);
assertTrue(ints == ints2);

dir.close();
@@ -26,8 +26,8 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Bits;

/**
* Tests cloning multiple types of readers, modifying the deletedDocs and norms

@@ -243,10 +243,13 @@ public class TestIndexReaderClone extends LuceneTestCase {
}

public static boolean isReadOnly(IndexReader r) {
if (r instanceof ReadOnlySegmentReader
|| r instanceof ReadOnlyDirectoryReader)
return true;
return false;
if (r instanceof SegmentReader) {
return ((SegmentReader) r).readOnly;
} else if (r instanceof DirectoryReader) {
return ((DirectoryReader) r).readOnly;
} else {
return false;
}
}

public void testParallelReader() throws Exception {

@@ -286,8 +289,9 @@ public class TestIndexReaderClone extends LuceneTestCase {
assertTrue(Similarity.getDefault().decodeNormValue(r1.norms("field1")[4]) == norm1);
assertTrue(Similarity.getDefault().decodeNormValue(pr1Clone.norms("field1")[4]) != norm1);

assertTrue(!r1.isDeleted(10));
assertTrue(pr1Clone.isDeleted(10));
final Bits delDocs = MultiFields.getDeletedDocs(r1);
assertTrue(delDocs == null || !delDocs.get(10));
assertTrue(MultiFields.getDeletedDocs(pr1Clone).get(10));

// try to update the original reader, which should throw an exception
try {

@@ -376,9 +380,10 @@ public class TestIndexReaderClone extends LuceneTestCase {
assertTrue(origSegmentReader.deletedDocs != clonedSegmentReader.deletedDocs);

assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
assertTrue(!origSegmentReader.isDeleted(2)); // doc 2 should not be deleted
final Bits delDocs = origSegmentReader.getDeletedDocs();
assertTrue(delDocs == null || !delDocs.get(2)); // doc 2 should not be deleted
// in original segmentreader
assertTrue(clonedSegmentReader.isDeleted(2)); // doc 2 should be deleted in
assertTrue(clonedSegmentReader.getDeletedDocs().get(2)); // doc 2 should be deleted in
// cloned segmentreader

// deleting a doc from the original segmentreader should throw an exception

@@ -420,7 +425,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
clonedReader.close();

IndexReader r = IndexReader.open(dir1, false);
assertTrue(r.isDeleted(1));
assertTrue(MultiFields.getDeletedDocs(r).get(1));
r.close();
dir1.close();
}

@@ -448,7 +453,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
int doc) {
assertEquals(reader.isDeleted(doc), reader2.isDeleted(doc));
assertEquals(reader.getDeletedDocs().get(doc), reader2.getDeletedDocs().get(doc));
}

private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
@@ -44,6 +44,7 @@ import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.Bits;

public class TestIndexReaderReopen extends LuceneTestCase {

@@ -1153,7 +1154,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
r2.deleteDocument(0);

// r1 should not see the delete
assertFalse(r1.isDeleted(0));
final Bits r1DelDocs = MultiFields.getDeletedDocs(r1);
assertFalse(r1DelDocs != null && r1DelDocs.get(0));

// Now r2 should have made a private copy of deleted docs:
assertTrue(sr1.deletedDocs!=sr2.deletedDocs);
@@ -76,6 +76,7 @@ import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;

public class TestIndexWriter extends LuceneTestCase {
Random random;

@@ -1897,8 +1898,10 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
final Bits delDocs = MultiFields.getDeletedDocs(reader);
assertNotNull(delDocs);
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
if (delDocs.get(j))
numDel++;
else {
reader.document(j);

@@ -1924,13 +1927,10 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
assertNull(MultiFields.getDeletedDocs(reader));
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
reader.document(j);
reader.getTermFreqVectors(j);
}
reader.close();
assertEquals(0, numDel);

@@ -2011,8 +2011,10 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
int numDel = 0;
final Bits delDocs = MultiFields.getDeletedDocs(reader);
assertNotNull(delDocs);
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
if (delDocs.get(j))
numDel++;
else {
reader.document(j);

@@ -2037,17 +2039,12 @@ public class TestIndexWriter extends LuceneTestCase {
expected += 17-NUM_THREAD*NUM_ITER;
assertEquals(expected, reader.docFreq(new Term("contents", "here")));
assertEquals(expected, reader.maxDoc());
numDel = 0;
assertNull(MultiFields.getDeletedDocs(reader));
for(int j=0;j<reader.maxDoc();j++) {
if (reader.isDeleted(j))
numDel++;
else {
reader.document(j);
reader.getTermFreqVectors(j);
}
reader.document(j);
reader.getTermFreqVectors(j);
}
reader.close();
assertEquals(0, numDel);

dir.close();
}

@@ -2487,8 +2484,9 @@ public class TestIndexWriter extends LuceneTestCase {
if (success) {
IndexReader reader = IndexReader.open(dir, true);
final Bits delDocs = MultiFields.getDeletedDocs(reader);
for(int j=0;j<reader.maxDoc();j++) {
if (!reader.isDeleted(j)) {
if (delDocs == null || !delDocs.get(j)) {
reader.document(j);
reader.getTermFreqVectors(j);
}
@@ -86,7 +86,7 @@ public class TestSegmentReader extends LuceneTestCase {
assertTrue(deleteReader != null);
assertTrue(deleteReader.numDocs() == 1);
deleteReader.deleteDocument(0);
assertTrue(deleteReader.isDeleted(0) == true);
assertTrue(deleteReader.getDeletedDocs().get(0));
assertTrue(deleteReader.hasDeletions() == true);
assertTrue(deleteReader.numDocs() == 0);
deleteReader.close();
@@ -32,7 +32,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.Bits;

/**
* Test class to illustrate using IndexDeletionPolicy to provide multi-level rollback capability.

@@ -90,9 +90,11 @@ public class TestTransactionRollback extends LuceneTestCase {
private void checkExpecteds(BitSet expecteds) throws Exception {
IndexReader r = IndexReader.open(dir, true);

//Perhaps not the most efficient approach but meets our needs here.
//Perhaps not the most efficient approach but meets our
//needs here.
final Bits delDocs = MultiFields.getDeletedDocs(r);
for (int i = 0; i < r.maxDoc(); i++) {
if(!r.isDeleted(i)) {
if(delDocs == null || !delDocs.get(i)) {
String sval=r.document(i).get(FIELD_RECORD_ID);
if(sval!=null) {
int val=Integer.parseInt(sval);
@@ -225,7 +225,7 @@ public class SolrIndexReader extends FilterIndexReader {
}

@Override
public Bits getDeletedDocs() throws IOException {
public Bits getDeletedDocs() {
return in.getDeletedDocs();
}

@@ -266,11 +266,6 @@ public class SolrIndexReader extends FilterIndexReader {
return in.document(n, fieldSelector);
}

@Override
public boolean isDeleted(int n) {
return in.isDeleted(n);
}

@Override
public boolean hasDeletions() {
return in.hasDeletions();
@@ -19,11 +19,12 @@ package org.apache.solr.search.function;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.*;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits;
import org.apache.solr.search.SolrIndexReader;

import java.io.IOException;
import java.util.Set;
import java.util.IdentityHashMap;
import java.util.Map;

@@ -112,6 +113,7 @@ public class FunctionQuery extends Query {
int doc=-1;
final DocValues vals;
final boolean hasDeletions;
final Bits delDocs;

public AllScorer(Similarity similarity, IndexReader reader, FunctionWeight w) throws IOException {
super(similarity);

@@ -120,6 +122,8 @@ public class FunctionQuery extends Query {
this.reader = reader;
this.maxDoc = reader.maxDoc();
this.hasDeletions = reader.hasDeletions();
this.delDocs = MultiFields.getDeletedDocs(reader);
assert !hasDeletions || delDocs != null;
vals = func.getValues(weight.context, reader);
}

@@ -139,7 +143,7 @@ public class FunctionQuery extends Query {
if (doc>=maxDoc) {
return doc=NO_MORE_DOCS;
}
if (hasDeletions && reader.isDeleted(doc)) continue;
if (hasDeletions && delDocs.get(doc)) continue;
return doc;
}
}

@@ -161,7 +165,7 @@ public class FunctionQuery extends Query {
if (doc>=maxDoc) {
return false;
}
if (hasDeletions && reader.isDeleted(doc)) continue;
if (hasDeletions && delDocs.get(doc)) continue;
// todo: maybe allow score() to throw a specific exception
// and continue on to the next document if it is thrown...
// that may be useful, but exceptions aren't really good
@@ -24,12 +24,13 @@ import org.apache.lucene.search.FieldComparatorSource;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Bits;
import org.apache.lucene.index.MultiFields;

import java.io.IOException;
import java.io.Serializable;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.HashMap;
import java.util.Collections;

/**

@@ -177,6 +178,7 @@ class ValueSourceScorer extends Scorer {
protected final int maxDoc;
protected final DocValues values;
protected boolean checkDeletes;
private final Bits delDocs;

protected ValueSourceScorer(IndexReader reader, DocValues values) {
super(null);

@@ -184,6 +186,7 @@ class ValueSourceScorer extends Scorer {
this.maxDoc = reader.maxDoc();
this.values = values;
setCheckDeletes(true);
this.delDocs = MultiFields.getDeletedDocs(reader);
}

public IndexReader getReader() {

@@ -195,7 +198,7 @@ class ValueSourceScorer extends Scorer {
}

public boolean matches(int doc) {
return (!checkDeletes || !reader.isDeleted(doc)) && matchesValue(doc);
return (!checkDeletes || !delDocs.get(doc)) && matchesValue(doc);
}

public boolean matchesValue(int doc) {