mirror of https://github.com/apache/lucene.git
LUCENE-3246: invert getDelDocs to getLiveDocs as pre-cursor for LUCENE-1536
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1143415 13f79535-47bb-0310-9956-ffa450edef68
parent e7722eebe5
commit b55eeb510d
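Editorial note: the whole change is one mechanical inversion, applied at every call site below. A Bits in which a set bit meant "deleted" (skipDocs / getDeletedDocs()) becomes a Bits in which a set bit means "live" (liveDocs / getLiveDocs()), and the null convention flips with it: a null result now means every doc is live. A minimal sketch of the invariant the rename relies on (the Bits interface is Lucene's; the adapter class itself is illustrative, not part of this commit):

import org.apache.lucene.util.Bits;

// Hypothetical adapter: liveDocs.get(d) == !delDocs.get(d) for every doc d.
final class InvertedBits implements Bits {
  private final Bits delDocs; // old convention: set bit == deleted

  InvertedBits(Bits delDocs) {
    this.delDocs = delDocs;
  }

  public boolean get(int index) {
    return !delDocs.get(index); // live == not deleted
  }

  public int length() {
    return delDocs.length();
  }
}

Call sites invert the same way throughout the diff: "if (delDocs == null || !delDocs.get(doc))" becomes "if (liveDocs == null || liveDocs.get(doc))", and "if (delDocs != null && delDocs.get(doc))" becomes "if (liveDocs != null && !liveDocs.get(doc))".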
@@ -23,13 +23,13 @@ import org.apache.lucene.util.BytesRef;
 public class InstantiatedDocsAndPositionsEnum extends DocsAndPositionsEnum {
   private int upto;
   private int posUpto;
-  private Bits skipDocs;
+  private Bits liveDocs;
   private InstantiatedTerm term;
   protected InstantiatedTermDocumentInformation currentDoc;
   private final BytesRef payload = new BytesRef();

-  public InstantiatedDocsAndPositionsEnum reset(Bits skipDocs, InstantiatedTerm term) {
-    this.skipDocs = skipDocs;
+  public InstantiatedDocsAndPositionsEnum reset(Bits liveDocs, InstantiatedTerm term) {
+    this.liveDocs = liveDocs;
     this.term = term;
     upto = -1;
     return this;
@@ -47,7 +47,7 @@ public class InstantiatedDocsAndPositionsEnum extends DocsAndPositionsEnum {
       return NO_MORE_DOCS;
     } else {
       currentDoc = term.getAssociatedDocuments()[upto];
-      if (skipDocs == null || !skipDocs.get(currentDoc.getDocument().getDocumentNumber())) {
+      if (liveDocs == null || liveDocs.get(currentDoc.getDocument().getDocumentNumber())) {
         posUpto = -1;
         return docID();
       } else {
@@ -69,7 +69,7 @@ public class InstantiatedDocsAndPositionsEnum extends DocsAndPositionsEnum {
     }
     currentDoc = term.getAssociatedDocuments()[upto];

-    if (skipDocs != null && skipDocs.get(currentDoc.getDocument().getDocumentNumber())) {
+    if (liveDocs != null && !liveDocs.get(currentDoc.getDocument().getDocumentNumber())) {
       return nextDoc();
     } else {
       posUpto = -1;

@@ -21,12 +21,12 @@ import org.apache.lucene.util.Bits;

 public class InstantiatedDocsEnum extends DocsEnum {
   private int upto;
-  private Bits skipDocs;
+  private Bits liveDocs;
   private InstantiatedTerm term;
   protected InstantiatedTermDocumentInformation currentDoc;

-  public InstantiatedDocsEnum reset(Bits skipDocs, InstantiatedTerm term) {
-    this.skipDocs = skipDocs;
+  public InstantiatedDocsEnum reset(Bits liveDocs, InstantiatedTerm term) {
+    this.liveDocs = liveDocs;
     this.term = term;
     upto = -1;
     return this;
@@ -44,7 +44,7 @@ public class InstantiatedDocsEnum extends DocsEnum {
       return NO_MORE_DOCS;
     } else {
       currentDoc = term.getAssociatedDocuments()[upto];
-      if (skipDocs == null || !skipDocs.get(currentDoc.getDocument().getDocumentNumber())) {
+      if (liveDocs == null || liveDocs.get(currentDoc.getDocument().getDocumentNumber())) {
         return docID();
       } else {
         return nextDoc();
@@ -65,7 +65,7 @@ public class InstantiatedDocsEnum extends DocsEnum {
     }
     currentDoc = term.getAssociatedDocuments()[upto];

-    if (skipDocs != null && skipDocs.get(currentDoc.getDocument().getDocumentNumber())) {
+    if (liveDocs != null && !liveDocs.get(currentDoc.getDocument().getDocumentNumber())) {
       return nextDoc();
     } else {
       return docID();

@@ -182,9 +182,9 @@ public class InstantiatedIndex
     }

     // create documents
-    final Bits delDocs = MultiFields.getDeletedDocs(sourceIndexReader);
+    final Bits liveDocs = MultiFields.getLiveDocs(sourceIndexReader);
     for (int i = 0; i < sourceIndexReader.maxDoc(); i++) {
-      if (delDocs != null && delDocs.get(i)) {
+      if (liveDocs != null && !liveDocs.get(i)) {
         deletedDocuments.set(i);
       } else {
         InstantiatedDocument document = new InstantiatedDocument();
@@ -254,7 +254,7 @@ public class InstantiatedIndex
     // create term-document informations
     for (InstantiatedTerm term : orderedTerms) {
       DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(sourceIndexReader,
-                                                                            MultiFields.getDeletedDocs(sourceIndexReader),
+                                                                            MultiFields.getLiveDocs(sourceIndexReader),
                                                                             term.getTerm().field(),
                                                                             new BytesRef(term.getTerm().text()));
       int position = 0;

@@ -107,11 +107,11 @@ public class InstantiatedIndexReader extends IndexReader {
   }

   @Override
-  public Bits getDeletedDocs() {
+  public Bits getLiveDocs() {
     return new Bits() {
       public boolean get(int n) {
-        return (index.getDeletedDocuments() != null && index.getDeletedDocuments().get(n))
-          || (uncommittedDeletedDocuments != null && uncommittedDeletedDocuments.get(n));
+        return !(index.getDeletedDocuments() != null && index.getDeletedDocuments().get(n))
+          && !(uncommittedDeletedDocuments != null && uncommittedDeletedDocuments.get(n));
       }

       public int length() {

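Editorial note: the rewritten anonymous Bits above is the De Morgan negation of the old body — live = !(committedDel || uncommittedDel) = !committedDel && !uncommittedDel. A tiny self-contained check of that equivalence (illustrative only, not part of the commit):

// Exhaustive check that the new getLiveDocs() body is the exact
// negation of the old getDeletedDocs() body.
public class DeMorganCheck {
  public static void main(String[] args) {
    boolean[] values = { false, true };
    for (boolean committedDel : values) {
      for (boolean uncommittedDel : values) {
        boolean deleted = committedDel || uncommittedDel; // old body
        boolean live = !committedDel && !uncommittedDel;  // new body
        if (live != !deleted) throw new AssertionError();
        System.out.println(committedDel + "," + uncommittedDel + " -> live=" + live);
      }
    }
  }
}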
@@ -118,19 +118,19 @@ public class InstantiatedTermsEnum extends TermsEnum {
   }

   @Override
-  public DocsEnum docs(Bits skipDocs, DocsEnum reuse) {
+  public DocsEnum docs(Bits liveDocs, DocsEnum reuse) {
     if (reuse == null || !(reuse instanceof InstantiatedDocsEnum)) {
       reuse = new InstantiatedDocsEnum();
     }
-    return ((InstantiatedDocsEnum) reuse).reset(skipDocs, terms[upto]);
+    return ((InstantiatedDocsEnum) reuse).reset(liveDocs, terms[upto]);
   }

   @Override
-  public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) {
+  public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) {
     if (reuse == null || !(reuse instanceof InstantiatedDocsAndPositionsEnum)) {
       reuse = new InstantiatedDocsAndPositionsEnum();
     }
-    return ((InstantiatedDocsAndPositionsEnum) reuse).reset(skipDocs, terms[upto]);
+    return ((InstantiatedDocsAndPositionsEnum) reuse).reset(liveDocs, terms[upto]);
   }

   @Override

@@ -138,8 +138,8 @@ public class TestIndicesEquals extends LuceneTestCase {
     testTermEnum.seekCeil(new BytesRef(t.text()));
     assertEquals(aprioriTermEnum.term(), testTermEnum.term());

-    DocsEnum aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getDeletedDocs(aprioriReader), null);
-    DocsEnum testTermDocs = testTermEnum.docs(MultiFields.getDeletedDocs(testReader), null);
+    DocsEnum aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getLiveDocs(aprioriReader), null);
+    DocsEnum testTermDocs = testTermEnum.docs(MultiFields.getLiveDocs(testReader), null);

     assertEquals(aprioriTermDocs.nextDoc(), testTermDocs.nextDoc());
     assertEquals(aprioriTermDocs.freq(), testTermDocs.freq());
@@ -186,8 +186,8 @@ public class TestIndicesEquals extends LuceneTestCase {

     assertEquals(aprioriTermEnum.next(), testTermEnum.next());

-    aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getDeletedDocs(aprioriReader), aprioriTermDocs);
-    testTermDocs = testTermEnum.docs(MultiFields.getDeletedDocs(testReader), testTermDocs);
+    aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getLiveDocs(aprioriReader), aprioriTermDocs);
+    testTermDocs = testTermEnum.docs(MultiFields.getLiveDocs(testReader), testTermDocs);

     while (aprioriTermDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
       assertTrue(testTermDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
@@ -309,13 +309,13 @@ public class TestIndicesEquals extends LuceneTestCase {
     assertEquals(air.numDocs(), tir.numDocs());
     assertEquals(air.numDeletedDocs(), tir.numDeletedDocs());

-    final Bits aDelDocs = MultiFields.getDeletedDocs(air);
-    final Bits tDelDocs = MultiFields.getDeletedDocs(tir);
-    assertTrue((aDelDocs != null && tDelDocs != null) ||
-               (aDelDocs == null && tDelDocs == null));
-    if (aDelDocs != null) {
+    final Bits aLiveDocs = MultiFields.getLiveDocs(air);
+    final Bits tLiveDocs = MultiFields.getLiveDocs(tir);
+    assertTrue((aLiveDocs != null && tLiveDocs != null) ||
+               (aLiveDocs == null && tLiveDocs == null));
+    if (aLiveDocs != null) {
       for (int d =0; d<air.maxDoc(); d++) {
-        assertEquals(aDelDocs.get(d), tDelDocs.get(d));
+        assertEquals(aLiveDocs.get(d), tLiveDocs.get(d));
       }
     }

@@ -366,13 +366,13 @@ public class TestIndicesEquals extends LuceneTestCase {
       }
     }

-    final Bits apDelDocs = MultiFields.getDeletedDocs(aprioriReader);
-    final Bits testDelDocs = MultiFields.getDeletedDocs(testReader);
-    assertTrue((apDelDocs != null && testDelDocs != null) ||
-               (apDelDocs == null && testDelDocs == null));
-    if (apDelDocs != null) {
+    final Bits apLiveDocs = MultiFields.getLiveDocs(aprioriReader);
+    final Bits testLiveDocs = MultiFields.getLiveDocs(testReader);
+    assertTrue((apLiveDocs != null && testLiveDocs != null) ||
+               (apLiveDocs == null && testLiveDocs == null));
+    if (apLiveDocs != null) {
       for (int docIndex = 0; docIndex < aprioriReader.numDocs(); docIndex++) {
-        assertEquals(apDelDocs.get(docIndex), testDelDocs.get(docIndex));
+        assertEquals(apLiveDocs.get(docIndex), testLiveDocs.get(docIndex));
       }
     }

@@ -407,8 +407,8 @@ public class TestIndicesEquals extends LuceneTestCase {

     // compare termDocs seeking

-    DocsEnum aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getDeletedDocs(aprioriReader), null);
-    DocsEnum testTermDocs = testTermEnum.docs(MultiFields.getDeletedDocs(testReader), null);
+    DocsEnum aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getLiveDocs(aprioriReader), null);
+    DocsEnum testTermDocs = testTermEnum.docs(MultiFields.getLiveDocs(testReader), null);

     while (aprioriTermDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
       assertTrue(testTermDocs.advance(aprioriTermDocs.docID()) != DocsEnum.NO_MORE_DOCS);
@@ -419,8 +419,8 @@ public class TestIndicesEquals extends LuceneTestCase {

     assertEquals(aprioriReader.docFreq(aprioriField, aprioriTermEnum.term()), testReader.docFreq(aprioriField, testTermEnum.term()));

-    aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getDeletedDocs(aprioriReader), aprioriTermDocs);
-    testTermDocs = testTermEnum.docs(MultiFields.getDeletedDocs(testReader), testTermDocs);
+    aprioriTermDocs = aprioriTermEnum.docs(MultiFields.getLiveDocs(aprioriReader), aprioriTermDocs);
+    testTermDocs = testTermEnum.docs(MultiFields.getLiveDocs(testReader), testTermDocs);

     while (true) {
       if (aprioriTermDocs.nextDoc() == DocsEnum.NO_MORE_DOCS) {
@@ -439,8 +439,8 @@ public class TestIndicesEquals extends LuceneTestCase {

     // compare term positions

-    DocsAndPositionsEnum aprioriTermPositions = aprioriTermEnum.docsAndPositions(MultiFields.getDeletedDocs(aprioriReader), null);
-    DocsAndPositionsEnum testTermPositions = testTermEnum.docsAndPositions(MultiFields.getDeletedDocs(testReader), null);
+    DocsAndPositionsEnum aprioriTermPositions = aprioriTermEnum.docsAndPositions(MultiFields.getLiveDocs(aprioriReader), null);
+    DocsAndPositionsEnum testTermPositions = testTermEnum.docsAndPositions(MultiFields.getLiveDocs(testReader), null);

     if (VERBOSE) {
       System.out.println("TEST: enum1=" + aprioriTermPositions + " enum2=" + testTermPositions);

@@ -55,6 +55,7 @@ public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
     try {
       new InstantiatedIndex(unoptimizedReader);
     } catch (Exception e) {
+      e.printStackTrace(System.out);
       fail("No exceptions when loading an unoptimized reader!");
     }

@@ -769,7 +769,7 @@ public class MemoryIndex {
     }

     @Override
-    public Bits getDeletedDocs() {
+    public Bits getLiveDocs() {
       return null;
     }

@@ -925,19 +925,19 @@ public class MemoryIndex {
     }

     @Override
-    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) {
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse) {
       if (reuse == null || !(reuse instanceof MemoryDocsEnum)) {
         reuse = new MemoryDocsEnum();
       }
-      return ((MemoryDocsEnum) reuse).reset(skipDocs, info.sortedTerms[termUpto].getValue());
+      return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sortedTerms[termUpto].getValue());
     }

     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) {
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) {
       if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
         reuse = new MemoryDocsAndPositionsEnum();
       }
-      return ((MemoryDocsAndPositionsEnum) reuse).reset(skipDocs, info.sortedTerms[termUpto].getValue());
+      return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sortedTerms[termUpto].getValue());
     }

     @Override
@@ -962,10 +962,10 @@ public class MemoryIndex {
     private class MemoryDocsEnum extends DocsEnum {
       private ArrayIntList positions;
       private boolean hasNext;
-      private Bits skipDocs;
+      private Bits liveDocs;

-      public DocsEnum reset(Bits skipDocs, ArrayIntList positions) {
-        this.skipDocs = skipDocs;
+      public DocsEnum reset(Bits liveDocs, ArrayIntList positions) {
+        this.liveDocs = liveDocs;
         this.positions = positions;
         hasNext = true;
         return this;
@@ -978,7 +978,7 @@ public class MemoryIndex {

       @Override
       public int nextDoc() {
-        if (hasNext && (skipDocs == null || !skipDocs.get(0))) {
+        if (hasNext && (liveDocs == null || liveDocs.get(0))) {
          hasNext = false;
          return 0;
        } else {
@@ -1001,10 +1001,10 @@ public class MemoryIndex {
       private ArrayIntList positions;
       private int posUpto;
       private boolean hasNext;
-      private Bits skipDocs;
+      private Bits liveDocs;

-      public DocsAndPositionsEnum reset(Bits skipDocs, ArrayIntList positions) {
-        this.skipDocs = skipDocs;
+      public DocsAndPositionsEnum reset(Bits liveDocs, ArrayIntList positions) {
+        this.liveDocs = liveDocs;
         this.positions = positions;
         posUpto = 0;
         hasNext = true;
@@ -1018,7 +1018,7 @@ public class MemoryIndex {

       @Override
       public int nextDoc() {
-        if (hasNext && (skipDocs == null || !skipDocs.get(0))) {
+        if (hasNext && (liveDocs == null || liveDocs.get(0))) {
          hasNext = false;
          return 0;
        } else {

@@ -120,7 +120,7 @@ public class FieldNormModifier {

     final FieldInvertState invertState = new FieldInvertState();
     for(IndexReader subReader : subReaders) {
-      final Bits delDocs = subReader.getDeletedDocs();
+      final Bits liveDocs = subReader.getLiveDocs();

       int[] termCounts = new int[subReader.maxDoc()];
       Fields fields = subReader.fields();
@@ -130,7 +130,7 @@ public class FieldNormModifier {
         TermsEnum termsEnum = terms.iterator();
         DocsEnum docs = null;
         while(termsEnum.next() != null) {
-          docs = termsEnum.docs(delDocs, docs);
+          docs = termsEnum.docs(liveDocs, docs);
           while(true) {
             int docID = docs.nextDoc();
             if (docID != docs.NO_MORE_DOCS) {
@@ -145,7 +145,7 @@ public class FieldNormModifier {

       invertState.setBoost(1.0f);
       for (int d = 0; d < termCounts.length; d++) {
-        if (delDocs == null || !delDocs.get(d)) {
+        if (liveDocs == null || liveDocs.get(d)) {
           invertState.setLength(termCounts[d]);
           subReader.setNorm(d, field, fieldSim.encodeNormValue(fieldSim.computeNorm(invertState)));
         }

@@ -25,8 +25,8 @@ import org.apache.lucene.index.IndexWriter; // javadoc
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.Version;

 /**
@@ -177,27 +177,17 @@ public class MultiPassIndexSplitter {
    * Instead, deletions are buffered in a bitset and overlaid with the original
    * list of deletions.
    */
-  public static class FakeDeleteIndexReader extends FilterIndexReader {
-    OpenBitSet dels;
-    OpenBitSet oldDels = null;
+  public static final class FakeDeleteIndexReader extends FilterIndexReader {
+    OpenBitSet liveDocs;

     public FakeDeleteIndexReader(IndexReader in) {
       super(new SlowMultiReaderWrapper(in));
-      dels = new OpenBitSet(in.maxDoc());
-      if (in.hasDeletions()) {
-        oldDels = new OpenBitSet(in.maxDoc());
-        final Bits oldDelBits = MultiFields.getDeletedDocs(in);
-        assert oldDelBits != null;
-        for (int i = 0; i < in.maxDoc(); i++) {
-          if (oldDelBits.get(i)) oldDels.set(i);
-        }
-        dels.or(oldDels);
-      }
+      doUndeleteAll(); // initialize main bitset
     }

     @Override
     public int numDocs() {
-      return in.maxDoc() - (int)dels.cardinality();
+      return (int) liveDocs.cardinality();
     }

     /**
@@ -205,26 +195,35 @@ public class MultiPassIndexSplitter {
     * deletions.
     */
    @Override
-    protected void doUndeleteAll() throws CorruptIndexException, IOException {
-      dels = new OpenBitSet(in.maxDoc());
-      if (oldDels != null) {
-        dels.or(oldDels);
+    protected void doUndeleteAll() {
+      final int maxDoc = in.maxDoc();
+      liveDocs = new OpenBitSet(maxDoc);
+      if (in.hasDeletions()) {
+        final Bits oldLiveDocs = in.getLiveDocs();
+        assert oldLiveDocs != null;
+        // this loop is a little bit ineffective, as Bits has no nextSetBit():
+        for (int i = 0; i < maxDoc; i++) {
+          if (oldLiveDocs.get(i)) liveDocs.fastSet(i);
+        }
+      } else {
+        // mark all docs as valid
+        liveDocs.set(0, maxDoc);
      }
    }

    @Override
-    protected void doDelete(int n) throws CorruptIndexException, IOException {
-      dels.set(n);
+    protected void doDelete(int n) {
+      liveDocs.clear(n);
    }

    @Override
    public boolean hasDeletions() {
-      return !dels.isEmpty();
+      return (in.maxDoc() != this.numDocs());
    }

    @Override
-    public Bits getDeletedDocs() {
-      return dels;
+    public Bits getLiveDocs() {
+      return liveDocs;
    }
  }
}

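Editorial note: the FakeDeleteIndexReader rewrite above is the template used wherever a deletions bitset becomes a live-docs bitset: size an OpenBitSet to maxDoc, set every live bit (all of them when the wrapped reader has no deletions), implement delete as clear, and derive counts instead of storing them. A standalone sketch of that initialization, assuming only the OpenBitSet and Bits calls visible in the hunk above (not part of the commit):

import org.apache.lucene.util.Bits;
import org.apache.lucene.util.OpenBitSet;

// Illustrative mirror of doUndeleteAll() above.
public class LiveDocsInit {
  static OpenBitSet buildLiveDocs(int maxDoc, Bits oldLiveDocs) {
    OpenBitSet live = new OpenBitSet(maxDoc);
    if (oldLiveDocs != null) {
      // Bits has no nextSetBit(), so scan linearly
      for (int i = 0; i < maxDoc; i++) {
        if (oldLiveDocs.get(i)) live.fastSet(i);
      }
    } else {
      live.set(0, maxDoc); // no deletions: every doc is live
    }
    return live;
  }
}

numDocs() then falls out as liveDocs.cardinality() and hasDeletions() as maxDoc != numDocs(), exactly as in the new methods above.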
@@ -19,16 +19,16 @@ package org.apache.lucene.index;

 import java.io.IOException;

-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.store.Directory;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.TermRangeFilter;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.OpenBitSetDISI;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.OpenBitSetDISI;
 import org.apache.lucene.util.Version;

 /**
@@ -87,13 +87,14 @@ public class PKIndexSplitter {
   }

   public static class DocumentFilteredIndexReader extends FilterIndexReader {
-    final Bits readerDels;
+    final Bits liveDocs;
     final int numDocs;

     public DocumentFilteredIndexReader(IndexReader reader, Filter preserveFilter, boolean negateFilter) throws IOException {
       super(new SlowMultiReaderWrapper(reader));

-      final OpenBitSetDISI bits = new OpenBitSetDISI(in.maxDoc());
+      final int maxDoc = in.maxDoc();
+      final OpenBitSetDISI bits = new OpenBitSetDISI(maxDoc);
       final DocIdSet docs = preserveFilter.getDocIdSet((AtomicReaderContext) in.getTopReaderContext());
       if (docs != null) {
         final DocIdSetIterator it = docs.iterator();
@@ -101,23 +102,24 @@ public class PKIndexSplitter {
         bits.inPlaceOr(it);
       }
     }
     // this is somehow inverse, if we negate the filter, we delete all documents it matches!
-    if (!negateFilter) {
-      bits.flip(0, in.maxDoc());
+    if (negateFilter) {
+      bits.flip(0, maxDoc);
     }

     if (in.hasDeletions()) {
-      final Bits oldDelBits = in.getDeletedDocs();
-      assert oldDelBits != null;
-      for (int i = 0; i < in.maxDoc(); i++) {
-        if (oldDelBits.get(i)) {
-          bits.set(i);
+      final Bits oldLiveDocs = in.getLiveDocs();
+      assert oldLiveDocs != null;
+      final DocIdSetIterator it = bits.iterator();
+      for (int i = it.nextDoc(); i < maxDoc; i = it.nextDoc()) {
+        if (!oldLiveDocs.get(i)) {
+          // we can safely modify the current bit, as the iterator already stepped over it:
+          bits.fastClear(i);
         }
       }
     }

-    this.readerDels = bits;
-    this.numDocs = in.maxDoc() - (int) bits.cardinality();
+    this.liveDocs = bits;
+    this.numDocs = (int) bits.cardinality();
   }

   @Override
@@ -131,8 +133,8 @@ public class PKIndexSplitter {
   }

   @Override
-  public Bits getDeletedDocs() {
-    return readerDels;
+  public Bits getLiveDocs() {
+    return liveDocs;
   }
 }
}

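Editorial note: the negateFilter condition flips because `bits` changed meaning — the old code collected documents to delete (so it flipped the filter's matches when NOT negating), while the new code collects documents to keep. A small sketch of the flipped semantics, with java.util.BitSet standing in for OpenBitSetDISI and hypothetical doc IDs:

import java.util.BitSet;

public class NegateFlipSketch {
  public static void main(String[] args) {
    int maxDoc = 4;
    BitSet matched = new BitSet(maxDoc); // docs the preserve filter matched
    matched.set(1);
    matched.set(3);

    boolean negateFilter = true;
    BitSet keep = (BitSet) matched.clone();
    if (negateFilter) {
      // new semantics: bits are "keep" bits, so negating the filter keeps
      // the complement; the old code flipped when NOT negating, because
      // its bits meant "docs to delete"
      keep.flip(0, maxDoc);
    }
    System.out.println(keep); // {0, 2}
  }
}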
@@ -96,7 +96,7 @@ public class TermVectorAccessor {
       positions.clear();
     }

-    final Bits delDocs = MultiFields.getDeletedDocs(indexReader);
+    final Bits liveDocs = MultiFields.getLiveDocs(indexReader);

     Terms terms = MultiFields.getTerms(indexReader, field);
     boolean anyTerms = false;
@@ -109,9 +109,9 @@ public class TermVectorAccessor {
       if (text != null) {
         anyTerms = true;
         if (!mapper.isIgnoringPositions()) {
-          docs = postings = termsEnum.docsAndPositions(delDocs, postings);
+          docs = postings = termsEnum.docsAndPositions(liveDocs, postings);
         } else {
-          docs = termsEnum.docs(delDocs, docs);
+          docs = termsEnum.docs(liveDocs, docs);
         }

         int docID = docs.advance(documentNumber);

@@ -190,8 +190,8 @@ public class HighFreqTerms {
       return 0;
     }

-    Bits skipDocs = MultiFields.getDeletedDocs(reader);
-    if (skipDocs == null) {
+    Bits liveDocs = MultiFields.getLiveDocs(reader);
+    if (liveDocs == null) {
       // TODO: we could do this up front, during the scan
       // (next()), instead of after-the-fact here w/ seek,
       // if the codec supports it and there are no del
@@ -202,7 +202,7 @@ public class HighFreqTerms {
     }
   }

-    DocsEnum de = termsEnum.docs(skipDocs, null);
+    DocsEnum de = termsEnum.docs(liveDocs, null);

     // use DocsEnum.read() and BulkResult api
     final DocsEnum.BulkReadResult bulkresult = de.getBulkResult();

@@ -134,11 +134,11 @@ public class TestNRTManager extends LuceneTestCase {
       System.out.println("TEST: now warm merged reader=" + reader);
     }
     final int maxDoc = reader.maxDoc();
-    final Bits delDocs = reader.getDeletedDocs();
+    final Bits liveDocs = reader.getLiveDocs();
     int sum = 0;
     final int inc = Math.max(1, maxDoc/50);
     for(int docID=0;docID<maxDoc;docID += inc) {
-      if (delDocs == null || !delDocs.get(docID)) {
+      if (liveDocs == null || liveDocs.get(docID)) {
        final Document doc = reader.document(docID);
        sum += doc.getFields().size();
      }

@@ -87,9 +87,9 @@ public class TestPKIndexSplitter extends LuceneTestCase {
   }

   private void checkContents(IndexReader ir, String indexname) throws Exception {
-    final Bits delDocs = MultiFields.getDeletedDocs(ir);
+    final Bits liveDocs = MultiFields.getLiveDocs(ir);
     for (int i = 0; i < ir.maxDoc(); i++) {
-      if (delDocs == null || !delDocs.get(i)) {
+      if (liveDocs == null || liveDocs.get(i)) {
         assertEquals(indexname, ir.document(i).get("indexname"));
       }
     }

@@ -86,7 +86,7 @@ public class DuplicateFilter

   private OpenBitSet correctBits(IndexReader reader) throws IOException {
     OpenBitSet bits = new OpenBitSet(reader.maxDoc()); //assume all are INvalid
-    final Bits delDocs = MultiFields.getDeletedDocs(reader);
+    final Bits liveDocs = MultiFields.getLiveDocs(reader);
     Terms terms = reader.fields().terms(fieldName);
     if (terms != null) {
       TermsEnum termsEnum = terms.iterator();
@@ -96,7 +96,7 @@ public class DuplicateFilter
         if (currTerm == null) {
           break;
         } else {
-          docs = termsEnum.docs(delDocs, docs);
+          docs = termsEnum.docs(liveDocs, docs);
           int doc = docs.nextDoc();
           if (doc != DocsEnum.NO_MORE_DOCS) {
             if (keepMode == KM_USE_FIRST_OCCURRENCE) {
@@ -124,7 +124,7 @@ public class DuplicateFilter

     OpenBitSet bits=new OpenBitSet(reader.maxDoc());
     bits.set(0,reader.maxDoc()); //assume all are valid
-    final Bits delDocs = MultiFields.getDeletedDocs(reader);
+    final Bits liveDocs = MultiFields.getLiveDocs(reader);
     Terms terms = reader.fields().terms(fieldName);
     if (terms != null) {
       TermsEnum termsEnum = terms.iterator();
@@ -136,7 +136,7 @@ public class DuplicateFilter
         } else {
           if (termsEnum.docFreq() > 1) {
             // unset potential duplicates
-            docs = termsEnum.docs(delDocs, docs);
+            docs = termsEnum.docs(liveDocs, docs);
             int doc = docs.nextDoc();
             if (doc != DocsEnum.NO_MORE_DOCS) {
               if (keepMode == KM_USE_FIRST_OCCURRENCE) {

@@ -63,7 +63,7 @@ public class TermsFilter
     OpenBitSet result=new OpenBitSet(reader.maxDoc());
     Fields fields = reader.fields();
     BytesRef br = new BytesRef();
-    Bits delDocs = reader.getDeletedDocs();
+    Bits liveDocs = reader.getLiveDocs();
     if (fields != null) {
       String lastField = null;
       Terms termsC = null;
@@ -80,7 +80,7 @@ public class TermsFilter
         if (terms != null) {
           br.copy(term.bytes());
           if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) {
-            docs = termsEnum.docs(delDocs, docs);
+            docs = termsEnum.docs(liveDocs, docs);
             while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
               result.set(docs.docID());
             }

@@ -139,7 +139,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
       Document d=searcher.doc(hits[i].doc);
       String url=d.get(KEY_FIELD);
       DocsEnum td = MultiFields.getTermDocsEnum(reader,
-                                                MultiFields.getDeletedDocs(reader),
+                                                MultiFields.getLiveDocs(reader),
                                                 KEY_FIELD,
                                                 new BytesRef(url));
       int lastDoc=0;
@@ -163,7 +163,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
       Document d=searcher.doc(hits[i].doc);
       String url=d.get(KEY_FIELD);
       DocsEnum td = MultiFields.getTermDocsEnum(reader,
-                                                MultiFields.getDeletedDocs(reader),
+                                                MultiFields.getLiveDocs(reader),
                                                 KEY_FIELD,
                                                 new BytesRef(url));
       int lastDoc=0;

@@ -46,7 +46,7 @@ public class CartesianShapeFilter extends Filter {

   @Override
   public DocIdSet getDocIdSet(final AtomicReaderContext context) throws IOException {
-    final Bits delDocs = context.reader.getDeletedDocs();
+    final Bits liveDocs = context.reader.getLiveDocs();
     final List<Double> area = shape.getArea();
     final int sz = area.size();

@@ -58,7 +58,7 @@ public class CartesianShapeFilter extends Filter {
       return new DocIdSet() {
         @Override
         public DocIdSetIterator iterator() throws IOException {
-          return context.reader.termDocsEnum(delDocs, fieldName, bytesRef);
+          return context.reader.termDocsEnum(liveDocs, fieldName, bytesRef);
         }

         @Override
@@ -71,7 +71,7 @@ public class CartesianShapeFilter extends Filter {
       for (int i =0; i< sz; i++) {
         double boxId = area.get(i).doubleValue();
         NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(boxId), 0, bytesRef);
-        final DocsEnum docsEnum = context.reader.termDocsEnum(delDocs, fieldName, bytesRef);
+        final DocsEnum docsEnum = context.reader.termDocsEnum(liveDocs, fieldName, bytesRef);
         if (docsEnum == null) continue;
         // iterate through all documents
         // which have this boxId

@@ -162,7 +162,7 @@ class BufferedDeletesStream {
   };

   /** Resolves the buffered deleted Term/Query/docIDs, into
-   *  actual deleted docIDs in the deletedDocs BitVector for
+   *  actual deleted docIDs in the liveDocs BitVector for
    *  each SegmentReader. */
   public synchronized ApplyDeletesResult applyDeletes(IndexWriter.ReaderPool readerPool, List<SegmentInfo> infos) throws IOException {
     final long t0 = System.currentTimeMillis();
@@ -399,7 +399,7 @@ class BufferedDeletesStream {
       // System.out.println("  term=" + term);

       if (termsEnum.seekExact(term.bytes(), false)) {
-        DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+        DocsEnum docsEnum = termsEnum.docs(reader.getLiveDocs(), docs);

         if (docsEnum != null) {
           while (true) {

@@ -17,12 +17,16 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexInput;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.lucene.document.AbstractField; // for javadocs
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.codecs.CodecProvider;
@@ -30,20 +34,16 @@ import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.index.values.IndexDocValues;
 import org.apache.lucene.index.values.ValuesEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.StringHelper;
-
-import java.text.NumberFormat;
-import java.io.PrintStream;
-import java.io.IOException;
-import java.io.File;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;

 /**
  * Basic tool and API to check the health of an index and
  * write a new segments file that removes reference to
@@ -520,13 +520,13 @@ public class CheckIndex {
       final int numDocs = reader.numDocs();
       toLoseDocCount = numDocs;
       if (reader.hasDeletions()) {
-        if (reader.deletedDocs.count() != info.getDelCount()) {
-          throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.count());
+        if (reader.liveDocs.count() != info.docCount - info.getDelCount()) {
+          throw new RuntimeException("delete count mismatch: info=" + (info.docCount - info.getDelCount()) + " vs reader=" + reader.liveDocs.count());
         }
-        if (reader.deletedDocs.count() > reader.maxDoc()) {
-          throw new RuntimeException("too many deleted docs: maxDoc()=" + reader.maxDoc() + " vs deletedDocs.count()=" + reader.deletedDocs.count());
+        if ((info.docCount-reader.liveDocs.count()) > reader.maxDoc()) {
+          throw new RuntimeException("too many deleted docs: maxDoc()=" + reader.maxDoc() + " vs del count=" + (info.docCount-reader.liveDocs.count()));
         }
-        if (info.docCount - numDocs != info.getDelCount()){
+        if (info.docCount - numDocs != info.getDelCount()) {
           throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.docCount - numDocs));
         }
         segInfoStat.numDeleted = info.docCount - numDocs;
@@ -654,7 +654,7 @@ public class CheckIndex {
     final Status.TermIndexStatus status = new Status.TermIndexStatus();

     final int maxDoc = reader.maxDoc();
-    final Bits delDocs = reader.getDeletedDocs();
+    final Bits liveDocs = reader.getLiveDocs();

     final IndexSearcher is = new IndexSearcher(reader);

@@ -712,8 +712,8 @@ public class CheckIndex {
       final int docFreq = terms.docFreq();
       status.totFreq += docFreq;

-      docs = terms.docs(delDocs, docs);
-      postings = terms.docsAndPositions(delDocs, postings);
+      docs = terms.docs(liveDocs, docs);
+      postings = terms.docsAndPositions(liveDocs, postings);

       if (hasOrd) {
         long ord = -1;
@@ -815,7 +815,7 @@ public class CheckIndex {
       if (hasPositions) {
         for(int idx=0;idx<7;idx++) {
           final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
-          postings = terms.docsAndPositions(delDocs, postings);
+          postings = terms.docsAndPositions(liveDocs, postings);
           final int docID = postings.advance(skipDocID);
           if (docID == DocsEnum.NO_MORE_DOCS) {
             break;
@@ -851,7 +851,7 @@ public class CheckIndex {
       } else {
         for(int idx=0;idx<7;idx++) {
           final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
-          docs = terms.docs(delDocs, docs);
+          docs = terms.docs(liveDocs, docs);
           final int docID = docs.advance(skipDocID);
           if (docID == DocsEnum.NO_MORE_DOCS) {
             break;
@@ -919,7 +919,7 @@ public class CheckIndex {
           throw new RuntimeException("seek to existing term " + seekTerms[i] + " failed");
         }

-        docs = terms.docs(delDocs, docs);
+        docs = terms.docs(liveDocs, docs);
         if (docs == null) {
           throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
         }
@@ -967,9 +967,9 @@ public class CheckIndex {
       }

       // Scan stored fields for all documents
-      final Bits delDocs = reader.getDeletedDocs();
+      final Bits liveDocs = reader.getLiveDocs();
       for (int j = 0; j < info.docCount; ++j) {
-        if (delDocs == null || !delDocs.get(j)) {
+        if (liveDocs == null || liveDocs.get(j)) {
           status.docCount++;
           Document doc = reader.document(j);
           status.totFields += doc.getFields().size();
@@ -1063,9 +1063,9 @@ public class CheckIndex {
         infoStream.print(" test: term vectors........");
       }

-      final Bits delDocs = reader.getDeletedDocs();
+      final Bits liveDocs = reader.getLiveDocs();
       for (int j = 0; j < info.docCount; ++j) {
-        if (delDocs == null || !delDocs.get(j)) {
+        if (liveDocs == null || liveDocs.get(j)) {
           status.docCount++;
           TermFreqVector[] tfv = reader.getTermFreqVectors(j);
           if (tfv != null) {

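Editorial note: CheckIndex now derives the deletion count instead of reading a stored one — delCount = info.docCount - liveDocs.count(). A worked check of the arithmetic under assumed numbers (docCount = 10, three deletions; illustrative only):

// Hypothetical numbers illustrating the consistency checks above.
public class DelCountArithmetic {
  public static void main(String[] args) {
    int docCount = 10;  // segment size, deleted docs included
    int delCount = 3;   // SegmentInfo.getDelCount()
    int liveCount = 7;  // what reader.liveDocs.count() should report

    if (liveCount != docCount - delCount) {
      throw new RuntimeException("delete count mismatch");
    }
    if ((docCount - liveCount) > docCount) { // cannot delete more docs than exist
      throw new RuntimeException("too many deleted docs");
    }
    System.out.println("derived delCount = " + (docCount - liveCount)); // 3
  }
}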
@@ -350,8 +350,8 @@ class DirectoryReader extends IndexReader implements Cloneable {
   }

   @Override
-  public Bits getDeletedDocs() {
-    throw new UnsupportedOperationException("please use MultiFields.getDeletedDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits deletedDocs");
+  public Bits getLiveDocs() {
+    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
   }

   @Override

@@ -249,7 +249,7 @@ public class DocTermOrds {

     boolean testedOrd = false;

-    final Bits delDocs = MultiFields.getDeletedDocs(reader);
+    final Bits liveDocs = MultiFields.getLiveDocs(reader);

     // we need a minimum of 9 bytes, but round up to 12 since the space would
     // be wasted with most allocators anyway.
@@ -312,7 +312,7 @@ public class DocTermOrds {
       final int df = te.docFreq();
       if (df <= maxTermDocFreq) {

-        docsEnum = te.docs(delDocs, docsEnum);
+        docsEnum = te.docs(liveDocs, docsEnum);

         final DocsEnum.BulkReadResult bulkResult = docsEnum.getBulkResult();

@@ -653,13 +653,13 @@ public class DocTermOrds {
     }

     @Override
-    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
-      return termsEnum.docs(skipDocs, reuse);
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
+      return termsEnum.docs(liveDocs, reuse);
     }

     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
-      return termsEnum.docsAndPositions(skipDocs, reuse);
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
+      return termsEnum.docsAndPositions(liveDocs, reuse);
     }

     @Override

@@ -111,13 +111,13 @@ public class DocumentsWriterPerThread {
   static class FlushedSegment {
     final SegmentInfo segmentInfo;
     final BufferedDeletes segmentDeletes;
-    final BitVector deletedDocuments;
+    final BitVector liveDocs;

     private FlushedSegment(SegmentInfo segmentInfo,
-        BufferedDeletes segmentDeletes, BitVector deletedDocuments) {
+        BufferedDeletes segmentDeletes, BitVector liveDocs) {
       this.segmentInfo = segmentInfo;
       this.segmentDeletes = segmentDeletes;
-      this.deletedDocuments = deletedDocuments;
+      this.liveDocs = liveDocs;
     }
   }

@@ -434,9 +434,10 @@ public class DocumentsWriterPerThread {
     // happens when an exception is hit processing that
     // doc, eg if analyzer has some problem w/ the text):
     if (pendingDeletes.docIDs.size() > 0) {
-      flushState.deletedDocs = new BitVector(numDocsInRAM);
+      flushState.liveDocs = new BitVector(numDocsInRAM);
+      flushState.liveDocs.invertAll();
       for(int delDocID : pendingDeletes.docIDs) {
-        flushState.deletedDocs.set(delDocID);
+        flushState.liveDocs.clear(delDocID);
       }
       pendingDeletes.bytesUsed.addAndGet(-pendingDeletes.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID);
       pendingDeletes.docIDs.clear();
@@ -460,7 +461,7 @@ public class DocumentsWriterPerThread {
     pendingDeletes.terms.clear();
     final SegmentInfo newSegment = new SegmentInfo(segment, flushState.numDocs, directory, false, flushState.segmentCodecs, fieldInfos.asReadOnly());
     if (infoStream != null) {
-      message("new segment has " + (flushState.deletedDocs == null ? 0 : flushState.deletedDocs.count()) + " deleted docs");
+      message("new segment has " + (flushState.liveDocs == null ? 0 : (flushState.numDocs - flushState.liveDocs.count())) + " deleted docs");
       message("new segment has " + (newSegment.getHasVectors() ? "vectors" : "no vectors"));
       message("flushedFiles=" + newSegment.files());
       message("flushed codecs=" + newSegment.getSegmentCodecs());
@@ -489,7 +490,7 @@ public class DocumentsWriterPerThread {
       doAfterFlush();
       success = true;

-      return new FlushedSegment(newSegment, segmentDeletes, flushState.deletedDocs);
+      return new FlushedSegment(newSegment, segmentDeletes, flushState.liveDocs);
     } finally {
       if (!success) {
         if (segment != null) {

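Editorial note: the flush path shows the BitVector idiom for starting from "all live": allocate, invertAll(), then clear one bit per buffered delete, and derive the deleted count as numDocs - count(). A minimal sketch with java.util.BitSet standing in for Lucene's internal BitVector (the doc IDs are hypothetical):

import java.util.BitSet;

public class FlushLiveDocsSketch {
  public static void main(String[] args) {
    int numDocsInRAM = 8;
    int[] pendingDeleteIDs = { 2, 5 }; // hypothetical buffered delete docIDs

    BitSet liveDocs = new BitSet(numDocsInRAM);
    liveDocs.set(0, numDocsInRAM);     // invertAll() equivalent: all docs start live
    for (int delDocID : pendingDeleteIDs) {
      liveDocs.clear(delDocID);        // a delete clears a live bit
    }

    int delCount = numDocsInRAM - liveDocs.cardinality(); // derived, not stored
    System.out.println("new segment has " + delCount + " deleted docs"); // 2
  }
}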
@@ -87,13 +87,13 @@ public class FilterIndexReader extends IndexReader {
     }

     @Override
-    public DocsEnum docs(Bits skipDocs, BytesRef text, DocsEnum reuse) throws IOException {
-      return in.docs(skipDocs, text, reuse);
+    public DocsEnum docs(Bits liveDocs, BytesRef text, DocsEnum reuse) throws IOException {
+      return in.docs(liveDocs, text, reuse);
     }

     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, BytesRef text, DocsAndPositionsEnum reuse) throws IOException {
-      return in.docsAndPositions(skipDocs, text, reuse);
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, BytesRef text, DocsAndPositionsEnum reuse) throws IOException {
+      return in.docsAndPositions(liveDocs, text, reuse);
     }

     @Override
@@ -172,13 +172,13 @@ public class FilterIndexReader extends IndexReader {
     }

     @Override
-    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
-      return in.docs(skipDocs, reuse);
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
+      return in.docs(liveDocs, reuse);
     }

     @Override
-    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
-      return in.docsAndPositions(skipDocs, reuse);
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
+      return in.docsAndPositions(liveDocs, reuse);
     }

     @Override
@@ -301,8 +301,8 @@ public class FilterIndexReader extends IndexReader {
   }

   @Override
-  public Bits getDeletedDocs() {
-    return in.getDeletedDocs();
+  public Bits getLiveDocs() {
+    return in.getLiveDocs();
   }

   @Override

@@ -339,10 +339,11 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
           // Mark it deleted.  TODO: we could also skip
           // writing its postings; this would be
           // deterministic (just for this Term's docs).
-          if (state.deletedDocs == null) {
-            state.deletedDocs = new BitVector(state.numDocs);
+          if (state.liveDocs == null) {
+            state.liveDocs = new BitVector(state.numDocs);
+            state.liveDocs.invertAll();
           }
-          state.deletedDocs.set(docID);
+          state.liveDocs.clear(docID);
         }

         // Carefully copy over the prox + payload info,

@@ -962,7 +962,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * requested document is deleted, and therefore asking for a deleted document
   * may yield unspecified results. Usually this is not required, however you
   * can test if the doc is deleted by checking the {@link
-  * Bits} returned from {@link MultiFields#getDeletedDocs}.
+  * Bits} returned from {@link MultiFields#getLiveDocs}.
   *
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
@@ -987,7 +987,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * requested document is deleted, and therefore asking for a deleted document
   * may yield unspecified results. Usually this is not required, however you
   * can test if the doc is deleted by checking the {@link
-  * Bits} returned from {@link MultiFields#getDeletedDocs}.
+  * Bits} returned from {@link MultiFields#getLiveDocs}.
   *
   * @param n Get the document at the <code>n</code><sup>th</sup> position
   * @param fieldSelector The {@link FieldSelector} to use to determine what
@@ -1136,7 +1136,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   /** Returns {@link DocsEnum} for the specified field &
   *  term. This may return null, if either the field or
   *  term does not exist. */
-  public DocsEnum termDocsEnum(Bits skipDocs, String field, BytesRef term) throws IOException {
+  public DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term) throws IOException {
     assert field != null;
     assert term != null;
     final Fields fields = fields();
@@ -1145,7 +1145,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
     }
     final Terms terms = fields.terms(field);
     if (terms != null) {
-      return terms.docs(skipDocs, term, null);
+      return terms.docs(liveDocs, term, null);
     } else {
       return null;
     }
@@ -1155,7 +1155,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   *  field & term. This may return null, if either the
   *  field or term does not exist, or, positions were not
   *  stored for this term. */
-  public DocsAndPositionsEnum termPositionsEnum(Bits skipDocs, String field, BytesRef term) throws IOException {
+  public DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term) throws IOException {
     assert field != null;
     assert term != null;
     final Fields fields = fields();
@@ -1164,7 +1164,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
     }
     final Terms terms = fields.terms(field);
     if (terms != null) {
-      return terms.docsAndPositions(skipDocs, term, null);
+      return terms.docsAndPositions(liveDocs, term, null);
     } else {
       return null;
     }
@@ -1175,7 +1175,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * {@link TermState}. This may return null, if either the field or the term
   * does not exists or the {@link TermState} is invalid for the underlying
   * implementation.*/
-  public DocsEnum termDocsEnum(Bits skipDocs, String field, BytesRef term, TermState state) throws IOException {
+  public DocsEnum termDocsEnum(Bits liveDocs, String field, BytesRef term, TermState state) throws IOException {
     assert state != null;
     assert field != null;
     final Fields fields = fields();
@@ -1184,7 +1184,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
     }
     final Terms terms = fields.terms(field);
     if (terms != null) {
-      return terms.docs(skipDocs, term, state, null);
+      return terms.docs(liveDocs, term, state, null);
     } else {
       return null;
     }
@@ -1195,7 +1195,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   * {@link TermState}. This may return null, if either the field or the term
   * does not exists, the {@link TermState} is invalid for the underlying
   * implementation, or positions were not stored for this term.*/
-  public DocsAndPositionsEnum termPositionsEnum(Bits skipDocs, String field, BytesRef term, TermState state) throws IOException {
+  public DocsAndPositionsEnum termPositionsEnum(Bits liveDocs, String field, BytesRef term, TermState state) throws IOException {
     assert state != null;
     assert field != null;
     final Fields fields = fields();
@@ -1204,7 +1204,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
     }
     final Terms terms = fields.terms(field);
     if (terms != null) {
-      return terms.docsAndPositions(skipDocs, term, state, null);
+      return terms.docsAndPositions(liveDocs, term, state, null);
     } else {
       return null;
     }
@@ -1260,7 +1260,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
   public int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
     ensureOpen();
     DocsEnum docs = MultiFields.getTermDocsEnum(this,
-                                                MultiFields.getDeletedDocs(this),
+                                                MultiFields.getLiveDocs(this),
                                                 term.field(),
                                                 term.bytes());
     if (docs == null) return 0;
@@ -1385,15 +1385,17 @@ public abstract class IndexReader implements Cloneable,Closeable {
   */
   public abstract Collection<String> getFieldNames(FieldOption fldOption);

-  /** Returns the {@link Bits} representing deleted docs.  A
-  *  set bit indicates the doc ID has been deleted.  This
-  *  method should return null when there are no deleted
-  *  docs.
+  /** Returns the {@link Bits} representing live (not
+  *  deleted) docs.  A set bit indicates the doc ID has not
+  *  been deleted.  If this method returns null it means
+  *  there are no deleted documents (all documents are
+  *  live).
   *
-  *  The returned instance has been safely published for use by
-  *  multiple threads without additional synchronization.
+  *  The returned instance has been safely published for
+  *  use by multiple threads without additional
+  *  synchronization.
   *  @lucene.experimental */
-  public abstract Bits getDeletedDocs();
+  public abstract Bits getLiveDocs();

   /**
   * Expert: return the IndexCommit that this reader has

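Editorial note: the new javadoc above fixes the contract every consumer in this commit follows — a null liveDocs means nothing was deleted; otherwise test the bit before touching the doc. A minimal consumer sketch against the IndexReader and Bits APIs in the diff (reader construction omitted; for composite readers use MultiFields.getLiveDocs instead, since DirectoryReader and MultiReader throw, as their hunks show):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.Bits;

public class LiveDocsConsumer {
  // Illustrative only: counts the docs the reader considers live.
  static int countLiveDocs(IndexReader reader) {
    final Bits liveDocs = reader.getLiveDocs();
    int count = 0;
    for (int docID = 0; docID < reader.maxDoc(); docID++) {
      if (liveDocs == null || liveDocs.get(docID)) {
        count++; // doc is live
      }
    }
    return count; // should equal reader.numDocs()
  }
}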
@@ -2214,8 +2214,8 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {

       // Must write deleted docs after the CFS so we don't
       // slurp the del file into CFS:
-      if (flushedSegment.deletedDocuments != null) {
-        final int delCount = flushedSegment.deletedDocuments.count();
+      if (flushedSegment.liveDocs != null) {
+        final int delCount = flushedSegment.segmentInfo.docCount - flushedSegment.liveDocs.count();
         assert delCount > 0;
         newSegment.setDelCount(delCount);
         newSegment.advanceDelGen();
@@ -2230,7 +2230,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
           // shortly-to-be-opened SegmentReader and let it
           // carry the changes; there's no reason to use
           // filesystem as intermediary here.
-          flushedSegment.deletedDocuments.write(directory, delFileName);
+          flushedSegment.liveDocs.write(directory, delFileName);
           success2 = true;
         } finally {
           if (!success2) {
@@ -2931,9 +2931,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
         // Reader was skipped because it was 100% deletions
         continue;
       }
-      final Bits prevDelDocs = previousReader.getDeletedDocs();
+      final Bits prevLiveDocs = previousReader.getLiveDocs();
       final SegmentReader currentReader = merge.readers.get(i);
-      final Bits currentDelDocs = currentReader.getDeletedDocs();
+      final Bits currentLiveDocs = currentReader.getLiveDocs();
       if (previousReader.hasDeletions()) {

         // There were deletes on this segment when the merge
@@ -2948,10 +2948,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
         // committed since we started the merge, so we
         // must merge them:
         for(int j=0;j<docCount;j++) {
-          if (prevDelDocs.get(j))
-            assert currentDelDocs.get(j);
+          if (!prevLiveDocs.get(j))
+            assert !currentLiveDocs.get(j);
           else {
-            if (currentDelDocs.get(j)) {
+            if (!currentLiveDocs.get(j)) {
               mergedReader.doDelete(docUpto);
               delCount++;
             }
@@ -2965,7 +2965,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
         // This segment had no deletes before but now it
         // does:
         for(int j=0; j<docCount; j++) {
-          if (currentDelDocs.get(j)) {
+          if (!currentLiveDocs.get(j)) {
             mergedReader.doDelete(docUpto);
             delCount++;
           }

@@ -17,7 +17,6 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.BytesRef;

@@ -35,7 +34,6 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
   int upto;
   DocsAndPositionsEnum current;
   int currentBase;
-  Bits skipDocs;
   int doc = -1;

   MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {

@@ -17,7 +17,6 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ReaderUtil;
 import java.io.IOException;

@@ -33,7 +32,6 @@ public final class MultiDocsEnum extends DocsEnum {
   int upto;
   DocsEnum current;
   int currentBase;
-  Bits skipDocs;
   int doc = -1;

   MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {

@@ -100,19 +100,19 @@ public final class MultiFields extends Fields {
     }
   }

-  public static Bits getDeletedDocs(IndexReader r) {
+  public static Bits getLiveDocs(IndexReader r) {
     Bits result;
     if (r.hasDeletions()) {

-      final List<Bits> delDocs = new ArrayList<Bits>();
+      final List<Bits> liveDocs = new ArrayList<Bits>();
       final List<Integer> starts = new ArrayList<Integer>();

       try {
         final int maxDoc = new ReaderUtil.Gather(r) {
             @Override
             protected void add(int base, IndexReader r) throws IOException {
-              // record all delDocs, even if they are null
-              delDocs.add(r.getDeletedDocs());
+              // record all liveDocs, even if they are null
+              liveDocs.add(r.getLiveDocs());
               starts.add(base);
             }
           }.run();
@@ -122,12 +122,12 @@ public final class MultiFields extends Fields {
         throw new RuntimeException(ioe);
       }

-      assert delDocs.size() > 0;
-      if (delDocs.size() == 1) {
+      assert liveDocs.size() > 0;
+      if (liveDocs.size() == 1) {
         // Only one actual sub reader -- optimize this case
-        result = delDocs.get(0);
+        result = liveDocs.get(0);
       } else {
-        result = new MultiBits(delDocs, starts);
+        result = new MultiBits(liveDocs, starts, true);
       }

     } else {
@@ -150,12 +150,12 @@ public final class MultiFields extends Fields {
   /** Returns {@link DocsEnum} for the specified field &
   *  term. This may return null if the term does not
   *  exist. */
-  public static DocsEnum getTermDocsEnum(IndexReader r, Bits skipDocs, String field, BytesRef term) throws IOException {
+  public static DocsEnum getTermDocsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
     assert field != null;
     assert term != null;
     final Terms terms = getTerms(r, field);
     if (terms != null) {
-      return terms.docs(skipDocs, term, null);
+      return terms.docs(liveDocs, term, null);
     } else {
       return null;
     }
@@ -164,12 +164,12 @@ public final class MultiFields extends Fields {
   /** Returns {@link DocsAndPositionsEnum} for the specified
   *  field & term. This may return null if the term does
   *  not exist or positions were not indexed. */
-  public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits skipDocs, String field, BytesRef term) throws IOException {
+  public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
     assert field != null;
     assert term != null;
     final Terms terms = getTerms(r, field);
     if (terms != null) {
-      return terms.docsAndPositions(skipDocs, term, null);
+      return terms.docsAndPositions(liveDocs, term, null);
     } else {
       return null;
     }

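Editorial note: MultiFields.getLiveDocs concatenates per-segment live-docs, and a null sub-Bits now has to read as "all live"; the new third argument to MultiBits (true) presumably encodes that default. An illustrative standalone equivalent, explicitly not Lucene's MultiBits:

import org.apache.lucene.util.Bits;

// Hypothetical concatenation of per-segment liveDocs; a null sub means no deletions.
final class ConcatLiveBits implements Bits {
  private final Bits[] subs;   // per-segment liveDocs, entries may be null
  private final int[] starts;  // doc base of each segment; starts[subs.length] == maxDoc

  ConcatLiveBits(Bits[] subs, int[] starts) {
    this.subs = subs;
    this.starts = starts;
  }

  public boolean get(int doc) {
    // linear scan for brevity; the real MultiBits binary-searches the starts
    int i = 0;
    while (i + 1 < subs.length && doc >= starts[i + 1]) {
      i++;
    }
    final Bits sub = subs[i];
    return sub == null || sub.get(doc - starts[i]); // null sub == all live
  }

  public int length() {
    return starts[subs.length];
  }
}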
@@ -141,8 +141,8 @@ public class MultiReader extends IndexReader implements Cloneable {
   }

   @Override
-  public Bits getDeletedDocs() {
-    throw new UnsupportedOperationException("please use MultiFields.getDeletedDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits deletedDocs");
+  public Bits getLiveDocs() {
+    throw new UnsupportedOperationException("please use MultiFields.getLiveDocs, or wrap your IndexReader with SlowMultiReaderWrapper, if you really need a top level Bits liveDocs");
   }

   /**

@@ -346,7 +346,7 @@ public final class MultiTermsEnum extends TermsEnum {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
final MultiDocsEnum docsEnum;
if (reuse != null) {
docsEnum = (MultiDocsEnum) reuse;

@@ -354,11 +354,11 @@ public final class MultiTermsEnum extends TermsEnum {
docsEnum = new MultiDocsEnum();
}

final MultiBits multiSkipDocs;
if (skipDocs instanceof MultiBits) {
multiSkipDocs = (MultiBits) skipDocs;
final MultiBits multiLiveDocs;
if (liveDocs instanceof MultiBits) {
multiLiveDocs = (MultiBits) liveDocs;
} else {
multiSkipDocs = null;
multiLiveDocs = null;
}

int upto = 0;

@@ -369,22 +369,22 @@ public final class MultiTermsEnum extends TermsEnum {

final Bits b;

if (multiSkipDocs != null) {
if (multiLiveDocs != null) {
// optimize for common case: requested skip docs is a
// congruent sub-slice of MultiBits: in this case, we
// just pull the skipDocs from the sub reader, rather
// just pull the liveDocs from the sub reader, rather
// than making the inefficient
// Slice(Multi(sub-readers)):
final MultiBits.SubResult sub = multiSkipDocs.getMatchingSub(entry.subSlice);
final MultiBits.SubResult sub = multiLiveDocs.getMatchingSub(entry.subSlice);
if (sub.matches) {
b = sub.result;
} else {
// custom case: requested skip docs is foreign:
// must slice it on every access
b = new BitsSlice(skipDocs, entry.subSlice);
b = new BitsSlice(liveDocs, entry.subSlice);
}
} else if (skipDocs != null) {
b = new BitsSlice(skipDocs, entry.subSlice);
} else if (liveDocs != null) {
b = new BitsSlice(liveDocs, entry.subSlice);
} else {
// no deletions
b = null;

@@ -407,7 +407,7 @@ public final class MultiTermsEnum extends TermsEnum {
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
final MultiDocsAndPositionsEnum docsAndPositionsEnum;
if (reuse != null) {
docsAndPositionsEnum = (MultiDocsAndPositionsEnum) reuse;

@@ -415,11 +415,11 @@ public final class MultiTermsEnum extends TermsEnum {
docsAndPositionsEnum = new MultiDocsAndPositionsEnum();
}

final MultiBits multiSkipDocs;
if (skipDocs instanceof MultiBits) {
multiSkipDocs = (MultiBits) skipDocs;
final MultiBits multiLiveDocs;
if (liveDocs instanceof MultiBits) {
multiLiveDocs = (MultiBits) liveDocs;
} else {
multiSkipDocs = null;
multiLiveDocs = null;
}

int upto = 0;

@@ -430,23 +430,23 @@ public final class MultiTermsEnum extends TermsEnum {

final Bits b;

if (multiSkipDocs != null) {
if (multiLiveDocs != null) {
// Optimize for common case: requested skip docs is a
// congruent sub-slice of MultiBits: in this case, we
// just pull the skipDocs from the sub reader, rather
// just pull the liveDocs from the sub reader, rather
// than making the inefficient
// Slice(Multi(sub-readers)):
final MultiBits.SubResult sub = multiSkipDocs.getMatchingSub(top[i].subSlice);
final MultiBits.SubResult sub = multiLiveDocs.getMatchingSub(top[i].subSlice);
if (sub.matches) {
b = sub.result;
} else {
// custom case: requested skip docs is foreign:
// must slice it on every access (very
// inefficient)
b = new BitsSlice(skipDocs, top[i].subSlice);
b = new BitsSlice(liveDocs, top[i].subSlice);
}
} else if (skipDocs != null) {
b = new BitsSlice(skipDocs, top[i].subSlice);
} else if (liveDocs != null) {
b = new BitsSlice(liveDocs, top[i].subSlice);
} else {
// no deletions
b = null;

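The "congruent sub-slice" optimization above is easier to see as arithmetic. A hedged conceptual sketch (method name hypothetical, assuming org.apache.lucene.util.Bits): a foreign Bits must be re-based on every access, which is exactly the per-lookup cost the MultiBits fast path avoids by handing back the sub-reader's own liveDocs directly.

// Conceptual: testing liveness of a sub-reader doc against a top-level Bits.
static boolean isLiveInSub(Bits topLevelLiveDocs, int subReaderDocBase, int subDocID) {
  // BitsSlice pays this offset add on every get(); a matching MultiBits sub
  // skips it entirely by returning the per-segment Bits unchanged.
  return topLevelLiveDocs == null || topLevelLiveDocs.get(subReaderDocBase + subDocID);
}
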
@@ -207,8 +207,8 @@ public class ParallelReader extends IndexReader {
}

@Override
public Bits getDeletedDocs() {
return MultiFields.getDeletedDocs(readers.get(0));
public Bits getLiveDocs() {
return MultiFields.getLiveDocs(readers.get(0));
}

@Override

@@ -282,11 +282,12 @@ final class SegmentMerger {
throws IOException, MergeAbortedException, CorruptIndexException {
int docCount = 0;
final int maxDoc = reader.maxDoc();
final Bits delDocs = reader.getDeletedDocs();
final Bits liveDocs = reader.getLiveDocs();
assert liveDocs != null;
if (matchingFieldsReader != null) {
// We can bulk-copy because the fieldInfos are "congruent"
for (int j = 0; j < maxDoc;) {
if (delDocs.get(j)) {
if (!liveDocs.get(j)) {
// skip deleted docs
++j;
continue;

@@ -298,7 +299,7 @@ final class SegmentMerger {
j++;
numDocs++;
if (j >= maxDoc) break;
if (delDocs.get(j)) {
if (!liveDocs.get(j)) {
j++;
break;
}

@@ -311,7 +312,7 @@ final class SegmentMerger {
}
} else {
for (int j = 0; j < maxDoc; j++) {
if (delDocs.get(j)) {
if (!liveDocs.get(j)) {
// skip deleted docs
continue;
}

@@ -401,11 +402,11 @@ final class SegmentMerger {
final IndexReader reader)
throws IOException, MergeAbortedException {
final int maxDoc = reader.maxDoc();
final Bits delDocs = reader.getDeletedDocs();
final Bits liveDocs = reader.getLiveDocs();
if (matchingVectorsReader != null) {
// We can bulk-copy because the fieldInfos are "congruent"
for (int docNum = 0; docNum < maxDoc;) {
if (delDocs.get(docNum)) {
if (!liveDocs.get(docNum)) {
// skip deleted docs
++docNum;
continue;

@@ -417,7 +418,7 @@ final class SegmentMerger {
docNum++;
numDocs++;
if (docNum >= maxDoc) break;
if (delDocs.get(docNum)) {
if (!liveDocs.get(docNum)) {
docNum++;
break;
}

@@ -429,7 +430,7 @@ final class SegmentMerger {
}
} else {
for (int docNum = 0; docNum < maxDoc; docNum++) {
if (delDocs.get(docNum)) {
if (!liveDocs.get(docNum)) {
// skip deleted docs
continue;
}

@@ -499,14 +500,14 @@ final class SegmentMerger {
if (f != null) {
slices.add(new ReaderUtil.Slice(docBase, maxDoc, fields.size()));
fields.add(f);
bits.add(r.getDeletedDocs());
bits.add(r.getLiveDocs());
bitsStarts.add(docBase);
}
final PerDocValues producer = r.perDocValues();
if (producer != null) {
perDocSlices.add(new ReaderUtil.Slice(docBase, maxDoc, fields.size()));
perDocProducers.add(producer);
perDocBits.add(r.getDeletedDocs());
perDocBits.add(r.getLiveDocs());
perDocBitsStarts.add(docBase);
}
docBase += maxDoc;

@@ -544,13 +545,13 @@ final class SegmentMerger {
inputDocBase += reader.maxDoc();
if (mergeState.delCounts[i] != 0) {
int delCount = 0;
final Bits delDocs = reader.getDeletedDocs();
assert delDocs != null;
final Bits liveDocs = reader.getLiveDocs();
assert liveDocs != null;
final int maxDoc = reader.maxDoc();
final int[] docMap = mergeState.docMaps[i] = new int[maxDoc];
int newDocID = 0;
for(int j=0;j<maxDoc;j++) {
if (delDocs.get(j)) {
if (!liveDocs.get(j)) {
docMap[j] = -1;
delCount++; // only for assert
} else {

@@ -571,7 +572,7 @@ final class SegmentMerger {
// MultiBits as our skip docs only to have it broken
// apart when we step through the docs enums in
// MultiDocsEnum.
mergeState.multiDeletedDocs = new MultiBits(bits, bitsStarts);
mergeState.multiLiveDocs = new MultiBits(bits, bitsStarts, true);

consumer.merge(mergeState,
new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),

@@ -580,7 +581,7 @@ final class SegmentMerger {
consumer.close();
}
if (!perDocSlices.isEmpty()) {
mergeState.multiDeletedDocs = new MultiBits(perDocBits, perDocBitsStarts);
mergeState.multiLiveDocs = new MultiBits(perDocBits, perDocBitsStarts, true);
final PerDocConsumer docsConsumer = codec
.docsConsumer(new PerDocWriteState(segmentWriteState));
try {

@@ -592,7 +593,6 @@ final class SegmentMerger {
docsConsumer.close();
}
}

}

private MergeState mergeState;

@@ -635,9 +635,9 @@ final class SegmentMerger {
} else {
// this segment has deleted docs, so we have to
// check for every doc if it is deleted or not
final Bits delDocs = reader.getDeletedDocs();
final Bits liveDocs = reader.getLiveDocs();
for (int k = 0; k < maxDoc; k++) {
if (!delDocs.get(k)) {
if (liveDocs.get(k)) {
output.writeByte(normBuffer[k]);
}
}

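Every deletion test in SegmentMerger flips polarity: delDocs.get(doc) (set bit = deleted, skip it) becomes !liveDocs.get(doc) (unset bit = deleted, skip it). A hedged sketch of the loop shape, with copyOneDoc standing in for the copy bodies above; the merge paths above may assume liveDocs is non-null:

// Same control flow as the merge loops above, reduced to its skeleton.
for (int j = 0; j < maxDoc; j++) {
  if (!liveDocs.get(j)) {   // post-inversion: unset bit means deleted
    continue;               // skip deleted docs
  }
  copyOneDoc(j);            // hypothetical stand-in for the bulk-copy logic
}
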
@@ -51,9 +51,9 @@ public class SegmentReader extends IndexReader implements Cloneable {
CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();

volatile BitVector deletedDocs;
AtomicInteger deletedDocsRef = null;
private boolean deletedDocsDirty = false;
volatile BitVector liveDocs;
AtomicInteger liveDocsRef = null;
private boolean liveDocsDirty = false;
private boolean normsDirty = false;

// TODO: we should move this tracking into SegmentInfo;

@@ -116,7 +116,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
if (doOpenStores) {
instance.core.openDocStores(si);
}
instance.loadDeletedDocs();
instance.loadLiveDocs();
instance.openNorms(instance.core.cfsDir, readBufferSize);
success = true;
} finally {

@@ -138,17 +138,17 @@ public class SegmentReader extends IndexReader implements Cloneable {
}

@Override
public Bits getDeletedDocs() {
return deletedDocs;
public Bits getLiveDocs() {
return liveDocs;
}

private boolean checkDeletedCounts() throws IOException {
final int recomputedCount = deletedDocs.getRecomputedCount();
private boolean checkLiveCounts() throws IOException {
final int recomputedCount = liveDocs.getRecomputedCount();
// First verify BitVector is self consistent:
assert liveDocs.count() == recomputedCount : "live count=" + liveDocs.count() + " vs recomputed count=" + recomputedCount;

assert deletedDocs.count() == recomputedCount : "deleted count=" + deletedDocs.count() + " vs recomputed count=" + recomputedCount;

assert si.getDelCount() == recomputedCount :
"delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + recomputedCount;
assert si.getDelCount() == si.docCount - recomputedCount :
"delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + (si.docCount-recomputedCount);

// Verify # deletes does not exceed maxDoc for this
// segment:

@@ -158,14 +158,17 @@ public class SegmentReader extends IndexReader implements Cloneable {
return true;
}

private void loadDeletedDocs() throws IOException {
private void loadLiveDocs() throws IOException {
// NOTE: the bitvector is stored using the regular directory, not cfs
if (hasDeletions(si)) {
deletedDocs = new BitVector(directory(), si.getDelFileName());
deletedDocsRef = new AtomicInteger(1);
assert checkDeletedCounts();
if (deletedDocs.size() != si.docCount) {
throw new CorruptIndexException("document count mismatch: deleted docs count " + deletedDocs.size() + " vs segment doc count " + si.docCount + " segment=" + si.name);
liveDocs = new BitVector(directory(), si.getDelFileName());
if (liveDocs.getVersion() < BitVector.VERSION_DGAPS_CLEARED) {
liveDocs.invertAll();
}
liveDocsRef = new AtomicInteger(1);
assert checkLiveCounts();
if (liveDocs.size() != si.docCount) {
throw new CorruptIndexException("document count mismatch: deleted docs count " + liveDocs.size() + " vs segment doc count " + si.docCount + " segment=" + si.name);
}
} else
assert si.getDelCount() == 0;

@@ -256,27 +259,27 @@ public class SegmentReader extends IndexReader implements Cloneable {

if (!openReadOnly && hasChanges) {
// My pending changes transfer to the new reader
clone.deletedDocsDirty = deletedDocsDirty;
clone.liveDocsDirty = liveDocsDirty;
clone.normsDirty = normsDirty;
clone.hasChanges = hasChanges;
hasChanges = false;
}

if (doClone) {
if (deletedDocs != null) {
deletedDocsRef.incrementAndGet();
clone.deletedDocs = deletedDocs;
clone.deletedDocsRef = deletedDocsRef;
if (liveDocs != null) {
liveDocsRef.incrementAndGet();
clone.liveDocs = liveDocs;
clone.liveDocsRef = liveDocsRef;
}
} else {
if (!deletionsUpToDate) {
// load deleted docs
assert clone.deletedDocs == null;
clone.loadDeletedDocs();
} else if (deletedDocs != null) {
deletedDocsRef.incrementAndGet();
clone.deletedDocs = deletedDocs;
clone.deletedDocsRef = deletedDocsRef;
assert clone.liveDocs == null;
clone.loadLiveDocs();
} else if (liveDocs != null) {
liveDocsRef.incrementAndGet();
clone.liveDocs = liveDocs;
clone.liveDocsRef = liveDocsRef;
}
}

@@ -326,10 +329,10 @@ public class SegmentReader extends IndexReader implements Cloneable {
}

private synchronized void commitChanges(Map<String,String> commitUserData) throws IOException {
if (deletedDocsDirty) { // re-write deleted
if (liveDocsDirty) { // re-write deleted
si.advanceDelGen();

assert deletedDocs.length() == si.docCount;
assert liveDocs.length() == si.docCount;

// We can write directly to the actual name (vs to a
// .tmp & renaming it) because the file is not live

@@ -337,7 +340,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
final String delFileName = si.getDelFileName();
boolean success = false;
try {
deletedDocs.write(directory(), delFileName);
liveDocs.write(directory(), delFileName);
success = true;
} finally {
if (!success) {

@@ -349,10 +352,9 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
}

si.setDelCount(si.getDelCount()+pendingDeleteCount);
pendingDeleteCount = 0;
assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
assert (maxDoc()-liveDocs.count()) == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + (maxDoc()-liveDocs.count());
} else {
assert pendingDeleteCount == 0;
}

@@ -365,7 +367,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
}
}
}
deletedDocsDirty = false;
liveDocsDirty = false;
normsDirty = false;
hasChanges = false;
}

@@ -379,10 +381,10 @@ public class SegmentReader extends IndexReader implements Cloneable {
termVectorsLocal.close();
fieldsReaderLocal.close();

if (deletedDocs != null) {
deletedDocsRef.decrementAndGet();
if (liveDocs != null) {
liveDocsRef.decrementAndGet();
// null so if an app hangs on to us we still free most ram
deletedDocs = null;
liveDocs = null;
}

for (final SegmentNorms norm : norms.values()) {

@@ -401,7 +403,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return deletedDocs != null;
return liveDocs != null;
}

static boolean usesCompoundFile(SegmentInfo si) throws IOException {

@@ -414,38 +416,39 @@ public class SegmentReader extends IndexReader implements Cloneable {

@Override
protected void doDelete(int docNum) {
if (deletedDocs == null) {
deletedDocs = new BitVector(maxDoc());
deletedDocsRef = new AtomicInteger(1);
if (liveDocs == null) {
liveDocs = new BitVector(maxDoc());
liveDocs.setAll();
liveDocsRef = new AtomicInteger(1);
}
// there is more than 1 SegmentReader with a reference to this
// deletedDocs BitVector so decRef the current deletedDocsRef,
// clone the BitVector, create a new deletedDocsRef
if (deletedDocsRef.get() > 1) {
AtomicInteger oldRef = deletedDocsRef;
deletedDocs = cloneDeletedDocs(deletedDocs);
deletedDocsRef = new AtomicInteger(1);
// liveDocs BitVector so decRef the current liveDocsRef,
// clone the BitVector, create a new liveDocsRef
if (liveDocsRef.get() > 1) {
AtomicInteger oldRef = liveDocsRef;
liveDocs = cloneDeletedDocs(liveDocs);
liveDocsRef = new AtomicInteger(1);
oldRef.decrementAndGet();
}
deletedDocsDirty = true;
if (!deletedDocs.getAndSet(docNum)) {
liveDocsDirty = true;
if (liveDocs.getAndClear(docNum)) {
pendingDeleteCount++;
}
}

@Override
protected void doUndeleteAll() {
deletedDocsDirty = false;
if (deletedDocs != null) {
assert deletedDocsRef != null;
deletedDocsRef.decrementAndGet();
deletedDocs = null;
deletedDocsRef = null;
liveDocsDirty = false;
if (liveDocs != null) {
assert liveDocsRef != null;
liveDocsRef.decrementAndGet();
liveDocs = null;
liveDocsRef = null;
pendingDeleteCount = 0;
si.clearDelGen();
si.setDelCount(0);
} else {
assert deletedDocsRef == null;
assert liveDocsRef == null;
assert pendingDeleteCount == 0;
}
}

@@ -484,10 +487,11 @@ public class SegmentReader extends IndexReader implements Cloneable {
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
int n = maxDoc();
if (deletedDocs != null)
n -= deletedDocs.count();
return n;
if (liveDocs != null) {
return liveDocs.count();
} else {
return maxDoc();
}
}

@Override

@@ -790,7 +794,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
void startCommit() {
rollbackSegmentInfo = (SegmentInfo) si.clone();
rollbackHasChanges = hasChanges;
rollbackDeletedDocsDirty = deletedDocsDirty;
rollbackDeletedDocsDirty = liveDocsDirty;
rollbackNormsDirty = normsDirty;
rollbackPendingDeleteCount = pendingDeleteCount;
for (SegmentNorms norm : norms.values()) {

@@ -801,7 +805,7 @@ public class SegmentReader extends IndexReader implements Cloneable {
void rollbackCommit() {
si.reset(rollbackSegmentInfo);
hasChanges = rollbackHasChanges;
deletedDocsDirty = rollbackDeletedDocsDirty;
liveDocsDirty = rollbackDeletedDocsDirty;
normsDirty = rollbackNormsDirty;
pendingDeleteCount = rollbackPendingDeleteCount;
for (SegmentNorms norm : norms.values()) {

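SegmentReader keeps the on-disk .del format (a deletions bitmap, for older files) but holds the inverted form in RAM, so the lifecycle becomes: invert after load, setAll() when lazily creating the vector, clear a bit to delete, and count() now counts live docs. A hedged sketch of those moments, using only the BitVector calls visible in the diff; everything else here is a placeholder:

// Hedged sketch (not the shipped code) of the liveDocs moments above.
static void liveDocsLifecycle(int maxDoc, int docToDelete) {
  BitVector liveDocs = new BitVector(maxDoc); // lazily created on first delete
  liveDocs.setAll();                          // every doc starts live
  if (liveDocs.getAndClear(docToDelete)) {
    // previous bit was set: the doc really was live, which is when
    // doDelete() above increments pendingDeleteCount
  }
  int numDocs = liveDocs.count();             // live count, not maxDoc - delCount
}
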
@@ -40,7 +40,7 @@ public class SegmentWriteState {
public final BufferedDeletes segDeletes;

// Lazily created:
public BitVector deletedDocs;
public BitVector liveDocs;

final SegmentCodecs segmentCodecs;
public final int codecId;

@@ -76,11 +76,10 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
}

@Override
public Bits getDeletedDocs() {
return MultiFields.getDeletedDocs(in);
public Bits getLiveDocs() {
return MultiFields.getLiveDocs(in);
}

@Override
public IndexReader[] getSequentialSubReaders() {
return null;

@@ -74,10 +74,10 @@ public abstract class Terms {

/** Get {@link DocsEnum} for the specified term. This
* method may return null if the term does not exist. */
public DocsEnum docs(Bits skipDocs, BytesRef text, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, BytesRef text, DocsEnum reuse) throws IOException {
final TermsEnum termsEnum = getThreadTermsEnum();
if (termsEnum.seekExact(text, true)) {
return termsEnum.docs(skipDocs, reuse);
return termsEnum.docs(liveDocs, reuse);
} else {
return null;
}

@@ -86,10 +86,10 @@ public abstract class Terms {
/** Get {@link DocsAndPositionsEnum} for the specified term. This
* method may return null if the term does not
* exist, or positions were not indexed. */
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, BytesRef text, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, BytesRef text, DocsAndPositionsEnum reuse) throws IOException {
final TermsEnum termsEnum = getThreadTermsEnum();
if (termsEnum.seekExact(text, true)) {
return termsEnum.docsAndPositions(skipDocs, reuse);
return termsEnum.docsAndPositions(liveDocs, reuse);
} else {
return null;
}

@@ -101,10 +101,10 @@ public abstract class Terms {
*
* @see TermsEnum#termState()
* @see TermsEnum#seekExact(BytesRef, TermState) */
public DocsEnum docs(Bits skipDocs, BytesRef term, TermState termState, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, BytesRef term, TermState termState, DocsEnum reuse) throws IOException {
final TermsEnum termsEnum = getThreadTermsEnum();
termsEnum.seekExact(term, termState);
return termsEnum.docs(skipDocs, reuse);
return termsEnum.docs(liveDocs, reuse);
}

/**

@@ -114,10 +114,10 @@ public abstract class Terms {
*
* @see TermsEnum#termState()
* @see TermsEnum#seekExact(BytesRef, TermState) */
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, BytesRef term, TermState termState, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, BytesRef term, TermState termState, DocsAndPositionsEnum reuse) throws IOException {
final TermsEnum termsEnum = getThreadTermsEnum();
termsEnum.seekExact(term, termState);
return termsEnum.docsAndPositions(skipDocs, reuse);
return termsEnum.docsAndPositions(liveDocs, reuse);
}

public long getUniqueTermCount() throws IOException {

@@ -147,16 +147,16 @@ public abstract class TermsEnum {
* call this when the enum is unpositioned. This method
* will not return null.
*
* @param skipDocs set bits are documents that should not
* @param liveDocs unset bits are documents that should not
* be returned
* @param reuse pass a prior DocsEnum for possible reuse */
public abstract DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException;
public abstract DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException;

/** Get {@link DocsAndPositionsEnum} for the current term.
* Do not call this when the enum is unpositioned.
* This method will only return null if positions were
* not indexed into the postings by this codec. */
public abstract DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException;
public abstract DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException;

/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum

@@ -224,12 +224,12 @@ public abstract class TermsEnum {
}

@Override
public DocsEnum docs(Bits bits, DocsEnum reuse) {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) {
throw new IllegalStateException("this method should never be called");
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits bits, DocsAndPositionsEnum reuse) {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) {
throw new IllegalStateException("this method should never be called");
}

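The contract flip is easiest to state next to the old one: with skipDocs a set bit meant "do not return this document"; with liveDocs a set bit means "return it", and null still means the reader has no deletions. A hedged one-method sketch (assuming org.apache.lucene.util.Bits) of the test every enum in this commit now performs:

// The acceptance check shared by all the nextDoc() loops in this commit.
static boolean accept(Bits liveDocs, int doc) {
  return liveDocs == null   // no deletions at all
      || liveDocs.get(doc); // set bit = live document
}
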
@@ -688,23 +688,23 @@ public class BlockTermsReader extends FieldsProducer {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
//System.out.println("BTR.docs this=" + this);
decodeMetaData();
//System.out.println("  state.docFreq=" + state.docFreq);
final DocsEnum docsEnum = postingsReader.docs(fieldInfo, state, skipDocs, reuse);
final DocsEnum docsEnum = postingsReader.docs(fieldInfo, state, liveDocs, reuse);
assert docsEnum != null;
return docsEnum;
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
//System.out.println("BTR.d&p this=" + this);
decodeMetaData();
if (fieldInfo.omitTermFreqAndPositions) {
return null;
} else {
DocsAndPositionsEnum dpe = postingsReader.docsAndPositions(fieldInfo, state, skipDocs, reuse);
DocsAndPositionsEnum dpe = postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse);
//System.out.println("  return d&pe=" + dpe);
return dpe;
}

@@ -114,8 +114,8 @@ public abstract class DocValuesConsumer {
final IndexDocValues r = reader.docValues(mergeState.fieldInfo.name);
if (r != null) {
merged = true;
merge(new Writer.MergeState(r, docBase, reader.maxDoc(), reader
.getDeletedDocs()));
merge(new Writer.MergeState(r, docBase, reader.maxDoc(),
reader.getLiveDocs()));
}
docBase += reader.numDocs();
}

@@ -152,15 +152,15 @@ public abstract class DocValuesConsumer {
public final int docBase;
/** the number of documents in this MergeState */
public final int docCount;
/** the deleted bits for this MergeState */
public final Bits bits;
/** the live (not deleted) bits for this MergeState */
public final Bits liveDocs;

public MergeState(IndexDocValues reader, int docBase, int docCount, Bits bits) {
public MergeState(IndexDocValues reader, int docBase, int docCount, Bits liveDocs) {
assert reader != null;
this.reader = reader;
this.docBase = docBase;
this.docCount = docCount;
this.bits = bits;
this.liveDocs = liveDocs;
}
}
}

@@ -39,7 +39,7 @@ public class MergeState {
public int[] delCounts; // Deletion count per reader
public int[] docBase; // New docID base per reader
public int mergedDocCount; // Total # merged docs
public Bits multiDeletedDocs;
public Bits multiLiveDocs;
public CheckAbort checkAbort;

// Updated per field;

@@ -49,11 +49,11 @@ public abstract class PostingsReaderBase implements Closeable {

/** Must fully consume state, since after this call that
* TermState may be reused. */
public abstract DocsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsEnum reuse) throws IOException;
public abstract DocsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits liveDocs, DocsEnum reuse) throws IOException;

/** Must fully consume state, since after this call that
* TermState may be reused. */
public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException;
public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException;

public abstract void close() throws IOException;

@@ -67,7 +67,7 @@ public abstract class TermsConsumer {
MultiDocsEnum docsEnumIn = null;

while((term = termsEnum.next()) != null) {
docsEnumIn = (MultiDocsEnum) termsEnum.docs(mergeState.multiDeletedDocs, docsEnumIn);
docsEnumIn = (MultiDocsEnum) termsEnum.docs(mergeState.multiLiveDocs, docsEnumIn);
if (docsEnumIn != null) {
docsEnum.reset(docsEnumIn);
final PostingsConsumer postingsConsumer = startTerm(term);

@@ -89,7 +89,7 @@ public abstract class TermsConsumer {
postingsEnum.setMergeState(mergeState);
MultiDocsAndPositionsEnum postingsEnumIn = null;
while((term = termsEnum.next()) != null) {
postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(mergeState.multiDeletedDocs, postingsEnumIn);
postingsEnumIn = (MultiDocsAndPositionsEnum) termsEnum.docsAndPositions(mergeState.multiLiveDocs, postingsEnumIn);
if (postingsEnumIn != null) {
postingsEnum.reset(postingsEnumIn);
// set PayloadProcessor

@@ -269,7 +269,7 @@ public class MemoryCodec extends Codec {
private byte[] buffer = new byte[16];
private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);

private Bits skipDocs;
private Bits liveDocs;
private int docUpto;
private int docID;
private int freq;

@@ -285,14 +285,14 @@ public class MemoryCodec extends Codec {
return omitTFAP == this.omitTFAP && storePayloads == this.storePayloads;
}

public FSTDocsEnum reset(BytesRef bufferIn, Bits skipDocs, int numDocs) {
public FSTDocsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
assert numDocs > 0;
if (buffer.length < bufferIn.length - bufferIn.offset) {
buffer = ArrayUtil.grow(buffer, bufferIn.length - bufferIn.offset);
}
in.reset(buffer, 0, bufferIn.length - bufferIn.offset);
System.arraycopy(bufferIn.bytes, bufferIn.offset, buffer, 0, bufferIn.length - bufferIn.offset);
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
docID = 0;
docUpto = 0;
payloadLen = 0;

@@ -339,7 +339,7 @@ public class MemoryCodec extends Codec {
}
}

if (skipDocs == null || !skipDocs.get(docID)) {
if (liveDocs == null || liveDocs.get(docID)) {
if (VERBOSE) System.out.println("    return docID=" + docID + " freq=" + freq);
return docID;
}

@@ -375,7 +375,7 @@ public class MemoryCodec extends Codec {
private byte[] buffer = new byte[16];
private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);

private Bits skipDocs;
private Bits liveDocs;
private int docUpto;
private int docID;
private int freq;

@@ -396,7 +396,7 @@ public class MemoryCodec extends Codec {
return omitTFAP == this.omitTFAP && storePayloads == this.storePayloads;
}

public FSTDocsAndPositionsEnum reset(BytesRef bufferIn, Bits skipDocs, int numDocs) {
public FSTDocsAndPositionsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
assert numDocs > 0;
if (VERBOSE) {
System.out.println("D&P reset bytes this=" + this);

@@ -409,7 +409,7 @@ public class MemoryCodec extends Codec {
}
in.reset(buffer, 0, bufferIn.length - bufferIn.offset);
System.arraycopy(bufferIn.bytes, bufferIn.offset, buffer, 0, bufferIn.length - bufferIn.offset);
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
docID = 0;
docUpto = 0;
payload.bytes = buffer;

@@ -446,7 +446,7 @@ public class MemoryCodec extends Codec {
}
}

if (skipDocs == null || !skipDocs.get(docID)) {
if (liveDocs == null || liveDocs.get(docID)) {
pos = 0;
posPending = freq;
if (VERBOSE) System.out.println("    return docID=" + docID + " freq=" + freq);

@@ -598,7 +598,7 @@ public class MemoryCodec extends Codec {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
decodeMetaData();
FSTDocsEnum docsEnum;
if (reuse == null || !(reuse instanceof FSTDocsEnum)) {

@@ -609,11 +609,11 @@ public class MemoryCodec extends Codec {
docsEnum = new FSTDocsEnum(field.omitTermFreqAndPositions, field.storePayloads);
}
}
return docsEnum.reset(current.output, skipDocs, docFreq);
return docsEnum.reset(current.output, liveDocs, docFreq);
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
if (field.omitTermFreqAndPositions) {
return null;
}

@@ -628,7 +628,7 @@ public class MemoryCodec extends Codec {
}
}
if (VERBOSE) System.out.println("D&P reset this=" + this);
return docsAndPositionsEnum.reset(current.output, skipDocs, docFreq);
return docsAndPositionsEnum.reset(current.output, liveDocs, docFreq);
}

@Override

@@ -951,7 +951,7 @@ public class PreFlexFields extends FieldsProducer {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
PreDocsEnum docsEnum;
if (reuse == null || !(reuse instanceof PreDocsEnum)) {
docsEnum = new PreDocsEnum();

@@ -961,11 +961,11 @@ public class PreFlexFields extends FieldsProducer {
docsEnum = new PreDocsEnum();
}
}
return docsEnum.reset(termEnum, skipDocs);
return docsEnum.reset(termEnum, liveDocs);
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
PreDocsAndPositionsEnum docsPosEnum;
if (fieldInfo.omitTermFreqAndPositions) {
return null;

@@ -977,7 +977,7 @@ public class PreFlexFields extends FieldsProducer {
docsPosEnum = new PreDocsAndPositionsEnum();
}
}
return docsPosEnum.reset(termEnum, skipDocs);
return docsPosEnum.reset(termEnum, liveDocs);
}
}

@@ -992,8 +992,8 @@ public class PreFlexFields extends FieldsProducer {
return freqStream;
}

public PreDocsEnum reset(SegmentTermEnum termEnum, Bits skipDocs) throws IOException {
docs.setSkipDocs(skipDocs);
public PreDocsEnum reset(SegmentTermEnum termEnum, Bits liveDocs) throws IOException {
docs.setLiveDocs(liveDocs);
docs.seek(termEnum);
return this;
}

@@ -1048,8 +1048,8 @@ public class PreFlexFields extends FieldsProducer {
return freqStream;
}

public DocsAndPositionsEnum reset(SegmentTermEnum termEnum, Bits skipDocs) throws IOException {
pos.setSkipDocs(skipDocs);
public DocsAndPositionsEnum reset(SegmentTermEnum termEnum, Bits liveDocs) throws IOException {
pos.setLiveDocs(liveDocs);
pos.seek(termEnum);
return this;
}

@@ -33,7 +33,7 @@ public class SegmentTermDocs {
//protected SegmentReader parent;
private final FieldInfos fieldInfos;
private final TermInfosReader tis;
protected Bits skipDocs;
protected Bits liveDocs;
protected IndexInput freqStream;
protected int count;
protected int df;

@@ -53,18 +53,6 @@ public class SegmentTermDocs {
protected boolean currentFieldStoresPayloads;
protected boolean currentFieldOmitTermFreqAndPositions;

/*
protected SegmentTermDocs(SegmentReader parent) {
this.parent = parent;
this.freqStream = (IndexInput) parent.core.freqStream.clone();
synchronized (parent) {
this.deletedDocs = parent.deletedDocs;
}
this.skipInterval = parent.core.getTermsReader().getSkipInterval();
this.maxSkipLevels = parent.core.getTermsReader().getMaxSkipLevels();
}
*/

public SegmentTermDocs(IndexInput freqStream, TermInfosReader tis, FieldInfos fieldInfos) {
this.freqStream = (IndexInput) freqStream.clone();
this.tis = tis;

@@ -78,8 +66,8 @@ public class SegmentTermDocs {
seek(ti, term);
}

public void setSkipDocs(Bits skipDocs) {
this.skipDocs = skipDocs;
public void setLiveDocs(Bits liveDocs) {
this.liveDocs = liveDocs;
}

public void seek(SegmentTermEnum segmentTermEnum) throws IOException {

@@ -149,7 +137,7 @@ public class SegmentTermDocs {

count++;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
skippingDoc();

@@ -175,7 +163,7 @@ public class SegmentTermDocs {
freq = freqStream.readVInt(); // else read freq
count++;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
docs[i] = doc;
freqs[i] = freq;
++i;

@@ -192,7 +180,7 @@ public class SegmentTermDocs {
doc += freqStream.readVInt();
count++;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
docs[i] = doc;
// Hardwire freq to 1 when term freqs were not
// stored in the index

@@ -167,7 +167,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
// TODO: we could actually reuse, by having TL that
// holds the last wrapped reuse, and vice-versa
@Override
public DocsEnum docs(FieldInfo field, BlockTermState _termState, Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(FieldInfo field, BlockTermState _termState, Bits liveDocs, DocsEnum reuse) throws IOException {
PulsingTermState termState = (PulsingTermState) _termState;
if (termState.postingsSize != -1) {
PulsingDocsEnum postings;

@@ -179,20 +179,20 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
} else {
postings = new PulsingDocsEnum(field);
}
return postings.reset(skipDocs, termState);
return postings.reset(liveDocs, termState);
} else {
// TODO: not great that we lose reuse of PulsingDocsEnum in this case:
if (reuse instanceof PulsingDocsEnum) {
return wrappedPostingsReader.docs(field, termState.wrappedTermState, skipDocs, null);
return wrappedPostingsReader.docs(field, termState.wrappedTermState, liveDocs, null);
} else {
return wrappedPostingsReader.docs(field, termState.wrappedTermState, skipDocs, reuse);
return wrappedPostingsReader.docs(field, termState.wrappedTermState, liveDocs, reuse);
}
}
}

// TODO: -- not great that we can't always reuse
@Override
public DocsAndPositionsEnum docsAndPositions(FieldInfo field, BlockTermState _termState, Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(FieldInfo field, BlockTermState _termState, Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
if (field.omitTermFreqAndPositions) {
return null;
}

@@ -211,12 +211,12 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
postings = new PulsingDocsAndPositionsEnum(field);
}

return postings.reset(skipDocs, termState);
return postings.reset(liveDocs, termState);
} else {
if (reuse instanceof PulsingDocsAndPositionsEnum) {
return wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, skipDocs, null);
return wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, liveDocs, null);
} else {
return wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, skipDocs, reuse);
return wrappedPostingsReader.docsAndPositions(field, termState.wrappedTermState, liveDocs, reuse);
}
}
}

@@ -225,7 +225,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
private final ByteArrayDataInput postings = new ByteArrayDataInput();
private final boolean omitTF;
private final boolean storePayloads;
private Bits skipDocs;
private Bits liveDocs;
private int docID;
private int freq;
private int payloadLength;

@@ -235,7 +235,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
storePayloads = fieldInfo.storePayloads;
}

public PulsingDocsEnum reset(Bits skipDocs, PulsingTermState termState) {
public PulsingDocsEnum reset(Bits liveDocs, PulsingTermState termState) {
//System.out.println("PR docsEnum termState=" + termState + " docFreq=" + termState.docFreq);
assert termState.postingsSize != -1;
final byte[] bytes = new byte[termState.postingsSize];

@@ -244,7 +244,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
docID = 0;
payloadLength = 0;
freq = 1;
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
return this;
}

@@ -291,7 +291,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
}
}

if (skipDocs == null || !skipDocs.get(docID)) {
if (liveDocs == null || liveDocs.get(docID)) {
//System.out.println("  return docID=" + docID + " freq=" + freq);
return docID;
}

@@ -323,7 +323,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
private final ByteArrayDataInput postings = new ByteArrayDataInput();
private final boolean storePayloads;

private Bits skipDocs;
private Bits liveDocs;
private int docID;
private int freq;
private int posPending;

@@ -341,12 +341,12 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
return storePayloads == fieldInfo.storePayloads;
}

public PulsingDocsAndPositionsEnum reset(Bits skipDocs, PulsingTermState termState) {
public PulsingDocsAndPositionsEnum reset(Bits liveDocs, PulsingTermState termState) {
assert termState.postingsSize != -1;
final byte[] bytes = new byte[termState.postingsSize];
System.arraycopy(termState.postings, 0, bytes, 0, termState.postingsSize);
postings.reset(bytes);
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
payloadLength = 0;
posPending = 0;
docID = 0;

@@ -378,7 +378,7 @@ public class PulsingPostingsReaderImpl extends PostingsReaderBase {
}
posPending = freq;

if (skipDocs == null || !skipDocs.get(docID)) {
if (liveDocs == null || liveDocs.get(docID)) {
//System.out.println("  return docID=" + docID + " freq=" + freq);
position = 0;
return docID;

@@ -256,7 +256,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
}

@Override
public DocsEnum docs(FieldInfo fieldInfo, BlockTermState _termState, Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs, DocsEnum reuse) throws IOException {
final SepTermState termState = (SepTermState) _termState;
SepDocsEnum docsEnum;
if (reuse == null || !(reuse instanceof SepDocsEnum)) {

@@ -271,11 +271,11 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
}
}

return docsEnum.init(fieldInfo, termState, skipDocs);
return docsEnum.init(fieldInfo, termState, liveDocs);
}

@Override
public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
assert !fieldInfo.omitTermFreqAndPositions;
final SepTermState termState = (SepTermState) _termState;
SepDocsAndPositionsEnum postingsEnum;

@@ -291,7 +291,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
}
}

return postingsEnum.init(fieldInfo, termState, skipDocs);
return postingsEnum.init(fieldInfo, termState, liveDocs);
}

class SepDocsEnum extends DocsEnum {

@@ -304,7 +304,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
// TODO: -- should we do omitTF with 2 different enum classes?
private boolean omitTF;
private boolean storePayloads;
private Bits skipDocs;
private Bits liveDocs;
private final IntIndexInput.Reader docReader;
private final IntIndexInput.Reader freqReader;
private long skipFP;

@@ -337,8 +337,8 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
}
}

SepDocsEnum init(FieldInfo fieldInfo, SepTermState termState, Bits skipDocs) throws IOException {
this.skipDocs = skipDocs;
SepDocsEnum init(FieldInfo fieldInfo, SepTermState termState, Bits liveDocs) throws IOException {
this.liveDocs = liveDocs;
omitTF = fieldInfo.omitTermFreqAndPositions;
storePayloads = fieldInfo.storePayloads;

@@ -383,7 +383,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
freq = freqReader.next();
}

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
}

@@ -408,7 +408,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
freq = freqReader.next();
}

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
docs[i] = doc;
freqs[i] = freq;
//System.out.println("  docs[" + i + "]=" + doc + " count=" + count + " dF=" + docFreq);

@@ -493,7 +493,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
long freqStart;

private boolean storePayloads;
private Bits skipDocs;
private Bits liveDocs;
private final IntIndexInput.Reader docReader;
private final IntIndexInput.Reader freqReader;
private final IntIndexInput.Reader posReader;

@@ -528,8 +528,8 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {
payloadIn = (IndexInput) SepPostingsReaderImpl.this.payloadIn.clone();
}

SepDocsAndPositionsEnum init(FieldInfo fieldInfo, SepTermState termState, Bits skipDocs) throws IOException {
this.skipDocs = skipDocs;
SepDocsAndPositionsEnum init(FieldInfo fieldInfo, SepTermState termState, Bits liveDocs) throws IOException {
this.liveDocs = liveDocs;
storePayloads = fieldInfo.storePayloads;
//System.out.println("Sep D&P init");

@@ -584,7 +584,7 @@ public class SepPostingsReaderImpl extends PostingsReaderBase {

pendingPosCount += freq;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
}

@@ -211,18 +211,18 @@ class SimpleTextFieldsReader extends FieldsProducer {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
SimpleTextDocsEnum docsEnum;
if (reuse != null && reuse instanceof SimpleTextDocsEnum && ((SimpleTextDocsEnum) reuse).canReuse(in)) {
docsEnum = (SimpleTextDocsEnum) reuse;
} else {
docsEnum = new SimpleTextDocsEnum();
}
return docsEnum.reset(docsStart, skipDocs, omitTF);
return docsEnum.reset(docsStart, liveDocs, omitTF);
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
if (omitTF) {
return null;
}

@@ -233,7 +233,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
} else {
docsAndPositionsEnum = new SimpleTextDocsAndPositionsEnum();
}
return docsAndPositionsEnum.reset(docsStart, skipDocs);
return docsAndPositionsEnum.reset(docsStart, liveDocs);
}

@Override

@@ -248,7 +248,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private boolean omitTF;
private int docID;
private int tf;
private Bits skipDocs;
private Bits liveDocs;
private final BytesRef scratch = new BytesRef(10);
private final CharsRef scratchUTF16 = new CharsRef(10);

@@ -261,8 +261,8 @@ class SimpleTextFieldsReader extends FieldsProducer {
return in == inStart;
}

public SimpleTextDocsEnum reset(long fp, Bits skipDocs, boolean omitTF) throws IOException {
this.skipDocs = skipDocs;
public SimpleTextDocsEnum reset(long fp, Bits liveDocs, boolean omitTF) throws IOException {
this.liveDocs = liveDocs;
in.seek(fp);
this.omitTF = omitTF;
if (omitTF) {

@@ -292,7 +292,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
final long lineStart = in.getFilePointer();
readLine(in, scratch);
if (scratch.startsWith(DOC)) {
if (!first && (skipDocs == null || !skipDocs.get(docID))) {
if (!first && (liveDocs == null || liveDocs.get(docID))) {
in.seek(lineStart);
if (!omitTF) {
tf = termFreq;

@@ -309,7 +309,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
// skip
} else {
assert scratch.startsWith(TERM) || scratch.startsWith(FIELD) || scratch.startsWith(END): "scratch=" + scratch.utf8ToString();
if (!first && (skipDocs == null || !skipDocs.get(docID))) {
if (!first && (liveDocs == null || liveDocs.get(docID))) {
in.seek(lineStart);
if (!omitTF) {
tf = termFreq;

@@ -334,7 +334,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final IndexInput in;
private int docID;
private int tf;
private Bits skipDocs;
private Bits liveDocs;
private final BytesRef scratch = new BytesRef(10);
private final BytesRef scratch2 = new BytesRef(10);
private final CharsRef scratchUTF16 = new CharsRef(10);

@@ -351,8 +351,8 @@ class SimpleTextFieldsReader extends FieldsProducer {
return in == inStart;
}

public SimpleTextDocsAndPositionsEnum reset(long fp, Bits skipDocs) {
this.skipDocs = skipDocs;
public SimpleTextDocsAndPositionsEnum reset(long fp, Bits liveDocs) {
this.liveDocs = liveDocs;
nextDocStart = fp;
return this;
}

@@ -376,7 +376,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
final long lineStart = in.getFilePointer();
readLine(in, scratch);
if (scratch.startsWith(DOC)) {
if (!first && (skipDocs == null || !skipDocs.get(docID))) {
if (!first && (liveDocs == null || liveDocs.get(docID))) {
nextDocStart = lineStart;
in.seek(posStart);
return docID;

@@ -392,7 +392,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
// skip
} else {
assert scratch.startsWith(TERM) || scratch.startsWith(FIELD) || scratch.startsWith(END);
if (!first && (skipDocs == null || !skipDocs.get(docID))) {
if (!first && (liveDocs == null || liveDocs.get(docID))) {
nextDocStart = lineStart;
in.seek(posStart);
return docID;

@ -200,7 +200,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
}

@Override
public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse) throws IOException {
SegmentDocsEnum docsEnum;
if (reuse == null || !(reuse instanceof SegmentDocsEnum)) {
docsEnum = new SegmentDocsEnum(freqIn);

@ -213,11 +213,11 @@ public class StandardPostingsReader extends PostingsReaderBase {
docsEnum = new SegmentDocsEnum(freqIn);
}
}
return docsEnum.reset(fieldInfo, (StandardTermState) termState, skipDocs);
return docsEnum.reset(fieldInfo, (StandardTermState) termState, liveDocs);
}

@Override
public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
if (fieldInfo.omitTermFreqAndPositions) {
return null;
}

@ -236,7 +236,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
docsEnum = new SegmentDocsAndPositionsAndPayloadsEnum(freqIn, proxIn);
}
}
return docsEnum.reset(fieldInfo, (StandardTermState) termState, skipDocs);
return docsEnum.reset(fieldInfo, (StandardTermState) termState, liveDocs);
} else {
SegmentDocsAndPositionsEnum docsEnum;
if (reuse == null || !(reuse instanceof SegmentDocsAndPositionsEnum)) {

@ -250,7 +250,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
docsEnum = new SegmentDocsAndPositionsEnum(freqIn, proxIn);
}
}
return docsEnum.reset(fieldInfo, (StandardTermState) termState, skipDocs);
return docsEnum.reset(fieldInfo, (StandardTermState) termState, liveDocs);
}
}

@ -267,7 +267,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
int doc; // doc we last read
int freq; // freq we last read

Bits skipDocs;
Bits liveDocs;

long freqOffset;
int skipOffset;

@ -280,13 +280,13 @@ public class StandardPostingsReader extends PostingsReaderBase {
this.freqIn = (IndexInput) freqIn.clone();
}

public SegmentDocsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits skipDocs) throws IOException {
public SegmentDocsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits liveDocs) throws IOException {
omitTF = fieldInfo.omitTermFreqAndPositions;
if (omitTF) {
freq = 1;
}
storePayloads = fieldInfo.storePayloads;
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
freqOffset = termState.freqOffset;
skipOffset = termState.skipOffset;

@ -327,7 +327,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
}
}

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
}

@ -357,7 +357,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
}
}

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
docs[i] = doc;
freqs[i] = freq;
++i;

@ -435,7 +435,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
int freq; // freq we last read
int position;

Bits skipDocs;
Bits liveDocs;

long freqOffset;
int skipOffset;

@ -453,11 +453,11 @@ public class StandardPostingsReader extends PostingsReaderBase {
this.proxIn = (IndexInput) proxIn.clone();
}

public SegmentDocsAndPositionsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits skipDocs) throws IOException {
public SegmentDocsAndPositionsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits liveDocs) throws IOException {
assert !fieldInfo.omitTermFreqAndPositions;
assert !fieldInfo.storePayloads;

this.skipDocs = skipDocs;
this.liveDocs = liveDocs;

// TODO: for full enum case (eg segment merging) this
// seek is unnecessary; maybe we can avoid in such

@ -504,7 +504,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
}
posPendingCount += freq;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
}

@ -626,7 +626,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
int freq; // freq we last read
int position;

Bits skipDocs;
Bits liveDocs;

long freqOffset;
int skipOffset;

@ -647,7 +647,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
this.proxIn = (IndexInput) proxIn.clone();
}

public SegmentDocsAndPositionsAndPayloadsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits skipDocs) throws IOException {
public SegmentDocsAndPositionsAndPayloadsEnum reset(FieldInfo fieldInfo, StandardTermState termState, Bits liveDocs) throws IOException {
assert !fieldInfo.omitTermFreqAndPositions;
assert fieldInfo.storePayloads;
if (payload == null) {

@ -655,7 +655,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
payload.bytes = new byte[1];
}

this.skipDocs = skipDocs;
this.liveDocs = liveDocs;

// TODO: for full enum case (eg segment merging) this
// seek is unnecessary; maybe we can avoid in such

@ -701,7 +701,7 @@ public class StandardPostingsReader extends PostingsReaderBase {
}
posPendingCount += freq;

if (skipDocs == null || !skipDocs.get(doc)) {
if (liveDocs == null || liveDocs.get(doc)) {
break;
}
}
@ -103,7 +103,7 @@ class FixedStraightBytesImpl {
datOut = getDataOut();
boolean success = false;
try {
if (state.bits == null && state.reader instanceof Reader) {
if (state.liveDocs == null && state.reader instanceof Reader) {
Reader reader = (Reader) state.reader;
final int maxDocs = reader.maxDoc;
if (maxDocs == 0) {

@ -131,7 +131,7 @@ public class Floats {
if (datOut == null) {
initDataOut();
}
if (state.bits == null && state.reader instanceof FloatsReader) {
if (state.liveDocs == null && state.reader instanceof FloatsReader) {
// no deletes - bulk copy
final FloatsReader reader = (FloatsReader) state.reader;
assert reader.precisionBytes == (int) precision;

@ -182,7 +182,7 @@ class IntsImpl {
merging = true;
if (typeOrd != PACKED) {
initDataOut(typeOrd); // init datOut since we merge directly
if (state.bits == null && state.reader instanceof IntsReader) {
if (state.liveDocs == null && state.reader instanceof IntsReader) {
// no deleted docs - try bulk copy
final IntsReader reader = (IntsReader) state.reader;
if (reader.type == typeOrd) {

@ -97,7 +97,7 @@ class VarStraightBytesImpl {
datOut = getDataOut();
boolean success = false;
try {
if (state.bits == null && state.reader instanceof Reader) {
if (state.liveDocs == null && state.reader instanceof Reader) {
// bulk merge since we don't have any deletes
Reader reader = (Reader) state.reader;
final int maxDocs = reader.maxDoc;

@ -147,12 +147,12 @@ public abstract class Writer extends DocValuesConsumer {
// impl. will get the correct reference for the type
// it supports
int docID = state.docBase;
final Bits bits = state.bits;
final Bits liveDocs = state.liveDocs;
final int docCount = state.docCount;
int currentDocId;
if ((currentDocId = valEnum.advance(0)) != ValuesEnum.NO_MORE_DOCS) {
for (int i = 0; i < docCount; i++) {
if (bits == null || !bits.get(i)) {
if (liveDocs == null || liveDocs.get(i)) {
if (currentDocId < i) {
if ((currentDocId = valEnum.advance(i)) == ValuesEnum.NO_MORE_DOCS) {
break; // advance can jump over default values
@ -54,7 +54,7 @@ public class CachingSpanFilter extends SpanFilter {
}
this.cache = new CachingWrapperFilter.FilterCache<SpanFilterResult>(deletesMode) {
@Override
protected SpanFilterResult mergeDeletes(final Bits delDocs, final SpanFilterResult value) {
protected SpanFilterResult mergeLiveDocs(final Bits liveDocs, final SpanFilterResult value) {
throw new IllegalStateException("DeletesMode.DYNAMIC is not supported");
}
};

@ -73,7 +73,7 @@ public class CachingSpanFilter extends SpanFilter {
final IndexReader reader = context.reader;

final Object coreKey = reader.getCoreCacheKey();
final Object delCoreKey = reader.hasDeletions() ? reader.getDeletedDocs() : coreKey;
final Object delCoreKey = reader.hasDeletions() ? reader.getLiveDocs() : coreKey;

SpanFilterResult result = cache.get(reader, coreKey, delCoreKey);
if (result != null) {

@ -103,13 +103,13 @@ public class CachingWrapperFilter extends Filter {
value = cache.get(delCoreKey);

if (value == null) {
// now for core match, but dynamically AND NOT
// deletions
// now for core match, but dynamically AND
// live docs
value = cache.get(coreKey);
if (value != null) {
final Bits delDocs = reader.getDeletedDocs();
if (delDocs != null) {
value = mergeDeletes(delDocs, value);
final Bits liveDocs = reader.getLiveDocs();
if (liveDocs != null) {
value = mergeLiveDocs(liveDocs, value);
}
}
}

@ -118,7 +118,7 @@ public class CachingWrapperFilter extends Filter {
return value;
}

protected abstract T mergeDeletes(Bits delDocs, T value);
protected abstract T mergeLiveDocs(Bits liveDocs, T value);

public synchronized void put(Object coreKey, Object delCoreKey, T value) {
if (deletesMode == DeletesMode.IGNORE) {

@ -158,11 +158,11 @@ public class CachingWrapperFilter extends Filter {
this.filter = filter;
cache = new FilterCache<DocIdSet>(deletesMode) {
@Override
public DocIdSet mergeDeletes(final Bits delDocs, final DocIdSet docIdSet) {
public DocIdSet mergeLiveDocs(final Bits liveDocs, final DocIdSet docIdSet) {
return new FilteredDocIdSet(docIdSet) {
@Override
protected boolean match(int docID) {
return !delDocs.get(docID);
return liveDocs.get(docID);
}
};
}

@ -197,7 +197,7 @@ public class CachingWrapperFilter extends Filter {
public DocIdSet getDocIdSet(AtomicReaderContext context) throws IOException {
final IndexReader reader = context.reader;
final Object coreKey = reader.getCoreCacheKey();
final Object delCoreKey = reader.hasDeletions() ? reader.getDeletedDocs() : coreKey;
final Object delCoreKey = reader.hasDeletions() ? reader.getLiveDocs() : coreKey;

DocIdSet docIdSet = cache.get(reader, coreKey, delCoreKey);
if (docIdSet != null) {
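With the inversion, a cached core-level filter result is narrowed by a plain AND against the live docs rather than the old AND NOT against deletions. A rough standalone equivalent of the match logic (a java.util.BitSet stands in for the cached DocIdSet):

import java.util.BitSet;
import org.apache.lucene.util.Bits;

class CachedFilterSketch {
  // Accept a doc only if the cached filter matched it and it is still live.
  static boolean accept(int docID, BitSet cached, Bits liveDocs) {
    return cached.get(docID) && (liveDocs == null || liveDocs.get(docID));
  }
}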
@ -136,16 +136,18 @@ public class ConstantScoreQuery extends Query {
if (filter != null) {
assert query == null;
final DocIdSet dis = filter.getDocIdSet(context);
if (dis == null)
if (dis == null) {
return null;
}
disi = dis.iterator();
} else {
assert query != null && innerWeight != null;
disi =
innerWeight.scorer(context, scorerContext);
disi = innerWeight.scorer(context, scorerContext);
}
if (disi == null)

if (disi == null) {
return null;
}
return new ConstantScorer(disi, this);
}

@ -20,7 +20,6 @@ import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

@ -533,9 +532,9 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
@Override
public DocIdSetIterator iterator() throws IOException {

final Bits skipDocs = canIgnoreDeletedDocs ? null : reader.getDeletedDocs();
final Bits liveDocs = canIgnoreDeletedDocs ? null : reader.getLiveDocs();

if (skipDocs == null) {
if (liveDocs == null) {
// Specialization optimization disregard deletions
return new DocIdSetIterator() {
private int doc = -1;

@ -575,7 +574,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
final int maxDoc = reader.maxDoc();

// a DocIdSetIterator generating docIds by
// incrementing a variable & checking skipDocs -
// incrementing a variable & checking liveDocs -
return new DocIdSetIterator() {
private int doc = -1;
@Override

@ -590,14 +589,14 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (doc >= maxDoc) {
return doc = NO_MORE_DOCS;
}
} while (skipDocs.get(doc) || !matchDoc(doc));
} while (!liveDocs.get(doc) || !matchDoc(doc));
return doc;
}

@Override
public int advance(int target) {
for(doc=target;doc<maxDoc;doc++) {
if (!skipDocs.get(doc) && matchDoc(doc)) {
if (liveDocs.get(doc) && matchDoc(doc)) {
return doc;
}
}

@ -44,7 +44,7 @@ public abstract class Filter {
* represent the whole underlying index i.e. if the index has more than
* one segment the given reader only represents a single segment.
* The provided context is always an atomic context, so you can call
* {@link IndexReader#fields()} or {@link IndexReader#getDeletedDocs()}
* {@link IndexReader#fields()} or {@link IndexReader#getLiveDocs()}
* on the context's reader, for example.
*
* @return a DocIdSet that provides the documents which should be permitted or

@ -252,14 +252,14 @@ public final class FuzzyTermsEnum extends TermsEnum {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
return actualEnum.docs(skipDocs, reuse);
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
return actualEnum.docs(liveDocs, reuse);
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs,
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
DocsAndPositionsEnum reuse) throws IOException {
return actualEnum.docsAndPositions(skipDocs, reuse);
return actualEnum.docsAndPositions(liveDocs, reuse);
}

@Override
@ -50,14 +50,14 @@ public class MatchAllDocsQuery extends Query {
final byte[] norms;
private int doc = -1;
private final int maxDoc;
private final Bits delDocs;
private final Bits liveDocs;
private final Similarity similarity;

MatchAllScorer(IndexReader reader, Similarity similarity, Weight w,
byte[] norms) throws IOException {
super(w);
this.similarity = similarity;
delDocs = reader.getDeletedDocs();
liveDocs = reader.getLiveDocs();
score = w.getValue();
maxDoc = reader.maxDoc();
this.norms = norms;

@ -71,7 +71,7 @@ public class MatchAllDocsQuery extends Query {
@Override
public int nextDoc() throws IOException {
doc++;
while(delDocs != null && doc < maxDoc && delDocs.get(doc)) {
while(liveDocs != null && doc < maxDoc && !liveDocs.get(doc)) {
doc++;
}
if (doc == maxDoc) {

@ -175,7 +175,7 @@ public class MultiPhraseQuery extends Query {
if (termArrays.size() == 0) // optimize zero-term case
return null;
final IndexReader reader = context.reader;
final Bits delDocs = reader.getDeletedDocs();
final Bits liveDocs = reader.getLiveDocs();

PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()];

@ -196,12 +196,12 @@ public class MultiPhraseQuery extends Query {
}
} else {
final Term term = terms[0];
postingsEnum = reader.termPositionsEnum(delDocs,
postingsEnum = reader.termPositionsEnum(liveDocs,
term.field(),
term.bytes());

if (postingsEnum == null) {
if (reader.termDocsEnum(delDocs, term.field(), term.bytes()) != null) {
if (reader.termDocsEnum(liveDocs, term.field(), term.bytes()) != null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + term.text() + ")");
} else {

@ -497,15 +497,15 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {

public UnionDocsAndPositionsEnum(IndexReader indexReader, Term[] terms) throws IOException {
List<DocsAndPositionsEnum> docsEnums = new LinkedList<DocsAndPositionsEnum>();
final Bits delDocs = indexReader.getDeletedDocs();
final Bits liveDocs = indexReader.getLiveDocs();
for (int i = 0; i < terms.length; i++) {
DocsAndPositionsEnum postings = indexReader.termPositionsEnum(delDocs,
DocsAndPositionsEnum postings = indexReader.termPositionsEnum(liveDocs,
terms[i].field(),
terms[i].bytes());
if (postings != null) {
docsEnums.add(postings);
} else {
if (indexReader.termDocsEnum(delDocs, terms[i].field(), terms[i].bytes()) != null) {
if (indexReader.termDocsEnum(liveDocs, terms[i].field(), terms[i].bytes()) != null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + terms[i].field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + terms[i].text() + ")");
}

@ -19,14 +19,14 @@ package org.apache.lucene.search;

import java.io.IOException;

import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.OpenBitSet;

/**
* A wrapper for {@link MultiTermQuery}, that exposes its

@ -123,15 +123,15 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
assert termsEnum != null;
if (termsEnum.next() != null) {
// fill into a OpenBitSet
final OpenBitSet bitSet = new OpenBitSet(context.reader.maxDoc());
final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
int termCount = 0;
final Bits delDocs = reader.getDeletedDocs();
final Bits liveDocs = reader.getLiveDocs();
DocsEnum docsEnum = null;
do {
termCount++;
// System.out.println("  iter termCount=" + termCount + " term=" +
// enumerator.term().toBytesString());
docsEnum = termsEnum.docs(delDocs, docsEnum);
docsEnum = termsEnum.docs(liveDocs, docsEnum);
final DocsEnum.BulkReadResult result = docsEnum.getBulkResult();
while (true) {
final int count = docsEnum.read();

@ -212,17 +212,17 @@ public class PhraseQuery extends Query {
if (terms.size() == 0) // optimize zero-term case
return null;
final IndexReader reader = context.reader;
final Bits liveDocs = reader.getLiveDocs();
PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];
final Bits delDocs = reader.getDeletedDocs();
for (int i = 0; i < terms.size(); i++) {
final Term t = terms.get(i);
DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(delDocs,
DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(liveDocs,
t.field(),
t.bytes());
// PhraseQuery on a field that did not index
// positions.
if (postingsEnum == null) {
if (reader.termDocsEnum(delDocs, t.field(), t.bytes()) != null) {
if (reader.termDocsEnum(liveDocs, t.field(), t.bytes()) != null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")");
} else {
@ -90,13 +90,12 @@ public class TermQuery extends Query {
final String field = term.field();
final IndexReader reader = context.reader;
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
final TermState state = termStates
.get(context.ord);
final TermState state = termStates.get(context.ord);
if (state == null) { // term is not present in that reader
assert termNotInReader(reader, field, term.bytes()) : "no termstate found but term exists in reader";
return null;
}
final DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), field, term.bytes(), state);
final DocsEnum docs = reader.termDocsEnum(reader.getLiveDocs(), field, term.bytes(), state);
assert docs != null;
return new TermScorer(this, docs, similarity, context.reader.norms(field));
}

@ -143,7 +142,7 @@ public class TermQuery extends Query {

Explanation tfExplanation = new Explanation();
int tf = 0;
DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), term.field(), term.bytes());
DocsEnum docs = reader.termDocsEnum(context.reader.getLiveDocs(), term.field(), term.bytes());
if (docs != null) {
int newDoc = docs.advance(doc);
if (newDoc == doc) {

@ -105,7 +105,7 @@ public class DocTermsCreator extends EntryCreatorWithOptions<DocTerms>
if (terms != null) {
int termCount = 0;
final TermsEnum termsEnum = terms.iterator();
final Bits delDocs = MultiFields.getDeletedDocs(reader);
final Bits liveDocs = MultiFields.getLiveDocs(reader);
DocsEnum docs = null;
while(true) {
if (termCount++ == termCountHardLimit) {

@ -120,7 +120,7 @@ public class DocTermsCreator extends EntryCreatorWithOptions<DocTerms>
break;
}
final long pointer = bytes.copyUsingLengthPrefix(term);
docs = termsEnum.docs(delDocs, docs);
docs = termsEnum.docs(liveDocs, docs);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {

@ -322,12 +322,12 @@ public class DocTermsIndexCreator extends EntryCreatorWithOptions<DocTermsIndex>
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
throw new UnsupportedOperationException();
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
throw new UnsupportedOperationException();
}

@ -83,14 +83,14 @@ public class SpanTermQuery extends SpanQuery {
@Override
public Spans getSpans(final AtomicReaderContext context) throws IOException {
final IndexReader reader = context.reader;
final DocsAndPositionsEnum postings = reader.termPositionsEnum(reader.getDeletedDocs(),
final DocsAndPositionsEnum postings = reader.termPositionsEnum(reader.getLiveDocs(),
term.field(),
term.bytes());

if (postings != null) {
return new TermSpans(postings, term);
} else {
if (reader.termDocsEnum(reader.getDeletedDocs(), term.field(), term.bytes()) != null) {
if (reader.termDocsEnum(reader.getLiveDocs(), term.field(), term.bytes()) != null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run SpanTermQuery (term=" + term.text() + ")");
} else {
@ -18,6 +18,7 @@ package org.apache.lucene.util;
 */

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;

@ -39,6 +40,7 @@ public final class BitVector implements Cloneable, Bits {
private byte[] bits;
private int size;
private int count;
private int version;

/** Constructs a vector capable of holding <code>n</code> bits. */
public BitVector(int n) {

@ -92,8 +94,10 @@ public final class BitVector implements Cloneable, Bits {
return true;
else {
bits[pos] = (byte) (v | flag);
if (count != -1)
if (count != -1) {
count++;
assert count <= size;
}
return false;
}
}

@ -107,6 +111,25 @@ public final class BitVector implements Cloneable, Bits {
count = -1;
}

public final boolean getAndClear(int bit) {
if (bit >= size) {
throw new ArrayIndexOutOfBoundsException(bit);
}
final int pos = bit >> 3;
final int v = bits[pos];
final int flag = 1 << (bit & 7);
if ((flag & v) == 0) {
return false;
} else {
bits[pos] &= ~flag;
if (count != -1) {
count--;
assert count >= 0;
}
return true;
}
}

/** Returns <code>true</code> if <code>bit</code> is one and
<code>false</code> if it is zero. */
public final boolean get(int bit) {

@ -133,8 +156,9 @@ public final class BitVector implements Cloneable, Bits {
if (count == -1) {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
count = c;
}
return count;

@ -144,8 +168,9 @@ public final class BitVector implements Cloneable, Bits {
public final int getRecomputedCount() {
int c = 0;
int end = bits.length;
for (int i = 0; i < end; i++)
for (int i = 0; i < end; i++) {
c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
}
return c;
}

@ -171,13 +196,21 @@ public final class BitVector implements Cloneable, Bits {
private static String CODEC = "BitVector";

// Version before version tracking was added:
private final static int VERSION_PRE = -1;
public final static int VERSION_PRE = -1;

// First version:
private final static int VERSION_START = 0;
public final static int VERSION_START = 0;

// Changed DGaps to encode gaps between cleared bits, not
// set:
public final static int VERSION_DGAPS_CLEARED = 1;

// Increment version to change it:
private final static int VERSION_CURRENT = VERSION_START;
public final static int VERSION_CURRENT = VERSION_DGAPS_CLEARED;

public int getVersion() {
return version;
}

/** Writes this vector to the file <code>name</code> in Directory
<code>d</code>, in a format that can be read by the constructor {@link

@ -188,7 +221,8 @@ public final class BitVector implements Cloneable, Bits {
output.writeInt(-2);
CodecUtil.writeHeader(output, CODEC, VERSION_CURRENT);
if (isSparse()) {
writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
// sparse bit-set more efficiently saved as d-gaps.
writeClearedDgaps(output);
} else {
writeBits(output);
}

@ -197,6 +231,38 @@ public final class BitVector implements Cloneable, Bits {
}
}

/** Invert all bits */
public void invertAll() {
if (count != -1) {
count = size - count;
}
if (bits.length > 0) {
for(int idx=0;idx<bits.length;idx++) {
bits[idx] = (byte) (~bits[idx]);
}
clearUnusedBits();
}
}

private void clearUnusedBits() {
// Take care not to invert the "unused" bits in the
// last byte:
if (bits.length > 0) {
final int lastNBits = size & 7;
if (lastNBits != 0) {
final int mask = (1 << lastNBits)-1;
bits[bits.length-1] &= mask;
}
}
}

/** Set all bits */
public void setAll() {
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
count = size;
}

/** Write as a bit set */
private void writeBits(IndexOutput output) throws IOException {
output.writeInt(size()); // write size

@ -205,19 +271,20 @@ public final class BitVector implements Cloneable, Bits {
}

/** Write as a d-gaps list */
private void writeDgaps(IndexOutput output) throws IOException {
private void writeClearedDgaps(IndexOutput output) throws IOException {
output.writeInt(-1); // mark using d-gaps
output.writeInt(size()); // write size
output.writeInt(count()); // write count
int last=0;
int n = count();
int numCleared = size()-count();
int m = bits.length;
for (int i=0; i<m && n>0; i++) {
if (bits[i]!=0) {
for (int i=0; i<m && numCleared>0; i++) {
if (bits[i]!=0xff) {
output.writeVInt(i-last);
output.writeByte(bits[i]);
last = i;
n -= BYTE_COUNTS[bits[i] & 0xFF];
numCleared -= (8-BYTE_COUNTS[bits[i] & 0xFF]);
assert numCleared >= 0;
}
}
}

@ -225,12 +292,12 @@ public final class BitVector implements Cloneable, Bits {
/** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
private boolean isSparse() {

final int setCount = count();
if (setCount == 0) {
final int clearedCount = size() - count();
if (clearedCount == 0) {
return true;
}

final int avgGapLength = bits.length / setCount;
final int avgGapLength = bits.length / clearedCount;

// expected number of bytes for vInt encoding of each gap
final int expectedDGapBytes;

@ -266,17 +333,21 @@ public final class BitVector implements Cloneable, Bits {

try {
final int firstInt = input.readInt();
final int version;

if (firstInt == -2) {
// New format, with full header & version:
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_START);
version = CodecUtil.checkHeader(input, CODEC, VERSION_START, VERSION_CURRENT);
size = input.readInt();
} else {
version = VERSION_PRE;
size = firstInt;
}
if (size == -1) {
readDgaps(input);
if (version >= VERSION_DGAPS_CLEARED) {
readClearedDgaps(input);
} else {
readSetDgaps(input);
}
} else {
readBits(input);
}

@ -293,7 +364,7 @@ public final class BitVector implements Cloneable, Bits {
}

/** read as a d-gaps list */
private void readDgaps(IndexInput input) throws IOException {
private void readSetDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits

@ -303,6 +374,24 @@ public final class BitVector implements Cloneable, Bits {
last += input.readVInt();
bits[last] = input.readByte();
n -= BYTE_COUNTS[bits[last] & 0xFF];
assert n >= 0;
}
}

/** read as a d-gaps cleared bits list */
private void readClearedDgaps(IndexInput input) throws IOException {
size = input.readInt(); // (re)read size
count = input.readInt(); // read count
bits = new byte[(size >> 3) + 1]; // allocate bits
Arrays.fill(bits, (byte) 0xff);
clearUnusedBits();
int last=0;
int numCleared = size()-count();
while (numCleared>0) {
last += input.readVInt();
bits[last] = input.readByte();
numCleared -= 8-BYTE_COUNTS[bits[last] & 0xFF];
assert numCleared >= 0;
}
}
}
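A live-docs BitVector is mostly set bits, so the new VERSION_DGAPS_CLEARED format d-gap encodes only the bytes that still contain cleared bits, as writeClearedDgaps above does. A self-contained sketch of the same encoding over a plain byte[] (ByteArrayOutputStream stands in for IndexOutput, and single bytes for its vInts):

import java.io.ByteArrayOutputStream;

class ClearedDgapsSketch {
  // Emit a (gap, rawByte) pair for every byte that is not 0xff, i.e.
  // every byte holding at least one cleared (deleted-doc) bit.
  static byte[] encode(byte[] bits) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    int last = 0;
    for (int i = 0; i < bits.length; i++) {
      if (bits[i] != (byte) 0xff) {
        out.write(i - last); // gap since the previous interesting byte
        out.write(bits[i]);  // the byte itself, cleared bits included
        last = i;
      }
    }
    return out.toByteArray();
  }
}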
@ -34,13 +34,16 @@ public final class MultiBits implements Bits {
// length is 1+subs.length (the last entry has the maxDoc):
private final int[] starts;

public MultiBits(List<Bits> bits, List<Integer> starts) {
private final boolean defaultValue;

public MultiBits(List<Bits> bits, List<Integer> starts, boolean defaultValue) {
assert starts.size() == 1+bits.size();
this.subs = bits.toArray(Bits.EMPTY_ARRAY);
this.starts = new int[starts.size()];
for(int i=0;i<this.starts.length;i++) {
this.starts[i] = starts.get(i);
}
this.defaultValue = defaultValue;
}

private boolean checkLength(int reader, int doc) {

@ -54,7 +57,7 @@ public final class MultiBits implements Bits {
assert reader != -1;
final Bits bits = subs[reader];
if (bits == null) {
return false;
return defaultValue;
} else {
assert checkLength(reader, doc);
return bits.get(doc-starts[reader]);
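The new defaultValue lets a sub-reader without a Bits slice answer true when the composite represents live docs (a segment with no deletions has every doc live), where the old deleted-docs composite hardwired false. A trimmed sketch of the lookup (the linear scan is a stand-in for the binary search over starts that the real class uses):

import org.apache.lucene.util.Bits;

class MultiBitsSketch {
  // starts has one trailing extra entry holding maxDoc.
  static boolean get(Bits[] subs, int[] starts, boolean defaultValue, int doc) {
    int reader = 0;
    while (reader + 1 < subs.length && starts[reader + 1] <= doc) {
      reader++; // advance to the sub-reader whose doc range contains doc
    }
    Bits bits = subs[reader];
    return bits == null ? defaultValue : bits.get(doc - starts[reader]);
  }
}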
@ -869,7 +869,6 @@ public class OpenBitSet extends DocIdSet implements Bits, Cloneable {
// empty sets from returning 0, which is too common.
return (int)((h>>32) ^ h) + 0x98761234;
}

}
@ -346,26 +346,26 @@ public class TestExternalCodecs extends LuceneTestCase {
}

@Override
public DocsEnum docs(Bits skipDocs, DocsEnum reuse) {
return new RAMDocsEnum(ramField.termToDocs.get(current), skipDocs);
public DocsEnum docs(Bits liveDocs, DocsEnum reuse) {
return new RAMDocsEnum(ramField.termToDocs.get(current), liveDocs);
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) {
return new RAMDocsAndPositionsEnum(ramField.termToDocs.get(current), skipDocs);
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) {
return new RAMDocsAndPositionsEnum(ramField.termToDocs.get(current), liveDocs);
}
}

private static class RAMDocsEnum extends DocsEnum {
private final RAMTerm ramTerm;
private final Bits skipDocs;
private final Bits liveDocs;
private RAMDoc current;
int upto = -1;
int posUpto = 0;

public RAMDocsEnum(RAMTerm ramTerm, Bits skipDocs) {
public RAMDocsEnum(RAMTerm ramTerm, Bits liveDocs) {
this.ramTerm = ramTerm;
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
}

@Override

@ -383,7 +383,7 @@ public class TestExternalCodecs extends LuceneTestCase {
upto++;
if (upto < ramTerm.docs.size()) {
current = ramTerm.docs.get(upto);
if (skipDocs == null || !skipDocs.get(current.docID)) {
if (liveDocs == null || liveDocs.get(current.docID)) {
posUpto = 0;
return current.docID;
}

@ -406,14 +406,14 @@ public class TestExternalCodecs extends LuceneTestCase {

private static class RAMDocsAndPositionsEnum extends DocsAndPositionsEnum {
private final RAMTerm ramTerm;
private final Bits skipDocs;
private final Bits liveDocs;
private RAMDoc current;
int upto = -1;
int posUpto = 0;

public RAMDocsAndPositionsEnum(RAMTerm ramTerm, Bits skipDocs) {
public RAMDocsAndPositionsEnum(RAMTerm ramTerm, Bits liveDocs) {
this.ramTerm = ramTerm;
this.skipDocs = skipDocs;
this.liveDocs = liveDocs;
}

@Override

@ -431,7 +431,7 @@ public class TestExternalCodecs extends LuceneTestCase {
upto++;
if (upto < ramTerm.docs.size()) {
current = ramTerm.docs.get(upto);
if (skipDocs == null || !skipDocs.get(current.docID)) {
if (liveDocs == null || liveDocs.get(current.docID)) {
posUpto = 0;
return current.docID;
}

@ -560,6 +560,9 @@ public class TestExternalCodecs extends LuceneTestCase {
r.close();
s.close();

if (VERBOSE) {
System.out.println("\nTEST: now delete 2nd doc");
}
w.deleteDocuments(new Term("id", "44"));
w.optimize();
r = IndexReader.open(w, true);
@ -73,7 +73,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {

IndexReader reader = writer.getReader();
DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getDeletedDocs(reader),
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term1"));
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);

@ -81,7 +81,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
assertEquals(0, termPositions.nextPosition());

termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getDeletedDocs(reader),
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term2"));
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);

@ -90,7 +90,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
assertEquals(3, termPositions.nextPosition());

termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getDeletedDocs(reader),
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term3"));
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);

@ -281,10 +281,10 @@ public class TestBackwardsCompatibility extends LuceneTestCase {

_TestUtil.checkIndex(dir);

final Bits delDocs = MultiFields.getDeletedDocs(reader);
final Bits liveDocs = MultiFields.getLiveDocs(reader);

for(int i=0;i<35;i++) {
if (!delDocs.get(i)) {
if (liveDocs.get(i)) {
Document d = reader.document(i);
List<Fieldable> fields = d.getFields();
if (d.getField("content3") == null) {
@ -611,6 +611,9 @@ public class TestDeletionPolicy extends LuceneTestCase {
final int N = 10;

for(int pass=0;pass<2;pass++) {
if (VERBOSE) {
System.out.println("TEST: pass=" + pass);
}

boolean useCompoundFile = (pass % 2) != 0;

@ -631,7 +634,7 @@ public class TestDeletionPolicy extends LuceneTestCase {

for(int i=0;i<N+1;i++) {
if (VERBOSE) {
System.out.println("\nTEST: cycle i=" + i);
System.out.println("\nTEST: write i=" + i);
}
conf = newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random))

@ -692,8 +695,14 @@ public class TestDeletionPolicy extends LuceneTestCase {
int expectedCount = 176;
searcher.close();
for(int i=0;i<N+1;i++) {
if (VERBOSE) {
System.out.println("TEST: i=" + i);
}
try {
IndexReader reader = IndexReader.open(dir, true);
if (VERBOSE) {
System.out.println(" got reader=" + reader);
}

// Work backwards in commits on what the expected
// count should be.

@ -706,7 +715,7 @@ public class TestDeletionPolicy extends LuceneTestCase {
expectedCount -= 17;
}
}
assertEquals(expectedCount, hits.length);
assertEquals("maxDoc=" + searcher.maxDoc() + " numDocs=" + searcher.getIndexReader().numDocs(), expectedCount, hits.length);
searcher.close();
reader.close();
if (i == N) {
@ -168,13 +168,13 @@ public class TestDirectoryReader extends LuceneTestCase {
TermsEnum te2 = MultiFields.getTerms(mr2, "body").iterator();
te2.seekCeil(new BytesRef("wow"));
DocsEnum td = MultiFields.getTermDocsEnum(mr2,
MultiFields.getDeletedDocs(mr2),
MultiFields.getLiveDocs(mr2),
"body",
te2.term());

TermsEnum te3 = MultiFields.getTerms(mr3, "body").iterator();
te3.seekCeil(new BytesRef("wow"));
td = te3.docs(MultiFields.getDeletedDocs(mr3),
td = te3.docs(MultiFields.getLiveDocs(mr3),
td);

int ret = 0;

@ -234,7 +234,7 @@ public class TestDoc extends LuceneTestCase {
out.print(" term=" + field + ":" + tis.term());
out.println(" DF=" + tis.docFreq());

DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getDeletedDocs(), null);
DocsAndPositionsEnum positions = tis.docsAndPositions(reader.getLiveDocs(), null);

while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
out.print(" doc=" + positions.docID());

@ -89,17 +89,17 @@ public class TestDocsAndPositions extends LuceneTestCase {
}

public DocsAndPositionsEnum getDocsAndPositions(IndexReader reader,
BytesRef bytes, Bits skipDocs) throws IOException {
BytesRef bytes, Bits liveDocs) throws IOException {
return reader.termPositionsEnum(null, fieldName, bytes);
}

public DocsEnum getDocsEnum(IndexReader reader, BytesRef bytes,
boolean freqs, Bits skipDocs) throws IOException {
boolean freqs, Bits liveDocs) throws IOException {
int randInt = random.nextInt(10);
if (randInt == 0) { // once in a while throw in a positions enum
return getDocsAndPositions(reader, bytes, skipDocs);
return getDocsAndPositions(reader, bytes, liveDocs);
} else {
return reader.termDocsEnum(skipDocs, fieldName, bytes);
return reader.termDocsEnum(liveDocs, fieldName, bytes);
}
}
@ -131,7 +131,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);

DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getDeletedDocs(reader),
DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
"repeated", new BytesRef("repeated"));
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
int freq = termPositions.freq();

@ -195,7 +195,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);

DocsAndPositionsEnum termPositions = reader.fields().terms("f1").docsAndPositions(reader.getDeletedDocs(), new BytesRef("a"), null);
DocsAndPositionsEnum termPositions = reader.fields().terms("f1").docsAndPositions(reader.getLiveDocs(), new BytesRef("a"), null);
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
int freq = termPositions.freq();
assertEquals(3, freq);

@ -239,18 +239,18 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);

DocsAndPositionsEnum termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getDeletedDocs(), new BytesRef("term1"), null);
DocsAndPositionsEnum termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getLiveDocs(), new BytesRef("term1"), null);
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(0, termPositions.nextPosition());

termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getDeletedDocs(), new BytesRef("term2"), null);
termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getLiveDocs(), new BytesRef("term2"), null);
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
assertEquals(2, termPositions.freq());
assertEquals(1, termPositions.nextPosition());
assertEquals(3, termPositions.nextPosition());

termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getDeletedDocs(), new BytesRef("term3"), null);
termPositions = reader.fields().terms("preanalyzed").docsAndPositions(reader.getLiveDocs(), new BytesRef("term3"), null);
assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(2, termPositions.nextPosition());
@ -87,8 +87,8 @@ public class TestFilterIndexReader extends LuceneTestCase {
}

@Override
public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
return new TestPositions(super.docsAndPositions(skipDocs, reuse == null ? null : ((FilterDocsAndPositionsEnum) reuse).in));
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
return new TestPositions(super.docsAndPositions(liveDocs, reuse == null ? null : ((FilterDocsAndPositionsEnum) reuse).in));
}
}

@ -157,7 +157,7 @@ public class TestFilterIndexReader extends LuceneTestCase {

assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(new BytesRef("one")));

DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getDeletedDocs(reader),
DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader),
null);
while (positions.nextDoc() != DocsEnum.NO_MORE_DOCS) {
assertTrue((positions.docID() % 2) == 1);
@ -310,7 +310,7 @@ public class TestIndexReader extends LuceneTestCase
int expected)
throws IOException {
DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
MultiFields.getLiveDocs(reader),
term.field(),
new BytesRef(term.text()));
int count = 0;

@ -849,17 +849,17 @@ public class TestIndexReader extends LuceneTestCase
}

// check deletions
final Bits delDocs1 = MultiFields.getDeletedDocs(index1);
final Bits delDocs2 = MultiFields.getDeletedDocs(index2);
final Bits liveDocs1 = MultiFields.getLiveDocs(index1);
final Bits liveDocs2 = MultiFields.getLiveDocs(index2);
for (int i = 0; i < index1.maxDoc(); i++) {
assertEquals("Doc " + i + " only deleted in one index.",
delDocs1 == null || delDocs1.get(i),
delDocs2 == null || delDocs2.get(i));
liveDocs1 == null || !liveDocs1.get(i),
liveDocs2 == null || !liveDocs2.get(i));
}

// check stored fields
for (int i = 0; i < index1.maxDoc(); i++) {
if (delDocs1 == null || !delDocs1.get(i)) {
if (liveDocs1 == null || liveDocs1.get(i)) {
Document doc1 = index1.document(i);
Document doc2 = index2.document(i);
List<Fieldable> fieldable1 = doc1.getFields();

@ -880,15 +880,15 @@ public class TestIndexReader extends LuceneTestCase
FieldsEnum fenum1 = MultiFields.getFields(index1).iterator();
FieldsEnum fenum2 = MultiFields.getFields(index1).iterator();
String field1 = null;
Bits delDocs = MultiFields.getDeletedDocs(index1);
Bits liveDocs = MultiFields.getLiveDocs(index1);
while((field1=fenum1.next()) != null) {
assertEquals("Different fields", field1, fenum2.next());
TermsEnum enum1 = fenum1.terms();
TermsEnum enum2 = fenum2.terms();
while(enum1.next() != null) {
assertEquals("Different terms", enum1.term(), enum2.next());
DocsAndPositionsEnum tp1 = enum1.docsAndPositions(delDocs, null);
DocsAndPositionsEnum tp2 = enum2.docsAndPositions(delDocs, null);
DocsAndPositionsEnum tp1 = enum1.docsAndPositions(liveDocs, null);
DocsAndPositionsEnum tp2 = enum2.docsAndPositions(liveDocs, null);

while(tp1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
assertTrue(tp2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@ -29,8 +29,8 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Bits;

/**
* Tests cloning multiple types of readers, modifying the deletedDocs and norms
* and verifies copy on write semantics of the deletedDocs and norms is
* Tests cloning multiple types of readers, modifying the liveDocs and norms
* and verifies copy on write semantics of the liveDocs and norms is
* implemented properly
*/
public class TestIndexReaderClone extends LuceneTestCase {

@ -282,9 +282,9 @@ public class TestIndexReaderClone extends LuceneTestCase {
assertTrue(sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]) == norm1);
assertTrue(sim.decodeNormValue(MultiNorms.norms(pr1Clone, "field1")[4]) != norm1);

final Bits delDocs = MultiFields.getDeletedDocs(r1);
assertTrue(delDocs == null || !delDocs.get(10));
assertTrue(MultiFields.getDeletedDocs(pr1Clone).get(10));
final Bits liveDocs = MultiFields.getLiveDocs(r1);
assertTrue(liveDocs == null || liveDocs.get(10));
assertFalse(MultiFields.getLiveDocs(pr1Clone).get(10));

// try to update the original reader, which should throw an exception
try {

@ -318,7 +318,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
origSegmentReader.deleteDocument(10);
assertDelDocsRefCountEquals(1, origSegmentReader);
origSegmentReader.undeleteAll();
assertNull(origSegmentReader.deletedDocsRef);
assertNull(origSegmentReader.liveDocsRef);
origSegmentReader.close();
// need to test norms?
dir1.close();

@ -350,10 +350,10 @@ public class TestIndexReaderClone extends LuceneTestCase {

IndexReader origReader = IndexReader.open(dir1, false);
SegmentReader origSegmentReader = getOnlySegmentReader(origReader);
// deletedDocsRef should be null because nothing has updated yet
assertNull(origSegmentReader.deletedDocsRef);
// liveDocsRef should be null because nothing has updated yet
assertNull(origSegmentReader.liveDocsRef);

// we deleted a document, so there is now a deletedDocs bitvector and a
// we deleted a document, so there is now a liveDocs bitvector and a
// reference to it
origReader.deleteDocument(1);
assertDelDocsRefCountEquals(1, origSegmentReader);

@ -363,7 +363,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
IndexReader clonedReader = (IndexReader) origReader.clone();
SegmentReader clonedSegmentReader = getOnlySegmentReader(clonedReader);
assertDelDocsRefCountEquals(2, origSegmentReader);
// deleting a document creates a new deletedDocs bitvector, the refs goes to
// deleting a document creates a new liveDocs bitvector, the refs goes to
// 1
clonedReader.deleteDocument(2);
assertDelDocsRefCountEquals(1, origSegmentReader);

@ -371,13 +371,13 @@ public class TestIndexReaderClone extends LuceneTestCase {

// make sure the deletedocs objects are different (copy
// on write)
assertTrue(origSegmentReader.deletedDocs != clonedSegmentReader.deletedDocs);
assertTrue(origSegmentReader.liveDocs != clonedSegmentReader.liveDocs);

assertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
final Bits delDocs = origSegmentReader.getDeletedDocs();
assertTrue(delDocs == null || !delDocs.get(2)); // doc 2 should not be deleted
final Bits liveDocs = origSegmentReader.getLiveDocs();
assertTrue(liveDocs == null || liveDocs.get(2)); // doc 2 should not be deleted
// in original segmentreader
assertTrue(clonedSegmentReader.getDeletedDocs().get(2)); // doc 2 should be deleted in
assertFalse(clonedSegmentReader.getLiveDocs().get(2)); // doc 2 should be deleted in
// cloned segmentreader

// deleting a doc from the original segmentreader should throw an exception

@ -419,7 +419,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
clonedReader.close();

IndexReader r = IndexReader.open(dir1, false);
assertTrue(MultiFields.getDeletedDocs(r).get(1));
assertFalse(MultiFields.getLiveDocs(r).get(1));
r.close();
dir1.close();
}

@ -448,11 +448,11 @@ public class TestIndexReaderClone extends LuceneTestCase {

private void assertDocDeleted(SegmentReader reader, SegmentReader reader2,
int doc) {
assertEquals(reader.getDeletedDocs().get(doc), reader2.getDeletedDocs().get(doc));
assertEquals(reader.getLiveDocs().get(doc), reader2.getLiveDocs().get(doc));
}

private void assertDelDocsRefCountEquals(int refCount, SegmentReader reader) {
assertEquals(refCount, reader.deletedDocsRef.get());
assertEquals(refCount, reader.liveDocsRef.get());
}

public void testCloneSubreaders() throws Exception {
@ -285,17 +285,17 @@ public class TestIndexReaderDelete extends LuceneTestCase {
IndexReader r = new SlowMultiReaderWrapper(w.getReader());
w.close();

assertNull(r.getDeletedDocs());
assertNull(r.getLiveDocs());
r.close();

r = new SlowMultiReaderWrapper(IndexReader.open(dir, false));

assertNull(r.getDeletedDocs());
assertNull(r.getLiveDocs());
assertEquals(1, r.deleteDocuments(new Term("f", "doctor")));
assertNotNull(r.getDeletedDocs());
assertTrue(r.getDeletedDocs().get(0));
assertNotNull(r.getLiveDocs());
assertFalse(r.getLiveDocs().get(0));
assertEquals(1, r.deleteDocuments(new Term("f", "who")));
assertTrue(r.getDeletedDocs().get(1));
assertFalse(r.getLiveDocs().get(1));
r.close();
dir.close();
}
@ -1115,16 +1115,16 @@ public class TestIndexReaderReopen extends LuceneTestCase {
SegmentReader sr2 = (SegmentReader) r2.getSequentialSubReaders()[0]; // and reopened IRs

// At this point they share the same BitVector
assertTrue(sr1.deletedDocs==sr2.deletedDocs);
assertTrue(sr1.liveDocs==sr2.liveDocs);

r2.deleteDocument(0);

// r1 should not see the delete
final Bits r1DelDocs = MultiFields.getDeletedDocs(r1);
assertFalse(r1DelDocs != null && r1DelDocs.get(0));
final Bits r1LiveDocs = MultiFields.getLiveDocs(r1);
assertFalse(r1LiveDocs != null && !r1LiveDocs.get(0));

// Now r2 should have made a private copy of deleted docs:
assertTrue(sr1.deletedDocs!=sr2.deletedDocs);
assertTrue(sr1.liveDocs!=sr2.liveDocs);

r1.close();
r2.close();

@ -1150,12 +1150,12 @@ public class TestIndexReaderReopen extends LuceneTestCase {
SegmentReader sr2 = (SegmentReader) rs2[0];

// At this point they share the same BitVector
assertTrue(sr1.deletedDocs==sr2.deletedDocs);
final BitVector delDocs = sr1.deletedDocs;
assertTrue(sr1.liveDocs==sr2.liveDocs);
final BitVector liveDocs = sr1.liveDocs;
r1.close();

r2.deleteDocument(0);
assertTrue(delDocs==sr2.deletedDocs);
assertTrue(liveDocs==sr2.liveDocs);
r2.close();
dir.close();
}
@ -536,7 +536,7 @@ public class TestIndexWriter extends LuceneTestCase {
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
DocsEnum td = MultiFields.getTermDocsEnum(reader,
MultiFields.getDeletedDocs(reader),
MultiFields.getLiveDocs(reader),
"field",
new BytesRef("a"));
td.nextDoc();

@ -947,7 +947,7 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(1, hits.length);

DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(s.getIndexReader(),
MultiFields.getDeletedDocs(s.getIndexReader()),
MultiFields.getLiveDocs(s.getIndexReader()),
"field",
new BytesRef("a"));
@@ -484,7 +484,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     // Make sure the doc that hit the exception was marked
     // as deleted:
     DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
-                                                 MultiFields.getDeletedDocs(reader),
+                                                 MultiFields.getLiveDocs(reader),
                                                  t.field(),
                                                  new BytesRef(t.text()));

@@ -624,10 +624,10 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
       assertEquals(expected, reader.docFreq(new Term("contents", "here")));
       assertEquals(expected, reader.maxDoc());
       int numDel = 0;
-      final Bits delDocs = MultiFields.getDeletedDocs(reader);
-      assertNotNull(delDocs);
+      final Bits liveDocs = MultiFields.getLiveDocs(reader);
+      assertNotNull(liveDocs);
       for(int j=0;j<reader.maxDoc();j++) {
-        if (delDocs.get(j))
+        if (!liveDocs.get(j))
           numDel++;
         else {
           reader.document(j);

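The TestIndexWriterExceptions hunk above counts deletions by testing cleared bits instead of set bits. A hedged sketch of that counting loop (hypothetical class name; assumes at least one deletion exists, so getLiveDocs is non-null in the test, though the null case is guarded here):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits;

// Hypothetical sketch of the test's counting loop: a cleared liveDocs bit
// now marks a deleted document, where a set delDocs bit used to.
class CountDeletions {
  static int numDeleted(IndexReader reader) {
    final Bits liveDocs = MultiFields.getLiveDocs(reader);
    int numDel = 0;
    for (int j = 0; j < reader.maxDoc(); j++) {
      if (liveDocs != null && !liveDocs.get(j)) {
        numDel++;
      }
    }
    return numDel;
  }
}
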
@@ -653,7 +653,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     assertEquals(expected, reader.docFreq(new Term("contents", "here")));
     assertEquals(expected, reader.maxDoc());
     int numDel = 0;
-    assertNull(MultiFields.getDeletedDocs(reader));
+    assertNull(MultiFields.getLiveDocs(reader));
     for(int j=0;j<reader.maxDoc();j++) {
       reader.document(j);
       reader.getTermFreqVectors(j);

@@ -743,10 +743,10 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
       assertEquals("i=" + i, expected, reader.docFreq(new Term("contents", "here")));
       assertEquals(expected, reader.maxDoc());
       int numDel = 0;
-      final Bits delDocs = MultiFields.getDeletedDocs(reader);
-      assertNotNull(delDocs);
+      final Bits liveDocs = MultiFields.getLiveDocs(reader);
+      assertNotNull(liveDocs);
       for(int j=0;j<reader.maxDoc();j++) {
-        if (delDocs.get(j))
+        if (!liveDocs.get(j))
          numDel++;
        else {
          reader.document(j);

@@ -771,7 +771,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     expected += 17-NUM_THREAD*NUM_ITER;
     assertEquals(expected, reader.docFreq(new Term("contents", "here")));
     assertEquals(expected, reader.maxDoc());
-    assertNull(MultiFields.getDeletedDocs(reader));
+    assertNull(MultiFields.getLiveDocs(reader));
     for(int j=0;j<reader.maxDoc();j++) {
       reader.document(j);
       reader.getTermFreqVectors(j);

@@ -53,7 +53,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
   public static int count(Term t, IndexReader r) throws IOException {
     int count = 0;
     DocsEnum td = MultiFields.getTermDocsEnum(r,
-                                              MultiFields.getDeletedDocs(r),
+                                              MultiFields.getLiveDocs(r),
                                               t.field(), new BytesRef(t.text()));

     if (td != null) {

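TestIndexWriterReader.count is the one helper here that both builds an enum over the live docs and walks it. A reconstruction sketch of how the full helper plausibly reads after the rename; it is filled in around the visible lines, not copied from the patch:

import java.io.IOException;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;

// Reconstruction sketch: count the live documents containing term t.
class TermDocCounter {
  public static int count(Term t, IndexReader r) throws IOException {
    int count = 0;
    // Passing the live docs makes the enum skip deleted documents.
    DocsEnum td = MultiFields.getTermDocsEnum(r,
                                              MultiFields.getLiveDocs(r),
                                              t.field(), new BytesRef(t.text()));
    if (td != null) {
      while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) {
        count++;
      }
    }
    return count;
  }
}
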
@@ -203,7 +203,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
       // Quick test to make sure index is not corrupt:
       IndexReader reader = IndexReader.open(dir, true);
       DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
-                                                   MultiFields.getDeletedDocs(reader),
+                                                   MultiFields.getLiveDocs(reader),
                                                    "field",
                                                    new BytesRef("aaa"));
       int count = 0;

@@ -268,7 +268,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {

       if (success) {
         IndexReader reader = IndexReader.open(dir, true);
-        final Bits delDocs = MultiFields.getDeletedDocs(reader);
+        final Bits liveDocs = MultiFields.getLiveDocs(reader);
         for(int j=0;j<reader.maxDoc();j++) {
-          if (delDocs == null || !delDocs.get(j)) {
+          if (liveDocs == null || liveDocs.get(j)) {
             reader.document(j);

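The guard in the hunk above is the canonical post-patch filter: getLiveDocs returns null when a reader has no deletions, and null must be read as all documents live. A minimal sketch under the same trunk API (hypothetical class and method):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits;

// Hypothetical sketch: load every live document, treating a null
// liveDocs as "no deletions, everything is live".
class VisitLiveDocs {
  static void visit(IndexReader reader) throws IOException {
    final Bits liveDocs = MultiFields.getLiveDocs(reader);
    for (int j = 0; j < reader.maxDoc(); j++) {
      if (liveDocs == null || liveDocs.get(j)) {
        reader.document(j); // safe: doc j is not deleted
      }
    }
  }
}
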
@@ -155,7 +155,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
         IndexReader reader = IndexReader.open(directory, true);

         DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
-                                                                   MultiFields.getDeletedDocs(reader),
+                                                                   MultiFields.getLiveDocs(reader),
                                                                    this.field,
                                                                    new BytesRef("b"));

@@ -166,7 +166,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
         }

         tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getDeletedDocs(reader),
+                                              MultiFields.getLiveDocs(reader),
                                               this.field,
                                               new BytesRef("a"));

@@ -90,9 +90,9 @@ public class TestMultiFields extends LuceneTestCase {
       w.close();
       //System.out.println("TEST reader=" + reader);

-      Bits delDocs = MultiFields.getDeletedDocs(reader);
+      Bits liveDocs = MultiFields.getLiveDocs(reader);
       for(int delDoc : deleted) {
-        assertTrue(delDocs.get(delDoc));
+        assertFalse(liveDocs.get(delDoc));
       }
       Terms terms2 = MultiFields.getTerms(reader, "field");

@@ -102,7 +102,7 @@ public class TestMultiFields extends LuceneTestCase {
           System.out.println("TEST: seek to term= "+ UnicodeUtil.toHexString(term.utf8ToString()));
         }

-        DocsEnum docsEnum = terms2.docs(delDocs, term, null);
+        DocsEnum docsEnum = terms2.docs(liveDocs, term, null);
         assertNotNull(docsEnum);

         for(int docID : docs.get(term)) {

@@ -121,7 +121,7 @@ public class TestMultiFields extends LuceneTestCase {
   /*
   private void verify(IndexReader r, String term, List<Integer> expected) throws Exception {
     DocsEnum docs = MultiFields.getTermDocsEnum(r,
-                                                MultiFields.getDeletedDocs(r),
+                                                MultiFields.getLiveDocs(r),
                                                 "field",
                                                 new BytesRef(term));

@@ -86,7 +86,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {

     for (int i = 0; i < 2; i++) {
       counter = 0;
-      DocsAndPositionsEnum tp = reader.termPositionsEnum(reader.getDeletedDocs(),
+      DocsAndPositionsEnum tp = reader.termPositionsEnum(reader.getLiveDocs(),
                                                          term.field(),
                                                          new BytesRef(term.text()));

@@ -106,11 +106,11 @@ public class TestNRTThreads extends LuceneTestCase {
       System.out.println("TEST: now warm merged reader=" + reader);
     }
     final int maxDoc = reader.maxDoc();
-    final Bits delDocs = reader.getDeletedDocs();
+    final Bits liveDocs = reader.getLiveDocs();
     int sum = 0;
     final int inc = Math.max(1, maxDoc/50);
     for(int docID=0;docID<maxDoc;docID += inc) {
-      if (delDocs == null || !delDocs.get(docID)) {
+      if (liveDocs == null || liveDocs.get(docID)) {
        final Document doc = reader.document(docID);
        sum += doc.getFields().size();
      }

@@ -80,7 +80,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
         pr.add(ir1);
         pr.add(ir2);

-        Bits delDocs = pr.getDeletedDocs();
+        Bits liveDocs = pr.getLiveDocs();

         FieldsEnum fe = pr.fields().iterator();

@@ -92,31 +92,31 @@ public class TestParallelTermEnum extends LuceneTestCase {
         TermsEnum te = fe.terms();

         assertEquals("brown", te.next().utf8ToString());
-        DocsEnum td = te.docs(delDocs, null);
+        DocsEnum td = te.docs(liveDocs, null);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("fox", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("jumps", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("quick", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("the", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

@@ -127,31 +127,31 @@ public class TestParallelTermEnum extends LuceneTestCase {
         te = fe.terms();

         assertEquals("brown", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("fox", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("jumps", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("quick", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("the", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

@@ -162,37 +162,37 @@ public class TestParallelTermEnum extends LuceneTestCase {
         te = fe.terms();

         assertEquals("dog", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("fox", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("jumps", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("lazy", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("over", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

         assertEquals("the", te.next().utf8ToString());
-        td = te.docs(delDocs, td);
+        td = te.docs(liveDocs, td);
         assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
         assertEquals(0, td.docID());
         assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);

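The three TestParallelTermEnum hunks repeat a single idiom: the live-docs bits are passed to TermsEnum.docs and the previous enum is handed back for reuse. A condensed sketch of that walk over a reader's fields (hypothetical class; assumes the FieldsEnum/TermsEnum API shown above):

import java.io.IOException;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;

// Hypothetical sketch: enumerate all fields and terms, reusing one DocsEnum
// and filtering by the reader's live docs.
class WalkAllTerms {
  static void walk(IndexReader reader) throws IOException {
    final Bits liveDocs = reader.getLiveDocs();
    final FieldsEnum fe = reader.fields().iterator();
    DocsEnum td = null;
    while (fe.next() != null) {
      final TermsEnum te = fe.terms();
      while (te.next() != null) {
        td = te.docs(liveDocs, td); // reuse the previous enum where possible
        while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) {
          // only live documents are returned here
        }
      }
    }
  }
}
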
@@ -223,7 +223,7 @@ public class TestPayloads extends LuceneTestCase {
         DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
         for (int i = 0; i < numTerms; i++) {
             tps[i] = MultiFields.getTermPositionsEnum(reader,
-                                                      MultiFields.getDeletedDocs(reader),
+                                                      MultiFields.getLiveDocs(reader),
                                                       terms[i].field(),
                                                       new BytesRef(terms[i].text()));
         }

@@ -260,7 +260,7 @@ public class TestPayloads extends LuceneTestCase {
          * test lazy skipping
          */
         DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
-                                                                   MultiFields.getDeletedDocs(reader),
+                                                                   MultiFields.getLiveDocs(reader),
                                                                    terms[0].field(),
                                                                    new BytesRef(terms[0].text()));
         tp.nextDoc();

@@ -288,7 +288,7 @@ public class TestPayloads extends LuceneTestCase {
          * Test different lengths at skip points
          */
         tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getDeletedDocs(reader),
+                                              MultiFields.getLiveDocs(reader),
                                               terms[1].field(),
                                               new BytesRef(terms[1].text()));
         tp.nextDoc();

@@ -331,7 +331,7 @@ public class TestPayloads extends LuceneTestCase {

         reader = IndexReader.open(dir, true);
         tp = MultiFields.getTermPositionsEnum(reader,
-                                              MultiFields.getDeletedDocs(reader),
+                                              MultiFields.getLiveDocs(reader),
                                               fieldName,
                                               new BytesRef(singleTerm));
         tp.nextDoc();

@@ -516,11 +516,11 @@ public class TestPayloads extends LuceneTestCase {
         writer.close();
         IndexReader reader = IndexReader.open(dir, true);
         TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator();
-        Bits delDocs = MultiFields.getDeletedDocs(reader);
+        Bits liveDocs = MultiFields.getLiveDocs(reader);
         DocsAndPositionsEnum tp = null;
         while (terms.next() != null) {
             String termText = terms.term().utf8ToString();
-            tp = terms.docsAndPositions(delDocs, tp);
+            tp = terms.docsAndPositions(liveDocs, tp);
             while(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                 int freq = tp.freq();
                 for (int i = 0; i < freq; i++) {

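TestPayloads threads the live docs through docsAndPositions in the same way. A sketch of the position walk for a single field (hypothetical class; payload handling omitted):

import java.io.IOException;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;

// Hypothetical sketch: iterate positions of every term in one field,
// letting the live-docs bits filter out deleted documents.
class WalkPositions {
  static void walk(IndexReader reader, String field) throws IOException {
    final TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator();
    final Bits liveDocs = MultiFields.getLiveDocs(reader);
    DocsAndPositionsEnum tp = null;
    while (terms.next() != null) {
      tp = terms.docsAndPositions(liveDocs, tp);
      while (tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        final int freq = tp.freq();
        for (int i = 0; i < freq; i++) {
          tp.nextPosition(); // advance; a payload could be read here
        }
      }
    }
  }
}
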
@@ -147,7 +147,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {

     IndexReader r = writer.getReader();
     IndexReader r1 = r.getSequentialSubReaders()[0];
-    printDelDocs(r1.getDeletedDocs());
+    printDelDocs(r1.getLiveDocs());
     int[] docs = toDocsArray(id3, null, r);
     System.out.println("id3 docs:"+Arrays.toString(docs));
     // there shouldn't be any docs for id:3

@@ -98,7 +98,7 @@ public class TestSegmentMerger extends LuceneTestCase {
     assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());

     DocsEnum termDocs = MultiFields.getTermDocsEnum(mergedReader,
-                                                    MultiFields.getDeletedDocs(mergedReader),
+                                                    MultiFields.getLiveDocs(mergedReader),
                                                     DocHelper.TEXT_FIELD_2_KEY,
                                                     new BytesRef("field"));
     assertTrue(termDocs != null);

@@ -81,7 +81,7 @@ public class TestSegmentReader extends LuceneTestCase {
     assertTrue(deleteReader != null);
     assertTrue(deleteReader.numDocs() == 1);
     deleteReader.deleteDocument(0);
-    assertTrue(deleteReader.getDeletedDocs().get(0));
+    assertFalse(deleteReader.getLiveDocs().get(0));
     assertTrue(deleteReader.hasDeletions() == true);
     assertTrue(deleteReader.numDocs() == 0);
     deleteReader.close();

@@ -131,13 +131,13 @@ public class TestSegmentReader extends LuceneTestCase {
     }

     DocsEnum termDocs = MultiFields.getTermDocsEnum(reader,
-                                                    MultiFields.getDeletedDocs(reader),
+                                                    MultiFields.getLiveDocs(reader),
                                                     DocHelper.TEXT_FIELD_1_KEY,
                                                     new BytesRef("field"));
     assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);

     termDocs = MultiFields.getTermDocsEnum(reader,
-                                           MultiFields.getDeletedDocs(reader),
+                                           MultiFields.getLiveDocs(reader),
                                            DocHelper.NO_NORMS_KEY,
                                            new BytesRef(DocHelper.NO_NORMS_TEXT));

@@ -145,7 +145,7 @@ public class TestSegmentReader extends LuceneTestCase {


     DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader,
-                                                                      MultiFields.getDeletedDocs(reader),
+                                                                      MultiFields.getLiveDocs(reader),
                                                                       DocHelper.TEXT_FIELD_1_KEY,
                                                                       new BytesRef("field"));
     // NOTE: prior rev of this test was failing to first

Some files were not shown because too many files have changed in this diff.