LUCENE-4605: Add FLAG_NONE to DocsEnum and DocsAndPositionsEnum

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1419991 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Shai Erera 2012-12-11 04:28:17 +00:00
parent edca562dad
commit ea9bffece7
52 changed files with 145 additions and 130 deletions

View File

@ -170,6 +170,9 @@ API Changes
* LUCENE-4591: CompressingStoredFields{Writer,Reader} now accept a segment
suffix as a constructor parameter. (Renaud Delbru via Adrien Grand)
* LUCENE-4605: Added DocsEnum.FLAG_NONE which can be passed instead of 0 as
the flag to .docs() and .docsAndPositions(). (Shai Erera)
Bug Fixes
* LUCENE-1822: BaseFragListBuilder hard-coded 6 char margin is too naive.

View File

@ -87,7 +87,7 @@ public class Test10KPulsings extends LuceneTestCase {
for (int i = 0; i < 10050; i++) {
String expected = df.format(i);
assertEquals(expected, te.next().utf8ToString());
de = _TestUtil.docs(random(), te, null, de, 0);
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
}
@ -145,7 +145,7 @@ public class Test10KPulsings extends LuceneTestCase {
for (int i = 0; i < 10050; i++) {
String expected = df.format(i);
assertEquals(expected, te.next().utf8ToString());
de = _TestUtil.docs(random(), te, null, de, 0);
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
}

View File

@ -60,7 +60,7 @@ public class TestPulsingReuse extends LuceneTestCase {
Map<DocsEnum,Boolean> allEnums = new IdentityHashMap<DocsEnum,Boolean>();
TermsEnum te = segment.terms("foo").iterator(null);
while (te.next() != null) {
reuse = te.docs(null, reuse, 0);
reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
allEnums.put(reuse, true);
}
@ -101,7 +101,7 @@ public class TestPulsingReuse extends LuceneTestCase {
Map<DocsEnum,Boolean> allEnums = new IdentityHashMap<DocsEnum,Boolean>();
TermsEnum te = segment.terms("foo").iterator(null);
while (te.next() != null) {
reuse = te.docs(null, reuse, 0);
reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
allEnums.put(reuse, true);
}

View File

@ -23,6 +23,7 @@ import java.util.Comparator;
import org.apache.lucene.index.FieldInfo; // javadocs
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.MultiDocsEnum;
@ -103,7 +104,7 @@ public abstract class TermsConsumer {
while((term = termsEnum.next()) != null) {
// We can pass null for liveDocs, because the
// mapping enum will skip the non-live docs:
docsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsEnumIn, 0);
docsEnumIn = (MultiDocsEnum) termsEnum.docs(null, docsEnumIn, DocsEnum.FLAG_NONE);
if (docsEnumIn != null) {
docsEnum.reset(docsEnumIn);
final PostingsConsumer postingsConsumer = startTerm(term);

View File

@ -397,7 +397,7 @@ class BufferedDeletesStream {
if (termsEnum.seekExact(term.bytes(), false)) {
// we don't need term frequencies for this
DocsEnum docsEnum = termsEnum.docs(rld.getLiveDocs(), docs, 0);
DocsEnum docsEnum = termsEnum.docs(rld.getLiveDocs(), docs, DocsEnum.FLAG_NONE);
//System.out.println("BDS: got docsEnum=" + docsEnum);
if (docsEnum != null) {

View File

@ -905,7 +905,7 @@ public class CheckIndex {
totalTermFreq += docsNoDel.freq();
}
} else {
final DocsEnum docsNoDel = termsEnum.docs(null, docs, 0);
final DocsEnum docsNoDel = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
docCount = 0;
totalTermFreq = -1;
while(docsNoDel.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
@ -991,7 +991,7 @@ public class CheckIndex {
} else {
for(int idx=0;idx<7;idx++) {
final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
docs = termsEnum.docs(liveDocs, docs, 0);
docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
final int docID = docs.advance(skipDocID);
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
break;
@ -1057,7 +1057,7 @@ public class CheckIndex {
}
int expectedDocFreq = termsEnum.docFreq();
DocsEnum d = termsEnum.docs(null, null, 0);
DocsEnum d = termsEnum.docs(null, null, DocsEnum.FLAG_NONE);
int docFreq = 0;
while (d.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
docFreq++;
@ -1098,7 +1098,7 @@ public class CheckIndex {
throw new RuntimeException("seek to existing term " + seekTerms[i] + " failed");
}
docs = termsEnum.docs(liveDocs, docs, 0);
docs = termsEnum.docs(liveDocs, docs, DocsEnum.FLAG_NONE);
if (docs == null) {
throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
}
@ -1116,7 +1116,7 @@ public class CheckIndex {
}
totDocFreq += termsEnum.docFreq();
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
if (docs == null) {
throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
}

View File

@ -363,7 +363,7 @@ public class DocTermOrds {
final int df = te.docFreq();
if (df <= maxTermDocFreq) {
docsEnum = te.docs(liveDocs, docsEnum, 0);
docsEnum = te.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
// dF, but takes deletions into account
int actualDF = 0;

View File

@ -24,6 +24,7 @@ import org.apache.lucene.util.BytesRef;
/** Also iterates through positions. */
public abstract class DocsAndPositionsEnum extends DocsEnum {
/** Flag to pass to {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
* if you require offsets in the returned enum. */
public static final int FLAG_OFFSETS = 0x1;

View File

@ -27,6 +27,14 @@ import org.apache.lucene.util.Bits; // javadocs
* NOTE: you must first call {@link #nextDoc} before using
* any of the per-doc methods. */
public abstract class DocsEnum extends DocIdSetIterator {
/**
* Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)} if you don't
* require term frequencies in the returned enum. When passed to
{@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}, it means
that neither offsets nor payloads will be returned.
*/
public static final int FLAG_NONE = 0x0;
/** Flag to pass to {@link TermsEnum#docs(Bits,DocsEnum,int)}
* if you require term frequencies in the returned enum. */

View File

@ -479,7 +479,7 @@ public final class MultiTermsEnum extends TermsEnum {
subDocsAndPositions[upto].slice = entry.subSlice;
upto++;
} else {
if (entry.terms.docs(b, null, 0) != null) {
if (entry.terms.docs(b, null, DocsEnum.FLAG_NONE) != null) {
// At least one of our subs does not store
// offsets or positions -- we can't correctly
// produce a MultiDocsAndPositions enum

View File

@ -364,7 +364,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
final byte termval = parser.parseByte(term);
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -437,7 +437,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
final short termval = parser.parseShort(term);
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -541,7 +541,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new int[maxDoc];
}
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -608,7 +608,7 @@ class FieldCacheImpl implements FieldCache {
res = new FixedBitSet(maxDoc);
}
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
// TODO: use bulk API
while (true) {
final int docID = docs.nextDoc();
@ -691,7 +691,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new float[maxDoc];
}
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -779,7 +779,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new long[maxDoc];
}
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -868,7 +868,7 @@ class FieldCacheImpl implements FieldCache {
retArray = new double[maxDoc];
}
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -1161,7 +1161,7 @@ class FieldCacheImpl implements FieldCache {
termOrdToBytesOffset = termOrdToBytesOffset.resize(ArrayUtil.oversize(1+termOrd, 1));
}
termOrdToBytesOffset.set(termOrd, bytes.copyUsingLengthPrefix(term));
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@ -1277,7 +1277,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
final long pointer = bytes.copyUsingLengthPrefix(term);
docs = termsEnum.docs(null, docs, 0);
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {

View File

@ -23,6 +23,7 @@ import java.util.*;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
@ -224,11 +225,11 @@ public class MultiPhraseQuery extends Query {
return null;
}
termsEnum.seekExact(term.bytes(), termState);
postingsEnum = termsEnum.docsAndPositions(liveDocs, null, 0);
postingsEnum = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
if (postingsEnum == null) {
// term does exist, but has no positions
assert termsEnum.docs(liveDocs, null, 0) != null: "termstate found but no term exists in reader";
assert termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE) != null: "termstate found but no term exists in reader";
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
@ -482,7 +483,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
continue;
}
termsEnum.seekExact(term.bytes(), termState);
DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, 0);
DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
if (postings == null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");

View File

@ -106,7 +106,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
do {
// System.out.println(" iter termCount=" + termCount + " term=" +
// enumerator.term().toBytesString());
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
int docid;
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
bitSet.set(docid);

View File

@ -24,6 +24,7 @@ import java.util.Set;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.IndexReaderContext;
@ -262,7 +263,7 @@ public class PhraseQuery extends Query {
return null;
}
te.seekExact(t.bytes(), state);
DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, 0);
DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
// PhraseQuery on a field that did not index
// positions.

View File

@ -58,7 +58,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<DocsEnum, Boolean>();
MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc());
while ((iterator.next()) != null) {
DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()), null, random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()), null, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
@ -85,7 +85,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
MatchNoBits bits = new Bits.MatchNoBits(open.maxDoc());
DocsEnum docs = null;
while ((iterator.next()) != null) {
docs = iterator.docs(bits, docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
docs = iterator.docs(bits, docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
@ -94,7 +94,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
iterator = terms.iterator(null);
docs = null;
while ((iterator.next()) != null) {
docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
assertEquals(terms.size(), enums.size());
@ -103,7 +103,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
iterator = terms.iterator(null);
docs = null;
while ((iterator.next()) != null) {
docs = iterator.docs(null, docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
docs = iterator.docs(null, docs, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
assertEquals(1, enums.size());
@ -135,7 +135,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
DocsEnum docs = null;
BytesRef term = null;
while ((term = iterator.next()) != null) {
docs = iterator.docs(null, randomDocsEnum("body", term, leaves2, bits), random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
docs = iterator.docs(null, randomDocsEnum("body", term, leaves2, bits), random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
assertEquals(terms.size(), enums.size());
@ -144,7 +144,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
enums.clear();
docs = null;
while ((term = iterator.next()) != null) {
docs = iterator.docs(bits, randomDocsEnum("body", term, leaves2, bits), random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
docs = iterator.docs(bits, randomDocsEnum("body", term, leaves2, bits), random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
enums.put(docs, true);
}
assertEquals(terms.size(), enums.size());
@ -163,7 +163,7 @@ public class TestReuseDocsEnum extends LuceneTestCase {
}
TermsEnum iterator = terms.iterator(null);
if (iterator.seekExact(term, true)) {
return iterator.docs(bits, null, random().nextBoolean() ? DocsEnum.FLAG_FREQS : 0);
return iterator.docs(bits, null, random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
}
return null;
}

View File

@ -332,17 +332,17 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
// with positions only
assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, 0),
rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, 0));
assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, 0),
rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, 0));
assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
assertPositionsSkipping(leftTermsEnum.docFreq(),
leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, 0),
rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, 0));
leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
assertPositionsSkipping(leftTermsEnum.docFreq(),
leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, 0),
rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, 0));
leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
// with freqs:
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs),
@ -351,10 +351,10 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
rightDocs = rightTermsEnum.docs(randomBits, rightDocs));
// w/o freqs:
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, 0),
rightDocs = rightTermsEnum.docs(null, rightDocs, 0));
assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, 0),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, 0));
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
// with freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
@ -366,11 +366,11 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
// w/o freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.docs(null, leftDocs, 0),
rightDocs = rightTermsEnum.docs(null, rightDocs, 0));
leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
assertDocsSkipping(leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.docs(randomBits, leftDocs, 0),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, 0));
leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
}
}
assertNull(rightTermsEnum.next());

View File

@ -537,7 +537,7 @@ public class TestAddIndexes extends LuceneTestCase {
private void verifyTermDocs(Directory dir, Term term, int numDocs)
throws IOException {
IndexReader reader = DirectoryReader.open(dir);
DocsEnum docsEnum = _TestUtil.docs(random(), reader, term.field, term.bytes, null, null, 0);
DocsEnum docsEnum = _TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE);
int count = 0;
while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
count++;

View File

@ -725,7 +725,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// should be found exactly
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, 0)));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
assertNull(terms.next());
// should hit end of field
@ -737,12 +737,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.NOT_FOUND,
terms.seekCeil(new BytesRef("a")));
assertTrue(terms.term().bytesEquals(aaaTerm));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, 0)));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
assertNull(terms.next());
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms,null, null, 0)));
assertEquals(35, countDocs(_TestUtil.docs(random(), terms,null, null, DocsEnum.FLAG_NONE)));
assertNull(terms.next());
r.close();

View File

@ -277,7 +277,7 @@ public class TestCodecs extends LuceneTestCase {
// make sure it properly fully resets (rewinds) its
// internal state:
for(int iter=0;iter<2;iter++) {
docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, 0);
docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
assertEquals(terms[i].docs[0], docsEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
}
@ -474,7 +474,7 @@ public class TestCodecs extends LuceneTestCase {
assertEquals(status, TermsEnum.SeekStatus.FOUND);
assertEquals(term.docs.length, termsEnum.docFreq());
if (field.omitTF) {
this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, 0), false);
this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
} else {
this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
}
@ -494,7 +494,7 @@ public class TestCodecs extends LuceneTestCase {
assertTrue(termsEnum.term().bytesEquals(new BytesRef(term.text2)));
assertEquals(term.docs.length, termsEnum.docFreq());
if (field.omitTF) {
this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, 0), false);
this.verifyDocs(term.docs, term.positions, _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE), false);
} else {
this.verifyDocs(term.docs, term.positions, termsEnum.docsAndPositions(null, null), true);
}
@ -557,7 +557,7 @@ public class TestCodecs extends LuceneTestCase {
} else {
postings = null;
docsAndFreqs = null;
docs = _TestUtil.docs(random(), termsEnum, null, null, 0);
docs = _TestUtil.docs(random(), termsEnum, null, null, DocsEnum.FLAG_NONE);
}
assertNotNull(docs);
int upto2 = -1;

View File

@ -70,7 +70,7 @@ public class TestDocCount extends LuceneTestCase {
FixedBitSet visited = new FixedBitSet(ir.maxDoc());
TermsEnum te = terms.iterator(null);
while (te.next() != null) {
DocsEnum de = _TestUtil.docs(random(), te, null, null, 0);
DocsEnum de = _TestUtil.docs(random(), te, null, null, DocsEnum.FLAG_NONE);
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
visited.set(de.docID());
}

View File

@ -334,7 +334,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
AtomicReader r = getOnlySegmentReader(reader);
DocsEnum disi = _TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, 0);
DocsEnum disi = _TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
int docid = disi.docID();
assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@ -342,7 +342,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
// now reuse and check again
TermsEnum te = r.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar"), true));
disi = _TestUtil.docs(random(), te, null, disi, 0);
disi = _TestUtil.docs(random(), te, null, disi, DocsEnum.FLAG_NONE);
docid = disi.docID();
assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

View File

@ -349,11 +349,11 @@ public class TestDuelingCodecs extends LuceneTestCase {
true);
// w/o freqs:
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, 0),
rightDocs = rightTermsEnum.docs(null, rightDocs, 0),
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
false);
assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, 0),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, 0),
assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
false);
// with freqs:
@ -368,12 +368,12 @@ public class TestDuelingCodecs extends LuceneTestCase {
// w/o freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.docs(null, leftDocs, 0),
rightDocs = rightTermsEnum.docs(null, rightDocs, 0),
leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
false);
assertDocsSkipping(leftTermsEnum.docFreq(),
leftDocs = leftTermsEnum.docs(randomBits, leftDocs, 0),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, 0),
leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
false);
}
}

View File

@ -1181,12 +1181,12 @@ public class TestIndexWriter extends LuceneTestCase {
// test that the terms were indexed.
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, 0).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(_TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
ir.close();
dir.close();
@ -1258,7 +1258,7 @@ public class TestIndexWriter extends LuceneTestCase {
TermsEnum t = r.fields().terms("field").iterator(null);
int count = 0;
while(t.next() != null) {
final DocsEnum docs = _TestUtil.docs(random(), t, null, null, 0);
final DocsEnum docs = _TestUtil.docs(random(), t, null, null, DocsEnum.FLAG_NONE);
assertEquals(0, docs.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docs.nextDoc());
count++;

View File

@ -983,7 +983,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
w.addDocument(doc);
SegmentReader r = getOnlySegmentReader(DirectoryReader.open(w, true));
try {
_TestUtil.docs(random(), r, "f", new BytesRef("val"), null, null, 0);
_TestUtil.docs(random(), r, "f", new BytesRef("val"), null, null, DocsEnum.FLAG_NONE);
fail("should have failed to seek since terms index was not loaded.");
} catch (IllegalStateException e) {
// expected - we didn't load the term index

View File

@ -374,7 +374,7 @@ public class TestLongPostings extends LuceneTestCase {
final DocsEnum postings;
if (options == IndexOptions.DOCS_ONLY) {
docs = _TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, 0);
docs = _TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_NONE);
postings = null;
} else {
docs = postings = _TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_FREQS);

View File

@ -121,7 +121,7 @@ public class TestMultiFields extends LuceneTestCase {
System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term);
}
DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, 0);
DocsEnum docsEnum = _TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE);
assertNotNull(docsEnum);
for(int docID : docs.get(term)) {
@ -162,8 +162,8 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, 0);
DocsEnum d1 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
DocsEnum d2 = _TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
assertEquals(0, d1.nextDoc());
assertEquals(0, d2.nextDoc());
r.close();

View File

@ -82,7 +82,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
BytesRef b = te.next();
assertNotNull(b);
assertEquals(t, b.utf8ToString());
DocsEnum td = _TestUtil.docs(random(), te, liveDocs, null, 0);
DocsEnum td = _TestUtil.docs(random(), te, liveDocs, null, DocsEnum.FLAG_NONE);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, td.docID());
assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);

View File

@ -228,7 +228,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
Terms cterms = fields.terms(term.field);
TermsEnum ctermsEnum = cterms.iterator(null);
if (ctermsEnum.seekExact(new BytesRef(term.text()), false)) {
DocsEnum docsEnum = _TestUtil.docs(random(), ctermsEnum, bits, null, 0);
DocsEnum docsEnum = _TestUtil.docs(random(), ctermsEnum, bits, null, DocsEnum.FLAG_NONE);
return toArray(docsEnum);
}
return null;

View File

@ -82,11 +82,11 @@ public class TestStressAdvance extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " iter2=" + iter2);
}
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("a")));
de = _TestUtil.docs(random(), te, null, de, 0);
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
testOne(de, aDocIDs);
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("b")));
de = _TestUtil.docs(random(), te, null, de, 0);
de = _TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
testOne(de, bDocIDs);
}

View File

@ -337,7 +337,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
Bits liveDocs = MultiFields.getLiveDocs(r1);
DocsEnum docs = null;
while(termsEnum.next() != null) {
docs = _TestUtil.docs(random(), termsEnum, liveDocs, docs, 0);
docs = _TestUtil.docs(random(), termsEnum, liveDocs, docs, DocsEnum.FLAG_NONE);
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
fail("r1 is not empty but r2 is");
}
@ -357,9 +357,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
}
termDocs1 = _TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, 0);
termDocs1 = _TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, DocsEnum.FLAG_NONE);
if (termsEnum2.seekExact(term, false)) {
termDocs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, 0);
termDocs2 = _TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, DocsEnum.FLAG_NONE);
} else {
termDocs2 = null;
}

View File

@ -225,7 +225,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, 0);
docsEnum = _TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
assertNotNull(docsEnum);
int doc = docsEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);

View File

@ -124,7 +124,7 @@ public class TestTermdocPerf extends LuceneTestCase {
final Random random = new Random(random().nextLong());
for (int i=0; i<iter; i++) {
tenum.seekCeil(new BytesRef("val"));
tdocs = _TestUtil.docs(random, tenum, MultiFields.getLiveDocs(reader), tdocs, 0);
tdocs = _TestUtil.docs(random, tenum, MultiFields.getLiveDocs(reader), tdocs, DocsEnum.FLAG_NONE);
while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret += tdocs.docID();
}

View File

@ -332,7 +332,7 @@ public class TestTermsEnum extends LuceneTestCase {
}
assertEquals(expected, actual);
assertEquals(1, te.docFreq());
docsEnum = _TestUtil.docs(random(), te, null, docsEnum, 0);
docsEnum = _TestUtil.docs(random(), te, null, docsEnum, DocsEnum.FLAG_NONE);
final int docID = docsEnum.nextDoc();
assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(docIDToID[docID], termToID.get(expected).intValue());
@ -747,25 +747,25 @@ public class TestTermsEnum extends LuceneTestCase {
CompiledAutomaton ca = new CompiledAutomaton(automaton, false, false);
TermsEnum te = terms.intersect(ca, null);
assertEquals("aaa", te.next().utf8ToString());
assertEquals(0, te.docs(null, null, 0).nextDoc());
assertEquals(0, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertEquals("bbb", te.next().utf8ToString());
assertEquals(1, te.docs(null, null, 0).nextDoc());
assertEquals(1, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertEquals("ccc", te.next().utf8ToString());
assertEquals(2, te.docs(null, null, 0).nextDoc());
assertEquals(2, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertNull(te.next());
te = terms.intersect(ca, new BytesRef("abc"));
assertEquals("bbb", te.next().utf8ToString());
assertEquals(1, te.docs(null, null, 0).nextDoc());
assertEquals(1, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertEquals("ccc", te.next().utf8ToString());
assertEquals(2, te.docs(null, null, 0).nextDoc());
assertEquals(2, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertNull(te.next());
te = terms.intersect(ca, new BytesRef("aaa"));
assertEquals("bbb", te.next().utf8ToString());
assertEquals(1, te.docs(null, null, 0).nextDoc());
assertEquals(1, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertEquals("ccc", te.next().utf8ToString());
assertEquals(2, te.docs(null, null, 0).nextDoc());
assertEquals(2, te.docs(null, null, DocsEnum.FLAG_NONE).nextDoc());
assertNull(te.next());
r.close();

View File

@ -777,7 +777,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// 'validation' checks.
cp.clear();
cp.add(t.utf8ToString(), delimiter);
docsEnum = termsEnum.docs(null, docsEnum, 0);
docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
boolean res = cache.put(cp, docsEnum.nextDoc() + ctx.docBase);
assert !res : "entries should not have been evicted from the cache";
} else {
@ -870,7 +870,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
cp.clear();
cp.add(value, Consts.DEFAULT_DELIMITER);
final int ordinal = addCategory(cp);
docs = te.docs(null, docs, 0);
docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
}
base += ar.maxDoc(); // no deletions, so we're ok

View File

@ -284,7 +284,7 @@ public abstract class FacetTestBase extends LuceneTestCase {
TermsEnum te = terms.iterator(null);
DocsEnum de = null;
while (te.next() != null) {
de = _TestUtil.docs(random(), te, liveDocs, de, 0);
de = _TestUtil.docs(random(), te, liveDocs, de, DocsEnum.FLAG_NONE);
int cnt = 0;
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
cnt++;

View File

@ -89,7 +89,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase {
// Obtain facets results and hand-test them
assertCorrectResults(facetsCollector);
DocsEnum td = _TestUtil.docs(random(), ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, 0);
DocsEnum td = _TestUtil.docs(random(), ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, DocsEnum.FLAG_NONE);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
tr.close();
@ -182,7 +182,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase {
}
private void assertPostingListExists(String field, String text, IndexReader ir) throws IOException {
DocsEnum de = _TestUtil.docs(random(), ir, field, new BytesRef(text), null, null, 0);
DocsEnum de = _TestUtil.docs(random(), ir, field, new BytesRef(text), null, null, DocsEnum.FLAG_NONE);
assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
}

View File

@ -189,7 +189,7 @@ class TermsIncludingScoreQuery extends Query {
scoreUpto = upto;
if (termsEnum.seekExact(terms.get(ords[upto++], spare), true)) {
docsEnum = reuse = termsEnum.docs(acceptDocs, reuse, 0);
docsEnum = reuse = termsEnum.docs(acceptDocs, reuse, DocsEnum.FLAG_NONE);
}
} while (docsEnum == null);
@ -261,7 +261,7 @@ class TermsIncludingScoreQuery extends Query {
scoreUpto = upto;
if (termsEnum.seekExact(terms.get(ords[upto++], spare), true)) {
docsEnum = reuse = termsEnum.docs(acceptDocs, reuse, 0);
docsEnum = reuse = termsEnum.docs(acceptDocs, reuse, DocsEnum.FLAG_NONE);
}
} while (docsEnum == null);
@ -302,7 +302,7 @@ class TermsIncludingScoreQuery extends Query {
DocsEnum docsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare), true)) {
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
matchingDocs.set(doc);
@ -348,7 +348,7 @@ class TermsIncludingScoreQuery extends Query {
DocsEnum docsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare), true)) {
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
// I prefer this:

View File

@ -558,7 +558,7 @@ public class TestJoinUtil extends LuceneTestCase {
for (BytesRef joinValue : joinValues) {
termsEnum = terms.iterator(termsEnum);
if (termsEnum.seekExact(joinValue, true)) {
docsEnum = termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, 0);
docsEnum = termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
JoinScore joinScore = joinValueToJoinScores.get(joinValue);
for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {

View File

@ -286,7 +286,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
MemoryIndex memory = new MemoryIndex(random().nextBoolean(), random().nextInt(50) * 1024 * 1024);
memory.addField("foo", "bar", analyzer);
AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
DocsEnum disi = _TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, 0);
DocsEnum disi = _TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
int docid = disi.docID();
assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@ -294,7 +294,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar"), true));
disi = te.docs(null, disi, 0);
disi = te.docs(null, disi, DocsEnum.FLAG_NONE);
docid = disi.docID();
assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

View File

@ -194,7 +194,7 @@ public final class TermsFilter extends Filter {
spare.offset = offsets[i];
spare.length = offsets[i+1] - offsets[i];
if (termsEnum.seekExact(spare, false)) { // don't use cache since we could pollute the cache here easily
docs = termsEnum.docs(acceptDocs, docs, 0); // no freq since we don't need them
docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
if (result == null) {
if (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
result = new FixedBitSet(reader.maxDoc());

View File

@ -102,7 +102,7 @@ public class DuplicateFilter extends Filter {
if (currTerm == null) {
break;
} else {
docs = termsEnum.docs(acceptDocs, docs, 0);
docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
@ -142,7 +142,7 @@ public class DuplicateFilter extends Filter {
} else {
if (termsEnum.docFreq() > 1) {
// unset potential duplicates
docs = termsEnum.docs(acceptDocs, docs, 0);
docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {

View File

@ -117,7 +117,7 @@ RE "scan" threshold:
if (!termsEnum.seekExact(cellTerm, true))
continue;
if (cell.getLevel() == detailLevel || cell.isLeaf()) {
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
addDocs(docsEnum,bits);
} else {//any other intersection
assert cell.getLevel() < detailLevel; //assertions help clarify logic
@ -130,7 +130,7 @@ RE "scan" threshold:
assert StringHelper.startsWith(nextCellTerm, cellTerm);
scanCell = grid.getNode(nextCellTerm.bytes, nextCellTerm.offset, nextCellTerm.length, scanCell);
if (scanCell.getLevel() == cell.getLevel() && scanCell.isLeaf()) {
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
addDocs(docsEnum,bits);
//increment pointer to avoid potential redundant addDocs() below
nextCellTerm = termsEnum.next();
@ -163,7 +163,7 @@ RE "scan" threshold:
if(queryShape.relate(cShape) == SpatialRelation.DISJOINT)
continue;
docsEnum = termsEnum.docs(acceptDocs, docsEnum, 0);
docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
addDocs(docsEnum,bits);
}
}//term loop

View File

@ -70,7 +70,7 @@ public abstract class ShapeFieldCacheProvider<T extends Shape> {
while (term != null) {
T shape = readShape(term);
if( shape != null ) {
docs = te.docs(null, docs, 0);
docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
Integer docid = docs.nextDoc();
while (docid != DocIdSetIterator.NO_MORE_DOCS) {
idx.add( docid, shape );

View File

@ -667,7 +667,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase {
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
prevDocsEnum = threadState.reuseDocsEnum;
}
threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : 0);
threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
docsEnum = threadState.reuseDocsEnum;
docsAndPositionsEnum = null;
}

View File

@ -389,7 +389,7 @@ public class LukeRequestHandler extends RequestHandlerBase
if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
return null;
}
docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, 0);
docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
return reader.document(docsEnum.docID());
}

View File

@ -540,7 +540,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
for (String id : elevations.ids) {
term.copyChars(id);
if (seen.contains(id) == false && termsEnum.seekExact(term, false)) {
docsEnum = termsEnum.docs(liveDocs, docsEnum, 0);
docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
if (docsEnum != null) {
int docId = docsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS ) continue; // must have been deleted

View File

@ -755,7 +755,7 @@ public class SimpleFacets {
// TODO: specialize when base docset is a bitset or hash set (skipDocs)? or does it matter for this?
// TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl)
// TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet?
docsEnum = termsEnum.docs(null, docsEnum, 0);
docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
c=0;
if (docsEnum instanceof MultiDocsEnum) {

View File

@ -340,7 +340,7 @@ class JoinQuery extends Query {
if (freq < minDocFreqFrom) {
fromTermDirectCount++;
// OK to skip liveDocs, since we check for intersection with docs matching query
fromDeState.docsEnum = fromDeState.termsEnum.docs(null, fromDeState.docsEnum, 0);
fromDeState.docsEnum = fromDeState.termsEnum.docs(null, fromDeState.docsEnum, DocsEnum.FLAG_NONE);
DocsEnum docsEnum = fromDeState.docsEnum;
if (docsEnum instanceof MultiDocsEnum) {
@ -405,7 +405,7 @@ class JoinQuery extends Query {
toTermDirectCount++;
// need to use liveDocs here so we don't map to any deleted ones
toDeState.docsEnum = toDeState.termsEnum.docs(toDeState.liveDocs, toDeState.docsEnum, 0);
toDeState.docsEnum = toDeState.termsEnum.docs(toDeState.liveDocs, toDeState.docsEnum, DocsEnum.FLAG_NONE);
DocsEnum docsEnum = toDeState.docsEnum;
if (docsEnum instanceof MultiDocsEnum) {

View File

@ -594,7 +594,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
if (!termsEnum.seekExact(termBytes, false)) {
return -1;
}
DocsEnum docs = termsEnum.docs(atomicReader.getLiveDocs(), null, 0);
DocsEnum docs = termsEnum.docs(atomicReader.getLiveDocs(), null, DocsEnum.FLAG_NONE);
if (docs == null) return -1;
int id = docs.nextDoc();
return id == DocIdSetIterator.NO_MORE_DOCS ? -1 : id;
@ -616,7 +616,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
TermsEnum te = terms.iterator(null);
if (te.seekExact(idBytes, true)) {
DocsEnum docs = te.docs(reader.getLiveDocs(), null, 0);
DocsEnum docs = te.docs(reader.getLiveDocs(), null, DocsEnum.FLAG_NONE);
int id = docs.nextDoc();
if (id == DocIdSetIterator.NO_MORE_DOCS) continue;
assert docs.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
@ -920,7 +920,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
int bitsSet = 0;
OpenBitSet obs = null;
DocsEnum docsEnum = deState.termsEnum.docs(deState.liveDocs, deState.docsEnum, 0);
DocsEnum docsEnum = deState.termsEnum.docs(deState.liveDocs, deState.docsEnum, DocsEnum.FLAG_NONE);
if (deState.docsEnum == null) {
deState.docsEnum = docsEnum;
}
@ -998,7 +998,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(termBytes, false)) {
docsEnum = termsEnum.docs(liveDocs, null, 0);
docsEnum = termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE);
}
}

View File

@ -305,7 +305,7 @@ public class FileFloatSource extends ValueSource {
continue;
}
docsEnum = termsEnum.docs(null, docsEnum, 0);
docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
int doc;
while ((doc = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
vals[doc] = fval;

View File

@ -151,7 +151,7 @@ public class SolrIndexSplitter {
// TODO: hook in custom hashes (or store hashes)
int hash = Hash.murmurhash3_x86_32(term.bytes, term.offset, term.length, 0);
docsEnum = termsEnum.docs(liveDocs, docsEnum, 0x0);
docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
for (;;) {
int doc = docsEnum.nextDoc();
if (doc == DocsEnum.NO_MORE_DOCS) break;

View File

@ -134,7 +134,7 @@ public class TestRTGBase extends SolrTestCaseJ4 {
if (!termsEnum.seekExact(termBytes, false)) {
return -1;
}
DocsEnum docs = termsEnum.docs(MultiFields.getLiveDocs(r), null, 0);
DocsEnum docs = termsEnum.docs(MultiFields.getLiveDocs(r), null, DocsEnum.FLAG_NONE);
int id = docs.nextDoc();
if (id != DocIdSetIterator.NO_MORE_DOCS) {
int next = docs.nextDoc();