mirror of https://github.com/apache/lucene.git
LUCENE-3850: Fix rawtypes warnings for Java 7 compiler
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1297048 13f79535-47bb-0310-9956-ffa450edef68
parent e5f216f3dd
commit 989530e17e
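Background on the fix pattern: Java forbids generic array creation (an expression like "new FST.Arc<BytesRef>[5]" will not compile), so such arrays are allocated through a raw type and then assigned or cast. Per the commit message, the Java 7 compiler began flagging those raw allocations with a rawtypes lint warning on top of the unchecked warning older compilers already gave, so the suppressions below are widened from @SuppressWarnings("unchecked") to @SuppressWarnings({"rawtypes","unchecked"}). A minimal sketch of the idiom, illustrative only and not taken from the Lucene sources:

import java.util.ArrayList;
import java.util.List;

public class RawtypesDemo {
  // "new List<String>[n]" is rejected by javac, so the array is created
  // through the raw type List; the raw allocation trips the rawtypes lint
  // and the assignment to List<String>[] is an unchecked conversion.
  @SuppressWarnings({"rawtypes", "unchecked"})
  static List<String>[] makeBuckets(int n) {
    List<String>[] buckets = new List[n];
    for (int i = 0; i < n; i++) {
      buckets[i] = new ArrayList<String>();
    }
    return buckets;
  }

  public static void main(String[] args) {
    List<String>[] buckets = makeBuckets(4);
    buckets[0].add("hello");
    System.out.println(buckets[0]);
  }
}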
@@ -50,6 +50,7 @@ import org.apache.lucene.search.highlight.SynonymTokenizer.TestHighlightRunner;
 import org.apache.lucene.search.spans.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.automaton.BasicAutomata;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.apache.lucene.util.automaton.RegExp;
@@ -1969,16 +1970,16 @@ final class SynonymTokenizer extends TokenStream {

       String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
           fragmentSeparator);
-      if (HighlighterTest.VERBOSE) System.out.println("\t" + result);
+      if (LuceneTestCase.VERBOSE) System.out.println("\t" + result);
     }
   }

   abstract void run() throws Exception;

   void start() throws Exception {
-    if (HighlighterTest.VERBOSE) System.out.println("Run QueryScorer");
+    if (LuceneTestCase.VERBOSE) System.out.println("Run QueryScorer");
     run();
-    if (HighlighterTest.VERBOSE) System.out.println("Run QueryTermScorer");
+    if (LuceneTestCase.VERBOSE) System.out.println("Run QueryTermScorer");
     mode = QUERY_TERM;
     run();
   }

@ -18,6 +18,7 @@ package org.apache.lucene.sandbox.queries;
|
|||
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.search.DocIdSet;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
@ -94,7 +95,7 @@ public class DuplicateFilter extends Filter {
|
|||
} else {
|
||||
docs = termsEnum.docs(acceptDocs, docs, false);
|
||||
int doc = docs.nextDoc();
|
||||
if (doc != DocsEnum.NO_MORE_DOCS) {
|
||||
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
|
||||
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
|
||||
bits.set(doc);
|
||||
} else {
|
||||
|
@ -102,7 +103,7 @@ public class DuplicateFilter extends Filter {
|
|||
while (true) {
|
||||
lastDoc = doc;
|
||||
doc = docs.nextDoc();
|
||||
if (doc == DocsEnum.NO_MORE_DOCS) {
|
||||
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -134,7 +135,7 @@ public class DuplicateFilter extends Filter {
|
|||
// unset potential duplicates
|
||||
docs = termsEnum.docs(acceptDocs, docs, false);
|
||||
int doc = docs.nextDoc();
|
||||
if (doc != DocsEnum.NO_MORE_DOCS) {
|
||||
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
|
||||
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
|
||||
doc = docs.nextDoc();
|
||||
}
|
||||
|
@ -145,7 +146,7 @@ public class DuplicateFilter extends Filter {
|
|||
lastDoc = doc;
|
||||
bits.clear(lastDoc);
|
||||
doc = docs.nextDoc();
|
||||
if (doc == DocsEnum.NO_MORE_DOCS) {
|
||||
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
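A second recurring change in this commit replaces DocsEnum.NO_MORE_DOCS, and instance-qualified references such as td.NO_MORE_DOCS, with DocIdSetIterator.NO_MORE_DOCS. The constant is declared on DocIdSetIterator (as Integer.MAX_VALUE) and only inherited by the enum subclasses, so naming the declaring class is the cleaner reference. A small sketch of the shape, with hypothetical classes standing in for the real Lucene hierarchy:

// BaseIterator stands in for DocIdSetIterator, SubIterator for DocsEnum.
abstract class BaseIterator {
  public static final int NO_MORE_DOCS = Integer.MAX_VALUE;
  public abstract int nextDoc();
}

abstract class SubIterator extends BaseIterator {
  // inherits NO_MORE_DOCS; SubIterator.NO_MORE_DOCS also compiles,
  // but it obscures where the constant actually lives
}

class IterationDemo {
  static int countDocs(BaseIterator it) {
    int count = 0;
    // reference the constant through its declaring class, as the hunks do
    while (it.nextDoc() != BaseIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}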
@@ -25,6 +25,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -142,7 +143,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
                                       false);

      int lastDoc = 0;
-      while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        lastDoc = td.docID();
      }
      assertEquals("Duplicate urls should return last doc", lastDoc, hit.doc);

@@ -488,7 +488,7 @@ public class BlockTreeTermsReader extends FieldsProducer {

    private Frame[] stack;

-    @SuppressWarnings("unchecked") private FST.Arc<BytesRef>[] arcs = new FST.Arc[5];
+    @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc<BytesRef>[] arcs = new FST.Arc[5];

    private final RunAutomaton runAutomaton;
    private final CompiledAutomaton compiledAutomaton;
@@ -821,7 +821,8 @@ public class BlockTreeTermsReader extends FieldsProducer {

    private FST.Arc<BytesRef> getArc(int ord) {
      if (ord >= arcs.length) {
-        @SuppressWarnings("unchecked") final FST.Arc<BytesRef>[] next = new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+        @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next =
+            new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
        System.arraycopy(arcs, 0, next, 0, arcs.length);
        for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
          next[arcOrd] = new FST.Arc<BytesRef>();
@@ -1198,7 +1199,8 @@ public class BlockTreeTermsReader extends FieldsProducer {
    final BytesRef term = new BytesRef();
    private final FST.BytesReader fstReader;

-    @SuppressWarnings("unchecked") private FST.Arc<BytesRef>[] arcs = new FST.Arc[1];
+    @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc<BytesRef>[] arcs =
+        new FST.Arc[1];

    public SegmentTermsEnum() throws IOException {
      //if (DEBUG) System.out.println("BTTR.init seg=" + segment);
@@ -1354,7 +1356,8 @@ public class BlockTreeTermsReader extends FieldsProducer {

    private FST.Arc<BytesRef> getArc(int ord) {
      if (ord >= arcs.length) {
-        @SuppressWarnings("unchecked") final FST.Arc<BytesRef>[] next = new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+        @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<BytesRef>[] next =
+            new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
        System.arraycopy(arcs, 0, next, 0, arcs.length);
        for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
          next[arcOrd] = new FST.Arc<BytesRef>();

@@ -639,7 +639,6 @@ public class BlockTreeTermsWriter extends FieldsConsumer {
    }

    // for debugging
-    @SuppressWarnings("unused")
    private String toString(BytesRef b) {
      try {
        return b.utf8ToString() + " " + b;

@@ -30,6 +30,7 @@ import org.apache.lucene.index.FieldsEnum;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -236,7 +237,7 @@ public abstract class TermVectorsWriter implements Closeable {

        if (docsAndPositionsEnum != null) {
          final int docID = docsAndPositionsEnum.nextDoc();
-          assert docID != DocsEnum.NO_MORE_DOCS;
+          assert docID != DocIdSetIterator.NO_MORE_DOCS;
          assert docsAndPositionsEnum.freq() == freq;

          for(int posUpto=0; posUpto<freq; posUpto++) {

@@ -401,7 +401,7 @@ class BufferedDeletesStream {
        while (true) {
          final int docID = docsEnum.nextDoc();
          //System.out.println(Thread.currentThread().getName() + " del term=" + term + " doc=" + docID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          // NOTE: there is no limit check on the docID

@@ -925,7 +925,7 @@ public class CheckIndex {
          final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
          postings = termsEnum.docsAndPositions(liveDocs, postings, false);
          final int docID = postings.advance(skipDocID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          } else {
            if (docID < skipDocID) {
@@ -948,7 +948,7 @@ public class CheckIndex {
            }

            final int nextDocID = postings.nextDoc();
-            if (nextDocID == DocsEnum.NO_MORE_DOCS) {
+            if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            }
            if (nextDocID <= docID) {
@@ -961,14 +961,14 @@ public class CheckIndex {
          final int skipDocID = (int) (((idx+1)*(long) maxDoc)/8);
          docs = termsEnum.docs(liveDocs, docs, false);
          final int docID = docs.advance(skipDocID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          } else {
            if (docID < skipDocID) {
              throw new RuntimeException("term " + term + ": advance(docID=" + skipDocID + ") returned docID=" + docID);
            }
            final int nextDocID = docs.nextDoc();
-            if (nextDocID == DocsEnum.NO_MORE_DOCS) {
+            if (nextDocID == DocIdSetIterator.NO_MORE_DOCS) {
              break;
            }
            if (nextDocID <= docID) {
@@ -1067,7 +1067,7 @@ public class CheckIndex {
          throw new RuntimeException("null DocsEnum from to existing term " + seekTerms[i]);
        }

-        while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+        while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          totDocCount++;
        }
      }

@@ -49,7 +49,7 @@ class ConjunctionTermScorer extends Scorer {

  private int doNext(int doc) throws IOException {
    do {
-      if (lead.doc == DocsEnum.NO_MORE_DOCS) {
+      if (lead.doc == DocIdSetIterator.NO_MORE_DOCS) {
        return NO_MORE_DOCS;
      }
      advanceHead: do {

@@ -76,7 +76,7 @@ final class ExactPhraseScorer extends Scorer {
      // freq of rarest 2 terms is close:
      final boolean useAdvance = postings[i].docFreq > 5*postings[0].docFreq;
      chunkStates[i] = new ChunkState(postings[i].postings, -postings[i].position, useAdvance);
-      if (i > 0 && postings[i].postings.nextDoc() == DocsEnum.NO_MORE_DOCS) {
+      if (i > 0 && postings[i].postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
        noDocs = true;
        return;
      }
@@ -89,7 +89,7 @@ final class ExactPhraseScorer extends Scorer {

      // first (rarest) term
      final int doc = chunkStates[0].posEnum.nextDoc();
-      if (doc == DocsEnum.NO_MORE_DOCS) {
+      if (doc == DocIdSetIterator.NO_MORE_DOCS) {
        docID = doc;
        return doc;
      }
@@ -140,8 +140,8 @@ final class ExactPhraseScorer extends Scorer {

    // first term
    int doc = chunkStates[0].posEnum.advance(target);
-    if (doc == DocsEnum.NO_MORE_DOCS) {
-      docID = DocsEnum.NO_MORE_DOCS;
+    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+      docID = DocIdSetIterator.NO_MORE_DOCS;
      return doc;
    }

@@ -171,7 +171,7 @@ final class ExactPhraseScorer extends Scorer {
      }

      doc = chunkStates[0].posEnum.nextDoc();
-      if (doc == DocsEnum.NO_MORE_DOCS) {
+      if (doc == DocIdSetIterator.NO_MORE_DOCS) {
        docID = doc;
        return doc;
      }

@@ -367,7 +367,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -440,7 +440,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -544,7 +544,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -612,7 +612,7 @@ class FieldCacheImpl implements FieldCache {
        // TODO: use bulk API
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          res.set(docID);
@@ -694,7 +694,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -782,7 +782,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -871,7 +871,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          retArray[docID] = termval;
@@ -1172,7 +1172,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          docToTermOrd.set(docID, termOrd);
@@ -1293,7 +1293,7 @@ class FieldCacheImpl implements FieldCache {
        docs = termsEnum.docs(null, docs, false);
        while (true) {
          final int docID = docs.nextDoc();
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }
          docToOffset.set(docID, pointer);

@@ -459,6 +459,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
  }

  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
  public final boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof FieldCacheRangeFilter)) return false;

@@ -150,7 +150,7 @@ public abstract class FieldComparator<T> {
   * comparator across segments
   * @throws IOException
   */
-  public abstract FieldComparator setNextReader(AtomicReaderContext context) throws IOException;
+  public abstract FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException;

  /** Sets the Scorer to use in case a document's score is
   * needed.
@@ -201,7 +201,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
      if (missingValue != null) {
        docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
        // optimization to remove unneeded checks on the bit interface:
@@ -258,7 +258,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Byte> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getBytes(context.reader(), field, parser, missingValue != null);
@@ -335,7 +335,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader(), field, parser, missingValue != null);
@@ -396,7 +396,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
      final DocValues docValues = context.reader().docValues(field);
      if (docValues != null) {
        currentReaderValues = docValues.getSource();
@@ -478,7 +478,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Float> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader(), field, parser, missingValue != null);
@@ -540,7 +540,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Short> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getShorts(context.reader(), field, parser, missingValue != null);
@@ -624,7 +624,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getInts(context.reader(), field, parser, missingValue != null);
@@ -689,7 +689,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException {
      DocValues docValues = context.reader().docValues(field);
      if (docValues != null) {
        currentReaderValues = docValues.getSource();
@@ -772,7 +772,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException {
      // NOTE: must do this before calling super otherwise
      // we compute the docsWithField Bits twice!
      currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader(), field, parser, missingValue != null);
@@ -824,7 +824,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) {
+    public FieldComparator<Float> setNextReader(AtomicReaderContext context) {
      return this;
    }

@@ -887,7 +887,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) {
+    public FieldComparator<Integer> setNextReader(AtomicReaderContext context) {
      // TODO: can we "map" our docIDs to the current
      // reader? saves having to then subtract on every
      // compare call
@@ -1007,7 +1007,7 @@ public abstract class FieldComparator<T> {
    abstract class PerSegmentComparator extends FieldComparator<BytesRef> {

      @Override
-      public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+      public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
        return TermOrdValComparator.this.setNextReader(context);
      }

@@ -1226,11 +1226,11 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
      final int docBase = context.docBase;
      termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
      final PackedInts.Reader docToOrd = termsIndex.getDocToOrd();
-      FieldComparator perSegComp = null;
+      FieldComparator<BytesRef> perSegComp = null;
      if (docToOrd.hasArray()) {
        final Object arr = docToOrd.getArray();
        if (arr instanceof byte[]) {
@@ -1397,7 +1397,7 @@ public abstract class FieldComparator<T> {
    abstract class PerSegmentComparator extends FieldComparator<BytesRef> {

      @Override
-      public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+      public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
        return TermOrdValDocValuesComparator.this.setNextReader(context);
      }

@@ -1625,7 +1625,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
      final int docBase = context.docBase;

      final DocValues dv = context.reader().docValues(field);
@@ -1646,7 +1646,7 @@ public abstract class FieldComparator<T> {

      comp = termsIndex.getComparator();

-      FieldComparator perSegComp = null;
+      FieldComparator<BytesRef> perSegComp = null;
      if (termsIndex.hasPackedDocToOrd()) {
        final PackedInts.Reader docToOrd = termsIndex.getDocToOrd();
        if (docToOrd.hasArray()) {
@@ -1774,7 +1774,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
      docTerms = FieldCache.DEFAULT.getTerms(context.reader(), field);
      return this;
    }
@@ -1843,7 +1843,7 @@ public abstract class FieldComparator<T> {
    }

    @Override
-    public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
+    public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
      final DocValues dv = context.reader().docValues(field);
      if (dv != null) {
        docTerms = dv.getSource();

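The FieldComparator hunks above are the third pattern in this commit: overrides that used to return the raw FieldComparator are parameterized with the class's own type argument (FieldComparator<T>, or a concrete one such as <Byte> or <BytesRef>), which removes the rawtypes warning at each override site. A minimal sketch of the shape, with illustrative names rather than the actual Lucene API surface:

// SegmentComparator stands in for FieldComparator; Object for the
// AtomicReaderContext parameter.
abstract class SegmentComparator<T> {
  // declared with the type parameter, so a raw-returning override
  // would draw a rawtypes warning under the Java 7 compiler
  public abstract SegmentComparator<T> setNext(Object segment);
}

class IntSegmentComparator extends SegmentComparator<Integer> {
  @Override
  public SegmentComparator<Integer> setNext(Object segment) {
    // a real implementation would load per-segment values here
    return this;
  }
}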
@@ -36,6 +36,6 @@ public abstract class FieldComparatorSource {
   * @throws IOException
   *         If an error occurs reading the index.
   */
-  public abstract FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
+  public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
      throws IOException;
}

@@ -129,6 +129,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
  }

  // prevent instantiation and extension.
+  @SuppressWarnings({"rawtypes","unchecked"})
  private FieldValueHitQueue(SortField[] fields, int size) {
    super(size);
    // When we get here, fields.length is guaranteed to be > 0, therefore no
@@ -169,7 +170,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
    }
  }

-  public FieldComparator[] getComparators() {
+  public FieldComparator<?>[] getComparators() {
    return comparators;
  }

@@ -177,15 +178,15 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
    return reverseMul;
  }

-  public void setComparator(int pos, FieldComparator comparator) {
+  public void setComparator(int pos, FieldComparator<?> comparator) {
    if (pos==0) firstComparator = comparator;
    comparators[pos] = comparator;
  }

  /** Stores the sort criteria being used. */
  protected final SortField[] fields;
-  protected final FieldComparator[] comparators; // use setComparator to change this array
-  protected FieldComparator firstComparator; // this must always be equal to comparators[0]
+  protected final FieldComparator<?>[] comparators; // use setComparator to change this array
+  protected FieldComparator<?> firstComparator; // this must always be equal to comparators[0]
  protected final int[] reverseMul;

  @Override

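Where a container holds comparators of mixed value types, as in FieldValueHitQueue's comparators array here and TopDocs' MergeSortQueue below, the commit uses the wildcard FieldComparator<?>[] rather than the raw FieldComparator[]. An array of an unbounded-wildcard type is reifiable, so unlike a generic array it can be created directly with no suppression at all. A small illustration using plain JDK types (nothing Lucene-specific is assumed):

import java.util.ArrayList;
import java.util.List;

public class WildcardArrayDemo {
  public static void main(String[] args) {
    // legal with no warning: List<?> is reifiable, List<String> is not
    List<?>[] slots = new List<?>[2];
    slots[0] = new ArrayList<String>();
    slots[1] = new ArrayList<Integer>();
    System.out.println(slots.length + " slots of differing element types");
  }
}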
@@ -408,7 +408,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
    Iterator<DocsAndPositionsEnum> i = docsEnums.iterator();
    while (i.hasNext()) {
      DocsAndPositionsEnum postings = i.next();
-      if (postings.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+      if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        add(postings);
      }
    }

@@ -60,6 +60,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
  }

  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
  public final boolean equals(final Object o) {
    if (o==this) return true;
    if (o==null) return false;

@@ -352,6 +352,7 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
  }

  @Override
+  @SuppressWarnings({"unchecked","rawtypes"})
  public final boolean equals(final Object o) {
    if (o==this) return true;
    if (!super.equals(o))

@@ -376,7 +376,7 @@ public class SortField {
   * optimize themselves when they are the primary sort.
   * @return {@link FieldComparator} to use when sorting
   */
-  public FieldComparator getComparator(final int numHits, final int sortPos) throws IOException {
+  public FieldComparator<?> getComparator(final int numHits, final int sortPos) throws IOException {

    switch (type) {
    case SCORE:

@@ -116,10 +116,11 @@ public class TopDocs {
      }
    }

+  @SuppressWarnings({"rawtypes","unchecked"})
  private static class MergeSortQueue extends PriorityQueue<ShardRef> {
    // These are really FieldDoc instances:
    final ScoreDoc[][] shardHits;
-    final FieldComparator[] comparators;
+    final FieldComparator<?>[] comparators;
    final int[] reverseMul;

    public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException {
@@ -155,7 +156,7 @@ public class TopDocs {
    }

    // Returns true if first is < second
-    @SuppressWarnings("unchecked")
+    @SuppressWarnings({"unchecked","rawtypes"})
    public boolean lessThan(ShardRef first, ShardRef second) {
      assert first != second;
      final FieldDoc firstFD = (FieldDoc) shardHits[first.shardIndex][first.hitIndex];

@@ -46,7 +46,7 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
  private static class OneComparatorNonScoringCollector extends
      TopFieldCollector {

-    FieldComparator comparator;
+    FieldComparator<?> comparator;
    final int reverseMul;
    final FieldValueHitQueue<Entry> queue;

@@ -382,7 +382,7 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
   */
  private static class MultiComparatorNonScoringCollector extends TopFieldCollector {

-    final FieldComparator[] comparators;
+    final FieldComparator<?>[] comparators;
    final int[] reverseMul;
    final FieldValueHitQueue<Entry> queue;
    public MultiComparatorNonScoringCollector(FieldValueHitQueue<Entry> queue,

@@ -135,7 +135,8 @@ public class PayloadSpanUtil {
      }
    }

-    @SuppressWarnings("unchecked") final List<Query>[] disjunctLists = new List[maxPosition + 1];
+    @SuppressWarnings({"rawtypes","unchecked"}) final List<Query>[] disjunctLists =
+        new List[maxPosition + 1];
    int distinctPositions = 0;

    for (int i = 0; i < termArrays.size(); ++i) {

@@ -60,6 +60,7 @@ public class SpanMultiTermQueryWrapper<Q extends MultiTermQuery> extends SpanQue
   * Be sure to not change the rewrite method on the wrapped query afterwards! Doing so will
   * throw {@link UnsupportedOperationException} on rewriting this query!
   */
+  @SuppressWarnings({"rawtypes","unchecked"})
  public SpanMultiTermQueryWrapper(Q query) {
    this.query = query;

@@ -123,6 +124,7 @@ public class SpanMultiTermQueryWrapper<Q extends MultiTermQuery> extends SpanQue
  }

  @Override
+  @SuppressWarnings({"rawtypes","unchecked"})
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null) return false;

@@ -56,7 +56,7 @@ public class TermSpans extends Spans {
      return false;
    }
    doc = postings.nextDoc();
-    if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) {
+    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
      return false;
    }
    freq = postings.freq();
@@ -70,7 +70,7 @@ public class TermSpans extends Spans {
  @Override
  public boolean skipTo(int target) throws IOException {
    doc = postings.advance(target);
-    if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) {
+    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
      return false;
    }

@@ -430,7 +430,7 @@ public class Automaton implements Cloneable {
      }
    }
    // map<state, set<state>>
-    @SuppressWarnings("unchecked") Set<State> map[] = new Set[states.length];
+    @SuppressWarnings({"rawtypes","unchecked"}) Set<State> map[] = new Set[states.length];
    for (int i = 0; i < map.length; i++)
      map[i] = new HashSet<State>();
    for (State s : states) {

@@ -74,11 +74,11 @@ final public class MinimizationOperations {
    final int[] sigma = a.getStartPoints();
    final State[] states = a.getNumberedStates();
    final int sigmaLen = sigma.length, statesLen = states.length;
-    @SuppressWarnings("unchecked") final ArrayList<State>[][] reverse =
+    @SuppressWarnings({"rawtypes","unchecked"}) final ArrayList<State>[][] reverse =
      (ArrayList<State>[][]) new ArrayList[statesLen][sigmaLen];
-    @SuppressWarnings("unchecked") final HashSet<State>[] partition =
+    @SuppressWarnings({"rawtypes","unchecked"}) final HashSet<State>[] partition =
      (HashSet<State>[]) new HashSet[statesLen];
-    @SuppressWarnings("unchecked") final ArrayList<State>[] splitblock =
+    @SuppressWarnings({"rawtypes","unchecked"}) final ArrayList<State>[] splitblock =
      (ArrayList<State>[]) new ArrayList[statesLen];
    final int[] block = new int[statesLen];
    final StateList[][] active = new StateList[statesLen][sigmaLen];

@@ -144,7 +144,8 @@ public class Builder<T> {
    }
    NO_OUTPUT = outputs.getNoOutput();

-    @SuppressWarnings("unchecked") final UnCompiledNode<T>[] f = (UnCompiledNode<T>[]) new UnCompiledNode[10];
+    @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T>[] f =
+        (UnCompiledNode<T>[]) new UnCompiledNode[10];
    frontier = f;
    for(int idx=0;idx<frontier.length;idx++) {
      frontier[idx] = new UnCompiledNode<T>(this, idx);
@@ -239,7 +240,8 @@ public class Builder<T> {
      if (node.inputCount < minSuffixCount2 || (minSuffixCount2 == 1 && node.inputCount == 1 && idx > 1)) {
        // drop all arcs
        for(int arcIdx=0;arcIdx<node.numArcs;arcIdx++) {
-          @SuppressWarnings("unchecked") final UnCompiledNode<T> target = (UnCompiledNode<T>) node.arcs[arcIdx].target;
+          @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T> target =
+              (UnCompiledNode<T>) node.arcs[arcIdx].target;
          target.clear();
        }
        node.numArcs = 0;
@@ -356,7 +358,7 @@ public class Builder<T> {
    final int prefixLenPlus1 = pos1+1;

    if (frontier.length < input.length+1) {
-      @SuppressWarnings("unchecked") final UnCompiledNode<T>[] next =
+      @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T>[] next =
        new UnCompiledNode[ArrayUtil.oversize(input.length+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
      System.arraycopy(frontier, 0, next, 0, frontier.length);
      for(int idx=frontier.length;idx<next.length;idx++) {
@@ -458,7 +460,7 @@ public class Builder<T> {
        final Arc<T> arc = node.arcs[arcIdx];
        if (!arc.target.isCompiled()) {
          // not yet compiled
-          @SuppressWarnings("unchecked") final UnCompiledNode<T> n = (UnCompiledNode<T>) arc.target;
+          @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T> n = (UnCompiledNode<T>) arc.target;
          if (n.numArcs == 0) {
            //System.out.println("seg=" + segment + " FORCE final arc=" + (char) arc.label);
            arc.isFinal = n.isFinal = true;
@@ -512,7 +514,7 @@ public class Builder<T> {
     * LUCENE-2934 (node expansion based on conditions other than the
     * fanout size).
     */
-    @SuppressWarnings("unchecked")
+    @SuppressWarnings({"rawtypes","unchecked"})
    public UnCompiledNode(Builder<T> owner, int depth) {
      this.owner = owner;
      arcs = (Arc<T>[]) new Arc[1];
@@ -545,7 +547,7 @@ public class Builder<T> {
      assert label >= 0;
      assert numArcs == 0 || label > arcs[numArcs-1].label: "arc[-1].label=" + arcs[numArcs-1].label + " new label=" + label + " numArcs=" + numArcs;
      if (numArcs == arcs.length) {
-        @SuppressWarnings("unchecked") final Arc<T>[] newArcs =
+        @SuppressWarnings({"rawtypes","unchecked"}) final Arc<T>[] newArcs =
          new Arc[ArrayUtil.oversize(numArcs+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
        System.arraycopy(arcs, 0, newArcs, 0, arcs.length);
        for(int arcIdx=numArcs;arcIdx<newArcs.length;arcIdx++) {

@@ -376,7 +376,7 @@ public final class FST<T> {
  }

  // Caches first 128 labels
-  @SuppressWarnings("unchecked")
+  @SuppressWarnings({"rawtypes","unchecked"})
  private void cacheRootArcs() throws IOException {
    cachedRootArcs = (Arc<T>[]) new Arc[0x80];
    final Arc<T> arc = new Arc<T>();

@@ -30,9 +30,9 @@ import java.io.IOException;
 abstract class FSTEnum<T> {
  protected final FST<T> fst;

-  @SuppressWarnings("unchecked") protected FST.Arc<T>[] arcs = new FST.Arc[10];
+  @SuppressWarnings({"rawtypes","unchecked"}) protected FST.Arc<T>[] arcs = new FST.Arc[10];
  // outputs are cumulative
-  @SuppressWarnings("unchecked") protected T[] output = (T[]) new Object[10];
+  @SuppressWarnings({"rawtypes","unchecked"}) protected T[] output = (T[]) new Object[10];

  protected final T NO_OUTPUT;
  protected final FST.Arc<T> scratchArc = new FST.Arc<T>();
@@ -462,13 +462,13 @@ abstract class FSTEnum<T> {
    upto++;
    grow();
    if (arcs.length <= upto) {
-      @SuppressWarnings("unchecked") final FST.Arc<T>[] newArcs =
+      @SuppressWarnings({"rawtypes","unchecked"}) final FST.Arc<T>[] newArcs =
        new FST.Arc[ArrayUtil.oversize(1+upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
      System.arraycopy(arcs, 0, newArcs, 0, arcs.length);
      arcs = newArcs;
    }
    if (output.length <= upto) {
-      @SuppressWarnings("unchecked") final T[] newOutput =
+      @SuppressWarnings({"rawtypes","unchecked"}) final T[] newOutput =
        (T[]) new Object[ArrayUtil.oversize(1+upto, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
      System.arraycopy(output, 0, newOutput, 0, output.length);
      output = newOutput;

@@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;

@@ -76,7 +77,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
        "preanalyzed",
        new BytesRef("term1"),
        false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, termPositions.freq());
    assertEquals(0, termPositions.nextPosition());

@@ -85,7 +86,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
        "preanalyzed",
        new BytesRef("term2"),
        false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(2, termPositions.freq());
    assertEquals(1, termPositions.nextPosition());
    assertEquals(3, termPositions.nextPosition());
@@ -95,7 +96,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
        "preanalyzed",
        new BytesRef("term3"),
        false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, termPositions.freq());
    assertEquals(2, termPositions.nextPosition());
    reader.close();

@@ -35,6 +35,7 @@ import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.TieredMergePolicy;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
@@ -141,10 +142,10 @@ public class TestAppendingCodec extends LuceneTestCase {
    assertEquals(SeekStatus.FOUND, te.seekCeil(new BytesRef("dog")));
    assertEquals(SeekStatus.FOUND, te.seekCeil(new BytesRef("the")));
    DocsEnum de = te.docs(null, null, true);
-    assertTrue(de.advance(0) != DocsEnum.NO_MORE_DOCS);
+    assertTrue(de.advance(0) != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(2, de.freq());
-    assertTrue(de.advance(1) != DocsEnum.NO_MORE_DOCS);
-    assertTrue(de.advance(2) == DocsEnum.NO_MORE_DOCS);
+    assertTrue(de.advance(1) != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(de.advance(2) == DocIdSetIterator.NO_MORE_DOCS);
    reader.close();
  }

@@ -81,7 +81,7 @@ public class TestCodecs extends LuceneTestCase {
    NUM_TEST_ITER = atLeast(20);
  }

-  class FieldData implements Comparable {
+  class FieldData implements Comparable<FieldData> {
    final FieldInfo fieldInfo;
    final TermData[] terms;
    final boolean omitTF;
@@ -102,8 +102,8 @@ public class TestCodecs extends LuceneTestCase {
      Arrays.sort(terms);
    }

-    public int compareTo(final Object other) {
-      return fieldInfo.name.compareTo(((FieldData) other).fieldInfo.name);
+    public int compareTo(final FieldData other) {
+      return fieldInfo.name.compareTo(other.fieldInfo.name);
    }

    public void write(final FieldsConsumer consumer) throws Throwable {
@@ -133,7 +133,7 @@ public class TestCodecs extends LuceneTestCase {
    }
  }

-  class TermData implements Comparable {
+  class TermData implements Comparable<TermData> {
    String text2;
    final BytesRef text;
    int[] docs;
@@ -147,8 +147,8 @@ public class TestCodecs extends LuceneTestCase {
      this.positions = positions;
    }

-    public int compareTo(final Object o) {
-      return text.compareTo(((TermData) o).text);
+    public int compareTo(final TermData o) {
+      return text.compareTo(o.text);
    }

    public long write(final TermsConsumer termsConsumer) throws Throwable {
@@ -281,7 +281,7 @@ public class TestCodecs extends LuceneTestCase {
      for(int iter=0;iter<2;iter++) {
        docsEnum = _TestUtil.docs(random, termsEnum, null, docsEnum, false);
        assertEquals(terms[i].docs[0], docsEnum.nextDoc());
-        assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
+        assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
      }
    }
    assertNull(termsEnum.next());
@@ -439,7 +439,7 @@ public class TestCodecs extends LuceneTestCase {
        assertEquals(positions[i].pos, pos);
        if (positions[i].payload != null) {
          assertTrue(posEnum.hasPayload());
-          if (TestCodecs.random.nextInt(3) < 2) {
+          if (LuceneTestCase.random.nextInt(3) < 2) {
            // Verify the payload bytes
            final BytesRef otherPayload = posEnum.getPayload();
            assertTrue("expected=" + positions[i].payload.toString() + " got=" + otherPayload.toString(), positions[i].payload.equals(otherPayload));
@@ -453,7 +453,7 @@ public class TestCodecs extends LuceneTestCase {
    public void _run() throws Throwable {

      for(int iter=0;iter<NUM_TEST_ITER;iter++) {
-        final FieldData field = fields[TestCodecs.random.nextInt(fields.length)];
+        final FieldData field = fields[LuceneTestCase.random.nextInt(fields.length)];
        final TermsEnum termsEnum = termsDict.terms(field.fieldInfo.name).iterator(null);
        if (si.getCodec() instanceof Lucene3xCodec) {
          // code below expects unicode sort order
@@ -473,7 +473,7 @@ public class TestCodecs extends LuceneTestCase {
        assertEquals(upto, field.terms.length);

        // Test random seek:
-        TermData term = field.terms[TestCodecs.random.nextInt(field.terms.length)];
+        TermData term = field.terms[LuceneTestCase.random.nextInt(field.terms.length)];
        TermsEnum.SeekStatus status = termsEnum.seekCeil(new BytesRef(term.text2));
        assertEquals(status, TermsEnum.SeekStatus.FOUND);
        assertEquals(term.docs.length, termsEnum.docFreq());
@@ -484,7 +484,7 @@ public class TestCodecs extends LuceneTestCase {
        }

        // Test random seek by ord:
-        final int idx = TestCodecs.random.nextInt(field.terms.length);
+        final int idx = LuceneTestCase.random.nextInt(field.terms.length);
        term = field.terms[idx];
        boolean success = false;
        try {
@@ -547,7 +547,7 @@ public class TestCodecs extends LuceneTestCase {
        upto = 0;
        do {
          term = field.terms[upto];
-          if (TestCodecs.random.nextInt(3) == 1) {
+          if (LuceneTestCase.random.nextInt(3) == 1) {
            final DocsEnum docs;
            final DocsEnum docsAndFreqs;
            final DocsAndPositionsEnum postings;
@@ -569,10 +569,10 @@ public class TestCodecs extends LuceneTestCase {
            // Maybe skip:
            final int left = term.docs.length-upto2;
            int doc;
-            if (TestCodecs.random.nextInt(3) == 1 && left >= 1) {
-              final int inc = 1+TestCodecs.random.nextInt(left-1);
+            if (LuceneTestCase.random.nextInt(3) == 1 && left >= 1) {
+              final int inc = 1+LuceneTestCase.random.nextInt(left-1);
              upto2 += inc;
-              if (TestCodecs.random.nextInt(2) == 1) {
+              if (LuceneTestCase.random.nextInt(2) == 1) {
                doc = docs.advance(term.docs[upto2]);
                assertEquals(term.docs[upto2], doc);
              } else {
@@ -597,7 +597,7 @@ public class TestCodecs extends LuceneTestCase {
            assertEquals(term.docs[upto2], doc);
            if (!field.omitTF) {
              assertEquals(term.positions[upto2].length, postings.freq());
-              if (TestCodecs.random.nextInt(2) == 1) {
+              if (LuceneTestCase.random.nextInt(2) == 1) {
                this.verifyPositions(term.positions[upto2], postings);
              }
            }

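The TestCodecs hunks show the companion pattern for Comparable: implementing the raw interface forces compareTo(Object) plus a cast, while the parameterized form gives a typed compareTo and removes both the cast and the rawtypes warning. (The same hunks also reference the inherited static "random" field through its declaring class, LuceneTestCase, rather than through the subclass, mirroring the NO_MORE_DOCS cleanup.) A sketch of the Comparable change with an illustrative class:

// Raw "implements Comparable" would require compareTo(Object) and a cast;
// the type parameter removes both, as in the FieldData/TermData hunks.
class Entry implements Comparable<Entry> {
  final String name;

  Entry(String name) {
    this.name = name;
  }

  // typed parameter replaces compareTo(Object) plus an explicit cast
  public int compareTo(Entry other) {
    return name.compareTo(other.name);
  }
}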
@@ -110,7 +110,7 @@ public class TestDirectoryReader extends LuceneTestCase {

    // This should blow up if we forget to check that the TermEnum is from the same
    // reader as the TermDocs.
-    while (td.nextDoc() != td.NO_MORE_DOCS) ret += td.docID();
+    while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) ret += td.docID();

    // really a dummy assert to ensure that we got some docs and to ensure that
    // nothing is eliminated by hotspot

@@ -816,7 +816,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
    assertEquals(1, docFreq);
    DocsEnum termDocsEnum = reader.termDocsEnum(null, term.field, term.bytes, false);
    int nextDoc = termDocsEnum.nextDoc();
-    assertEquals(DocsEnum.NO_MORE_DOCS, termDocsEnum.nextDoc());
+    assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocsEnum.nextDoc());
    return nextDoc;
  }

@@ -84,7 +84,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
          assertEquals(msg, 20, docsAndPosEnum.nextPosition());
          assertEquals(msg, 4, docsAndPosEnum.freq());
          assertEquals(msg, 30, docsAndPosEnum.nextPosition());
-        } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS);
+        } while (docsAndPosEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
      }
    }
    reader.close();
@@ -156,7 +156,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
      // now run through the scorer and check if all positions are there...
      do {
        int docID = docsAndPosEnum.docID();
-        if (docID == DocsAndPositionsEnum.NO_MORE_DOCS) {
+        if (docID == DocIdSetIterator.NO_MORE_DOCS) {
          break;
        }
        Integer[] pos = positionsInDoc[atomicReaderContext.docBase + docID];
@@ -177,7 +177,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
              .advance(docID + 1 + random.nextInt((maxDoc - docID)));
        }

-      } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS);
+      } while (docsAndPosEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    }

  }
@@ -234,7 +234,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
          int next = findNext(freqInDoc, context.docBase+j+1, context.docBase + maxDoc) - context.docBase;
          int advancedTo = docsEnum.advance(next);
          if (next >= maxDoc) {
-            assertEquals(DocsEnum.NO_MORE_DOCS, advancedTo);
+            assertEquals(DocIdSetIterator.NO_MORE_DOCS, advancedTo);
          } else {
            assertTrue("advanced to: " +advancedTo + " but should be <= " + next, next >= advancedTo);
          }
@@ -243,7 +243,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
          }
        }
      }
-      assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocsEnum.NO_MORE_DOCS, docsEnum.docID());
+      assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, docsEnum.docID());
    }

  }

@@ -30,6 +30,7 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
@@ -129,7 +130,7 @@ public class TestDocumentWriter extends LuceneTestCase {

    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
        "repeated", new BytesRef("repeated"), false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    int freq = termPositions.freq();
    assertEquals(2, freq);
    assertEquals(0, termPositions.nextPosition());
@@ -200,7 +201,7 @@ public class TestDocumentWriter extends LuceneTestCase {
    SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));

    DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"), false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    int freq = termPositions.freq();
    assertEquals(3, freq);
    assertEquals(0, termPositions.nextPosition());
@@ -244,18 +245,18 @@ public class TestDocumentWriter extends LuceneTestCase {
    SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));

    DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"), false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, termPositions.freq());
    assertEquals(0, termPositions.nextPosition());

    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term2"), false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(2, termPositions.freq());
    assertEquals(1, termPositions.nextPosition());
    assertEquals(3, termPositions.nextPosition());

    termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term3"), false);
-    assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS);
+    assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, termPositions.freq());
    assertEquals(2, termPositions.nextPosition());
    reader.close();

@@ -24,6 +24,7 @@ import java.lang.reflect.Modifier;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.Bits;
@@ -165,7 +166,7 @@ public class TestFilterAtomicReader extends LuceneTestCase {

    DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader),
        null, false);
-    while (positions.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+    while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      assertTrue((positions.docID() % 2) == 1);
    }

@@ -969,14 +969,14 @@ public class TestIndexWriter extends LuceneTestCase {
    assertNotNull(termsEnum.next());
    DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false);
    assertNotNull(dpEnum);
-    assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+    assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, dpEnum.freq());
    assertEquals(100, dpEnum.nextPosition());

    assertNotNull(termsEnum.next());
    dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
    assertNotNull(dpEnum);
-    assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+    assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(1, dpEnum.freq());
    assertEquals(101, dpEnum.nextPosition());
    assertNull(termsEnum.next());

@@ -31,6 +31,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -57,7 +58,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
        false);

    if (td != null) {
-      while (td.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      while (td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        td.docID();
        count++;
      }

@@ -25,6 +25,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
@@ -217,7 +218,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
          null,
          false);
      int count = 0;
-      while(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++;
      }
      assertTrue(count > 0);

@@ -28,6 +28,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -264,14 +265,14 @@ public class TestIndexableField extends LuceneTestCase {
        assertEquals(new BytesRef(""+counter), termsEnum.next());
        assertEquals(1, termsEnum.totalTermFreq());
        DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false);
-        assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+        assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
        assertEquals(1, dpEnum.freq());
        assertEquals(1, dpEnum.nextPosition());

        assertEquals(new BytesRef("text"), termsEnum.next());
        assertEquals(1, termsEnum.totalTermFreq());
        dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
-        assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
+        assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
        assertEquals(1, dpEnum.freq());
        assertEquals(0, dpEnum.nextPosition());

@@ -29,6 +29,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.document.TextField;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -176,7 +177,7 @@ public class TestLongPostings extends LuceneTestCase {
      final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term), false);

      int docID = -1;
-      while(docID < DocsEnum.NO_MORE_DOCS) {
+      while(docID < DocIdSetIterator.NO_MORE_DOCS) {
        final int what = random.nextInt(3);
        if (what == 0) {
          if (VERBOSE) {
@@ -199,7 +200,7 @@ public class TestLongPostings extends LuceneTestCase {
            System.out.println(" got docID=" + docID);
          }
          assertEquals(expected, docID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

@@ -241,7 +242,7 @@ public class TestLongPostings extends LuceneTestCase {
            System.out.println(" got docID=" + docID);
          }
          assertEquals(expected, docID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

@@ -380,7 +381,7 @@ public class TestLongPostings extends LuceneTestCase {
      assert docs != null;

      int docID = -1;
-      while(docID < DocsEnum.NO_MORE_DOCS) {
+      while(docID < DocIdSetIterator.NO_MORE_DOCS) {
        final int what = random.nextInt(3);
        if (what == 0) {
          if (VERBOSE) {
@@ -403,7 +404,7 @@ public class TestLongPostings extends LuceneTestCase {
            System.out.println(" got docID=" + docID);
          }
          assertEquals(expected, docID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

@@ -439,7 +440,7 @@ public class TestLongPostings extends LuceneTestCase {
            System.out.println(" got docID=" + docID);
          }
          assertEquals(expected, docID);
-          if (docID == DocsEnum.NO_MORE_DOCS) {
+          if (docID == DocIdSetIterator.NO_MORE_DOCS) {
            break;
          }

@@ -17,6 +17,7 @@ package org.apache.lucene.index;
* limitations under the License.
*/

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
import org.apache.lucene.document.*;
@@ -128,7 +129,7 @@ public class TestMultiFields extends LuceneTestCase {
assertEquals(docID, docsEnum.nextDoc());
}
}
assertEquals(docsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
}

reader.close();

@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@@ -79,9 +80,9 @@ public class TestParallelTermEnum extends LuceneTestCase {
assertNotNull(b);
assertEquals(t, b.utf8ToString());
DocsEnum td = _TestUtil.docs(random, te, liveDocs, null, false);
assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, td.docID());
assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);
}
assertNull(te.next());
}

@@ -24,6 +24,7 @@ import java.util.Map;
import java.util.Random;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
@@ -233,7 +234,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {

public static int[] toArray(DocsEnum docsEnum) throws IOException {
List<Integer> docs = new ArrayList<Integer>();
while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docID = docsEnum.docID();
docs.add(docID);
}

@@ -88,7 +88,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertEquals(2, dp.nextPosition());
assertEquals(9, dp.startOffset());
assertEquals(17, dp.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());

dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("b"), true);
assertNotNull(dp);
@@ -97,7 +97,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertEquals(1, dp.nextPosition());
assertEquals(8, dp.startOffset());
assertEquals(9, dp.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());

dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("c"), true);
assertNotNull(dp);
@@ -106,7 +106,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertEquals(3, dp.nextPosition());
assertEquals(19, dp.startOffset());
assertEquals(50, dp.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());

r.close();
dir.close();
@@ -156,7 +156,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
for (String term : terms) {
DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term), true);
int doc;
while((doc = dp.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
String storedNumbers = reader.document(doc).get("numbers");
int freq = dp.freq();
for (int i = 0; i < freq; i++) {
@@ -304,7 +304,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
assertNotNull(docs);
int doc;
//System.out.println("  doc/freq");
while((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
while((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]);
//System.out.println("    doc=" + docIDToID[doc] + " docID=" + doc + " " + expected.size() + " freq");
assertNotNull(expected);
@@ -314,7 +314,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, false);
assertNotNull(docsAndPositions);
//System.out.println("    doc/freq/pos");
while((doc = docsAndPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]);
//System.out.println("    doc=" + docIDToID[doc] + " " + expected.size() + " freq");
assertNotNull(expected);
@@ -329,7 +329,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
docsAndPositionsAndOffsets = termsEnum.docsAndPositions(null, docsAndPositions, true);
assertNotNull(docsAndPositionsAndOffsets);
//System.out.println("    doc/freq/pos/offs");
while((doc = docsAndPositions.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
final List<Token> expected = actualTokens.get(term).get(docIDToID[doc]);
//System.out.println("    doc=" + docIDToID[doc] + " " + expected.size() + " freq");
assertNotNull(expected);

@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.InfoStream;
@@ -105,7 +106,7 @@ public class TestSegmentMerger extends LuceneTestCase {
null,
false);
assertTrue(termDocs != null);
assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

int tvCount = 0;
for(FieldInfo fieldInfo : mergedReader.getFieldInfos()) {

@@ -23,6 +23,7 @@ import java.util.HashSet;
import java.util.List;

import org.apache.lucene.document.Document;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
@@ -133,7 +134,7 @@ public class TestSegmentReader extends LuceneTestCase {
MultiFields.getLiveDocs(reader),
null,
false);
assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

termDocs = _TestUtil.docs(random, reader,
DocHelper.NO_NORMS_KEY,
@@ -142,7 +143,7 @@ public class TestSegmentReader extends LuceneTestCase {
null,
false);

assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);


DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader,
@@ -152,7 +153,7 @@ public class TestSegmentReader extends LuceneTestCase {
false);
// NOTE: prior rev of this test was failing to first
// call next here:
assertTrue(positions.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue(positions.docID() == 0);
assertTrue(positions.nextPosition() >= 0);
}

@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
@@ -63,7 +64,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null);
terms.seekCeil(new BytesRef("field"));
DocsEnum termDocs = _TestUtil.docs(random, terms, reader.getLiveDocs(), null, true);
if (termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docId = termDocs.docID();
assertTrue(docId == 0);
int freq = termDocs.freq();
@@ -142,19 +143,19 @@ public class TestSegmentTermDocs extends LuceneTestCase {
// without optimization (assumption skipInterval == 16)

// with next
assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.advance(0) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(0) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, tdocs.docID());
assertTrue(tdocs.advance(4) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(4) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(4, tdocs.docID());
assertTrue(tdocs.advance(9) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(9) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(9, tdocs.docID());
assertFalse(tdocs.advance(10) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS);

// without next
tdocs = _TestUtil.docs(random, reader,
@@ -164,13 +165,13 @@ public class TestSegmentTermDocs extends LuceneTestCase {
null,
false);

assertTrue(tdocs.advance(0) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(0) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, tdocs.docID());
assertTrue(tdocs.advance(4) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(4) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(4, tdocs.docID());
assertTrue(tdocs.advance(9) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(9) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(9, tdocs.docID());
assertFalse(tdocs.advance(10) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(10) != DocIdSetIterator.NO_MORE_DOCS);

// exactly skipInterval documents and therefore with optimization

@@ -182,21 +183,21 @@ public class TestSegmentTermDocs extends LuceneTestCase {
null,
true);

assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(11, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(12, tdocs.docID());
assertTrue(tdocs.advance(15) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(15) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(15, tdocs.docID());
assertTrue(tdocs.advance(24) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(24) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(24, tdocs.docID());
assertTrue(tdocs.advance(25) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(25) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(25, tdocs.docID());
assertFalse(tdocs.advance(26) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS);

// without next
tdocs = _TestUtil.docs(random, reader,
@@ -206,15 +207,15 @@ public class TestSegmentTermDocs extends LuceneTestCase {
null,
true);

assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
assertTrue(tdocs.advance(15) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(15) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(15, tdocs.docID());
assertTrue(tdocs.advance(24) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(24) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(24, tdocs.docID());
assertTrue(tdocs.advance(25) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(25) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(25, tdocs.docID());
assertFalse(tdocs.advance(26) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(26) != DocIdSetIterator.NO_MORE_DOCS);

// much more than skipInterval documents and therefore with optimization

@@ -226,23 +227,23 @@ public class TestSegmentTermDocs extends LuceneTestCase {
null,
true);

assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(26, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(27, tdocs.docID());
assertEquals(4, tdocs.freq());
assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(28, tdocs.docID());
assertTrue(tdocs.advance(40) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(40) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(40, tdocs.docID());
assertTrue(tdocs.advance(57) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(57) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(57, tdocs.docID());
assertTrue(tdocs.advance(74) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(74) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(74, tdocs.docID());
assertTrue(tdocs.advance(75) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(75) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(75, tdocs.docID());
assertFalse(tdocs.advance(76) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS);

//without next
tdocs = _TestUtil.docs(random, reader,
@@ -251,17 +252,17 @@ public class TestSegmentTermDocs extends LuceneTestCase {
MultiFields.getLiveDocs(reader),
null,
false);
assertTrue(tdocs.advance(5) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(26, tdocs.docID());
assertTrue(tdocs.advance(40) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(40) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(40, tdocs.docID());
assertTrue(tdocs.advance(57) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(57) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(57, tdocs.docID());
assertTrue(tdocs.advance(74) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(74) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(74, tdocs.docID());
assertTrue(tdocs.advance(75) != DocsEnum.NO_MORE_DOCS);
assertTrue(tdocs.advance(75) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(75, tdocs.docID());
assertFalse(tdocs.advance(76) != DocsEnum.NO_MORE_DOCS);
assertFalse(tdocs.advance(76) != DocIdSetIterator.NO_MORE_DOCS);

reader.close();
dir.close();

@@ -22,6 +22,7 @@ import java.util.List;
import java.util.Set;

import org.apache.lucene.util.*;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.*;
import org.apache.lucene.document.*;

@@ -117,14 +118,14 @@ public class TestStressAdvance extends LuceneTestCase {
}
if (upto == expected.size()) {
if (VERBOSE) {
System.out.println("  expect docID=" + DocsEnum.NO_MORE_DOCS + " actual=" + docID);
System.out.println("  expect docID=" + DocIdSetIterator.NO_MORE_DOCS + " actual=" + docID);
}
assertEquals(DocsEnum.NO_MORE_DOCS, docID);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docID);
} else {
if (VERBOSE) {
System.out.println("  expect docID=" + expected.get(upto) + " actual=" + docID);
}
assertTrue(docID != DocsEnum.NO_MORE_DOCS);
assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(expected.get(upto).intValue(), docID);
}
}

@@ -34,6 +34,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.*;
@@ -336,7 +337,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
DocsEnum docs = null;
while(termsEnum.next() != null) {
docs = _TestUtil.docs(random, termsEnum, null, docs, false);
while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
fail("r1 is not empty but r2 is");
}
}
@@ -362,18 +363,18 @@ public class TestStressIndexing2 extends LuceneTestCase {
termDocs2 = null;
}

if (termDocs1.nextDoc() == DocsEnum.NO_MORE_DOCS) {
if (termDocs1.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
// This doc is deleted and wasn't replaced
assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocsEnum.NO_MORE_DOCS);
assertTrue(termDocs2 == null || termDocs2.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
continue;
}

int id1 = termDocs1.docID();
assertEquals(DocsEnum.NO_MORE_DOCS, termDocs1.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs1.nextDoc());

assertTrue(termDocs2.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(termDocs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int id2 = termDocs2.docID();
assertEquals(DocsEnum.NO_MORE_DOCS, termDocs2.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, termDocs2.nextDoc());

r2r1[id2] = id1;

@@ -409,7 +410,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println("  " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
System.out.println("    doc=" + dpEnum.docID() + " freq=" + freq);
for(int posUpto=0;posUpto<freq;posUpto++) {
@@ -418,7 +419,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
} else {
dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
System.out.println("    doc=" + dEnum.docID() + " freq=" + freq);
}
@@ -443,7 +444,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println("  " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
dpEnum = termsEnum3.docsAndPositions(null, dpEnum, false);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
System.out.println("    doc=" + dpEnum.docID() + " freq=" + freq);
for(int posUpto=0;posUpto<freq;posUpto++) {
@@ -452,7 +453,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
} else {
dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
System.out.println("    doc=" + dEnum.docID() + " freq=" + freq);
}
@@ -508,7 +509,7 @@ public class TestStressIndexing2 extends LuceneTestCase {

//System.out.println("TEST: term1=" + term1);
docs1 = _TestUtil.docs(random, termsEnum1, liveDocs1, docs1, true);
while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = docs1.docID();
int f = docs1.freq();
info1[len1] = (((long)d)<<32) | f;
@@ -542,7 +543,7 @@ public class TestStressIndexing2 extends LuceneTestCase {

//System.out.println("TEST: term1=" + term1);
docs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, docs2, true);
while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = r2r1[docs2.docID()];
int f = docs2.freq();
info2[len2] = (((long)d)<<32) | f;
@@ -640,7 +641,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
// docIDs are not supposed to be equal
//int docID2 = dpEnum2.nextDoc();
//assertEquals(docID1, docID2);
assertTrue(docID1 != DocsEnum.NO_MORE_DOCS);
assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS);

int freq1 = dpEnum1.freq();
int freq2 = dpEnum2.freq();
@@ -665,8 +666,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
offsetAtt2.endOffset());
}
}
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum1.nextDoc());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum2.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc());
} else {
dEnum1 = _TestUtil.docs(random, termsEnum1, null, dEnum1, true);
dEnum2 = _TestUtil.docs(random, termsEnum2, null, dEnum2, true);
@@ -677,12 +678,12 @@ public class TestStressIndexing2 extends LuceneTestCase {
// docIDs are not supposed to be equal
//int docID2 = dEnum2.nextDoc();
//assertEquals(docID1, docID2);
assertTrue(docID1 != DocsEnum.NO_MORE_DOCS);
assertTrue(docID1 != DocIdSetIterator.NO_MORE_DOCS);
int freq1 = dEnum1.freq();
int freq2 = dEnum2.freq();
assertEquals(freq1, freq2);
assertEquals(DocsEnum.NO_MORE_DOCS, dEnum1.nextDoc());
assertEquals(DocsEnum.NO_MORE_DOCS, dEnum2.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum1.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dEnum2.nextDoc());
}
}

@@ -237,7 +237,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(docsEnum);
int doc = docsEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
}
assertNull(termsEnum.next());
@@ -264,17 +264,17 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(dpEnum);
int doc = dpEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
assertEquals(positions[i][j], dpEnum.nextPosition());
}
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
doc = dpEnum.docID();
assertTrue(doc == -1 || doc == DocIdSetIterator.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
@@ -282,7 +282,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertEquals(j*10, dpEnum.startOffset());
assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset());
}
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
}

Terms freqVector = reader.get(0).terms(testFields[1]); //no pos, no offset
@@ -316,15 +316,15 @@ public class TestTermVectorsReader extends LuceneTestCase {

dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
assertEquals(positions[i][j], dpEnum.nextPosition());
}
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
for (int j = 0; j < positions[i].length; j++) {
@@ -332,7 +332,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertEquals(j*10, dpEnum.startOffset());
assertEquals(j*10 + testTerms[i].length(), dpEnum.endOffset());
}
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
}
reader.close();
}

@@ -31,6 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
@@ -69,18 +70,18 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(1, termsEnum.totalTermFreq());

DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(8, dpEnum.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

// Token "abcd" occurred three times
assertEquals(new BytesRef("abcd"), termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertEquals(3, termsEnum.totalTermFreq());

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -93,7 +94,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());

assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
assertNull(termsEnum.next());
r.close();
dir.close();
@@ -120,7 +121,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -128,7 +129,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
dpEnum.nextPosition();
assertEquals(5, dpEnum.startOffset());
assertEquals(9, dpEnum.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

r.close();
dir.close();
@@ -155,7 +156,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -163,7 +164,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

r.close();
dir.close();
@@ -194,7 +195,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -202,7 +203,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

r.close();
dir.close();
@@ -230,7 +231,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);
assertEquals(2, termsEnum.totalTermFreq());

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());
@@ -238,7 +239,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
dpEnum.nextPosition();
assertEquals(9, dpEnum.startOffset());
assertEquals(13, dpEnum.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

r.close();
dir.close();
@@ -266,21 +267,21 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertNotNull(termsEnum.next());
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);

assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());

assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(11, dpEnum.startOffset());
assertEquals(17, dpEnum.endOffset());

assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(18, dpEnum.startOffset());
assertEquals(21, dpEnum.endOffset());
@@ -312,14 +313,14 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);

assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(1, dpEnum.startOffset());
assertEquals(7, dpEnum.endOffset());

assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
assertEquals(11, dpEnum.endOffset());
@@ -355,14 +356,14 @@ public class TestTermVectorsWriter extends LuceneTestCase {
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, true);

assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(0, dpEnum.startOffset());
assertEquals(4, dpEnum.endOffset());

assertNotNull(termsEnum.next());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(6, dpEnum.startOffset());
assertEquals(12, dpEnum.endOffset());

@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
@@ -123,7 +124,7 @@ public class TestTermdocPerf extends LuceneTestCase {
for (int i=0; i<iter; i++) {
tenum.seekCeil(new BytesRef("val"));
tdocs = _TestUtil.docs(random, tenum, MultiFields.getLiveDocs(reader), tdocs, false);
while (tdocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while (tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret += tdocs.docID();
}
}

@@ -35,6 +35,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -333,7 +334,7 @@ public class TestTermsEnum extends LuceneTestCase {
assertEquals(1, te.docFreq());
docsEnum = _TestUtil.docs(random, te, null, docsEnum, false);
final int docID = docsEnum.nextDoc();
assertTrue(docID != DocsEnum.NO_MORE_DOCS);
assertTrue(docID != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(docIDToID[docID], termToID.get(expected).intValue());
do {
loc++;

@@ -129,7 +129,7 @@ final class JustCompileSearch {
}

@Override
public FieldComparator setNextReader(AtomicReaderContext context)
public FieldComparator<Object> setNextReader(AtomicReaderContext context)
throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
@@ -144,7 +144,7 @@ final class JustCompileSearch {
static final class JustCompileFieldComparatorSource extends FieldComparatorSource {

@Override
public FieldComparator newComparator(String fieldname, int numHits,
public FieldComparator<?> newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}

@@ -51,7 +51,7 @@ public class TestCustomSearcherSort extends LuceneTestCase {
INDEX_SIZE = atLeast(2000);
index = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random, index);
RandomGen random = new RandomGen(this.random);
RandomGen random = new RandomGen(LuceneTestCase.random);
for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if to low the
// problem doesn't show up
Document doc = new Document();

@@ -139,7 +139,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}

@Override
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {

FieldCache.DocTermsIndex idIndex;
@@ -179,7 +179,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldname);
return this;
}

@@ -218,7 +218,7 @@ public class TestPositionIncrement extends LuceneTestCase {
false);

int count = 0;
assertTrue(tp.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS);
assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
// "a" occurs 4 times
assertEquals(4, tp.freq());
int expected = 0;
@@ -228,7 +228,7 @@ public class TestPositionIncrement extends LuceneTestCase {
assertEquals(6, tp.nextPosition());

// only one doc has "a"
assertEquals(DocsAndPositionsEnum.NO_MORE_DOCS, tp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, tp.nextDoc());

IndexSearcher is = newSearcher(readerFromWriter);

@@ -699,7 +699,7 @@ public class TestSort extends LuceneTestCase {
};

@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
docValues = FieldCache.DEFAULT.getInts(context.reader(), "parser", testIntParser, false);
return this;
}
@@ -712,7 +712,7 @@ public class TestSort extends LuceneTestCase {

static class MyFieldComparatorSource extends FieldComparatorSource {
@Override
public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
public FieldComparator<Integer> newComparator(String fieldname, int numHits, int sortPos, boolean reversed) {
return new MyFieldComparator(numHits);
}
}

@@ -161,7 +161,7 @@ public class TestSubScorerFreqs extends LuceneTestCase {
query.add(inner, Occur.MUST);
query.add(aQuery, Occur.MUST);
query.add(dQuery, Occur.MUST);
Set<String>[] occurList = new Set[] {
@SuppressWarnings({"rawtypes","unchecked"}) Set<String>[] occurList = new Set[] {
Collections.singleton(Occur.MUST.toString()),
new HashSet<String>(Arrays.asList(Occur.MUST.toString(), Occur.SHOULD.toString()))
};

@@ -135,19 +135,19 @@ public class TestTermVectors extends LuceneTestCase {
TermsEnum termsEnum = terms.iterator(null);
assertEquals("content", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[0], dpEnum.nextPosition());

assertEquals("here", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[1], dpEnum.nextPosition());

assertEquals("some", termsEnum.next().utf8ToString());
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(expectedPositions[2], dpEnum.nextPosition());

@@ -178,7 +178,7 @@ public class TestTermVectors extends LuceneTestCase {
while(true) {
dpEnum = termsEnum.docsAndPositions(null, dpEnum, shouldBeOffVector);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

dpEnum.nextPosition();

@@ -263,7 +263,7 @@ public class TestTermVectors extends LuceneTestCase {
String text = termsEnum.term().utf8ToString();
docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true);

while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docId = docs.docID();
int freq = docs.freq();
//System.out.println("Doc Id: " + docId + " freq " + freq);
@@ -428,7 +428,7 @@ public class TestTermVectors extends LuceneTestCase {
assertEquals(5, termsEnum.totalTermFreq());
DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null, false);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(5, dpEnum.freq());
for(int i=0;i<5;i++) {
assertEquals(i, dpEnum.nextPosition());
@@ -436,7 +436,7 @@ public class TestTermVectors extends LuceneTestCase {

dpEnum = termsEnum.docsAndPositions(null, dpEnum, true);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(5, dpEnum.freq());
for(int i=0;i<5;i++) {
dpEnum.nextPosition();

@@ -28,6 +28,7 @@ import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.TermContext;

@@ -121,7 +122,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
@Override
public int doc() {
if (current == null) {
return DocsEnum.NO_MORE_DOCS;
return DocIdSetIterator.NO_MORE_DOCS;
}
return current.doc() + leaves[leafOrd].docBase;
}
@@ -129,7 +130,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
@Override
public int start() {
if (current == null) {
return DocsEnum.NO_MORE_DOCS;
return DocIdSetIterator.NO_MORE_DOCS;
}
return current.start();
}
@@ -137,7 +138,7 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t
@Override
public int end() {
if (current == null) {
return DocsEnum.NO_MORE_DOCS;
return DocIdSetIterator.NO_MORE_DOCS;
}
return current.end();
}

@@ -126,7 +126,7 @@ public class TestAttributeSource extends LuceneTestCase {
src.addAttribute(TypeAttribute.class) instanceof TypeAttributeImpl);
}

@SuppressWarnings("unchecked")
@SuppressWarnings({"rawtypes","unchecked"})
public void testInvalidArguments() throws Exception {
try {
AttributeSource src = new AttributeSource();

@@ -69,7 +69,7 @@ public class TestVirtualMethod extends LuceneTestCase {
assertEquals(0, VirtualMethod.compareImplementationDistance(TestClass5.class, publicTestMethod, protectedTestMethod));
}

@SuppressWarnings("unchecked")
@SuppressWarnings({"rawtypes","unchecked"})
public void testExceptions() {
try {
// cast to Class to remove generics:

@@ -57,6 +57,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
import org.apache.lucene.util.fst.FST.Arc;
import org.apache.lucene.util.fst.FST.BytesReader;
import org.apache.lucene.util.fst.PairOutputs.Pair;
@@ -494,7 +495,7 @@ public class TestFSTs extends LuceneTestCase {

if (random.nextBoolean() && fst != null && !willRewrite) {
TestFSTs t = new TestFSTs();
IOContext context = t.newIOContext(random);
IOContext context = LuceneTestCase.newIOContext(random);
IndexOutput out = dir.createOutput("fst.bin", context);
fst.save(out);
out.close();
@@ -984,7 +985,7 @@ public class TestFSTs extends LuceneTestCase {
if (VERBOSE) {
System.out.println("  fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output));
}
final CountMinOutput cmo = prefixes.get(current.input);
final CountMinOutput<T> cmo = prefixes.get(current.input);
assertNotNull(cmo);
assertTrue(cmo.isLeaf || cmo.isFinal);
//if (cmo.isFinal && !cmo.isLeaf) {
@@ -1183,7 +1184,7 @@ public class TestFSTs extends LuceneTestCase {
}

final TermsEnum.SeekStatus seekResult = termsEnum.seekCeil(randomTerm);
final BytesRefFSTEnum.InputOutput fstSeekResult = fstEnum.seekCeil(randomTerm);
final InputOutput<Long> fstSeekResult = fstEnum.seekCeil(randomTerm);

if (seekResult == TermsEnum.SeekStatus.END) {
assertNull("got " + (fstSeekResult == null ? "null" : fstSeekResult.input.utf8ToString()) + " but expected null", fstSeekResult);
@@ -1224,7 +1225,7 @@ public class TestFSTs extends LuceneTestCase {
dir.close();
}

private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum fstEnum, boolean storeOrd) throws Exception {
private void assertSame(TermsEnum termsEnum, BytesRefFSTEnum<?> fstEnum, boolean storeOrd) throws Exception {
if (termsEnum.term() == null) {
assertNull(fstEnum.current());
} else {
@@ -1829,7 +1830,7 @@ public class TestFSTs extends LuceneTestCase {

public int verifyStateAndBelow(FST<Object> fst, Arc<Object> arc, int depth)
throws IOException {
if (fst.targetHasArcs(arc)) {
if (FST.targetHasArcs(arc)) {
int childCount = 0;
for (arc = fst.readFirstTargetArc(arc, arc);;
arc = fst.readNextArc(arc), childCount++)

@@ -12,6 +12,7 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
@@ -285,7 +286,7 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
"content",
new BytesRef("another"),
false);
assertTrue(tps.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, tps.freq());
assertEquals(3, tps.nextPosition());

@@ -31,6 +31,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -102,7 +103,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
MultiFields.getLiveDocs(reader),
null,
false);
assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
td = _TestUtil.docs(random,
reader,
"partnum",
@@ -110,7 +111,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
MultiFields.getLiveDocs(reader),
null,
false);
assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
}

// LUCENE-1441

@@ -59,7 +59,7 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase {
termAtt.copyBuffer(t.buffer(), 0, t.length());
offsetAtt.setOffset(t.startOffset(), t.endOffset());
posIncrAtt.setPositionIncrement(t.getPositionIncrement());
typeAtt.setType(TypeAttributeImpl.DEFAULT_TYPE);
typeAtt.setType(TypeAttribute.DEFAULT_TYPE);
return true;
} else {
return false;
@@ -1018,14 +1018,14 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase {
assertTokenStreamContents(filter,
new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},
new int[]{0,0,7,7,14,14,19}, new int[]{6,13,13,18,18,27,27},
new String[]{TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE},
new String[]{TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE},
new int[]{1,0,1,0,1,0,1}
);
wsTokenizer.reset(new StringReader("please divide this sentence"));
assertTokenStreamContents(filter,
new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},
new int[]{0,0,7,7,14,14,19}, new int[]{6,13,13,18,18,27,27},
new String[]{TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE,"shingle",TypeAttributeImpl.DEFAULT_TYPE},
new String[]{TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE,"shingle",TypeAttribute.DEFAULT_TYPE},
new int[]{1,0,1,0,1,0,1}
);
}

@@ -35,6 +35,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
@@ -110,7 +111,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null, true);
assertTrue(positions.nextDoc() != DocsEnum.NO_MORE_DOCS);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, positions.freq());
positions.nextPosition();
assertEquals(0, positions.startOffset());
@@ -118,7 +119,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
positions.nextPosition();
assertEquals(8, positions.startOffset());
assertEquals(12, positions.endOffset());
assertEquals(DocsEnum.NO_MORE_DOCS, positions.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, positions.nextDoc());
r.close();
dir.close();
}

@@ -29,6 +29,7 @@ package org.apache.lucene.analysis.icu.segmentation;
*/

import com.ibm.icu.lang.UCharacter;
import com.ibm.icu.lang.UCharacterEnums.ECharacterCategory;
import com.ibm.icu.lang.UScript;
import com.ibm.icu.text.UTF16;

@@ -110,7 +111,7 @@ final class ScriptIterator {
* value — should inherit the script value of its base character.
*/
if (isSameScript(scriptCode, sc)
|| UCharacter.getType(ch) == UCharacter.NON_SPACING_MARK) {
|| UCharacter.getType(ch) == ECharacterCategory.NON_SPACING_MARK) {
index += UTF16.getCharCount(ch);

/*

@@ -85,7 +85,7 @@ abstract class AbstractDictionary {
    * @return unicode String
    */
   public String getCCByGB2312Id(int ccid) {
-    if (ccid < 0 || ccid > WordDictionary.GB2312_CHAR_NUM)
+    if (ccid < 0 || ccid > AbstractDictionary.GB2312_CHAR_NUM)
       return "";
     int cc1 = ccid / 94 + 161;
     int cc2 = ccid % 94 + 161;

@@ -21,6 +21,7 @@ import org.apache.lucene.benchmark.quality.QualityQuery;
 import org.apache.lucene.benchmark.quality.QualityQueryParser;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserBase;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
@@ -66,7 +67,7 @@ public class SimpleQQParser implements QualityQueryParser {
     }
     BooleanQuery bq = new BooleanQuery();
     for (int i = 0; i < qqNames.length; i++)
-      bq.add(qp.parse(QueryParser.escape(qq.getValue(qqNames[i]))), BooleanClause.Occur.SHOULD);
+      bq.add(qp.parse(QueryParserBase.escape(qq.getValue(qqNames[i]))), BooleanClause.Occur.SHOULD);

     return bq;
   }

@@ -55,6 +55,7 @@ import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache.DocTermsIndex;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
@@ -497,7 +498,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     DocsEnum docs = null;
     while(termsEnum.next() != null) {
       docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(reader), docs, true);
-      while(docs.nextDoc() != docs.NO_MORE_DOCS) {
+      while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
         totalTokenCount2 += docs.freq();
       }
     }

@@ -6,6 +6,7 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;

@@ -106,7 +107,7 @@ class ParentArray {
     DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(indexReader, liveDocs,
         Consts.FIELD_PAYLOADS, new BytesRef(Consts.PAYLOAD_PARENT),
         false);
-    if ((positions == null || positions.advance(first) == DocsAndPositionsEnum.NO_MORE_DOCS) && first < num) {
+    if ((positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) && first < num) {
       throw new CorruptIndexException("Missing parent data for category " + first);
     }
     for (int i=first; i<num; i++) {
@@ -124,7 +125,7 @@ class ParentArray {
       // increment we added originally, so we get here the right numbers:
       prefetchParentOrdinal[i] = positions.nextPosition();

-      if (positions.nextDoc() == DocsAndPositionsEnum.NO_MORE_DOCS) {
+      if (positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
         if ( i+1 < num ) {
           throw new CorruptIndexException(
               "Missing parent data for category "+(i+1));

@@ -19,6 +19,7 @@ package org.apache.lucene.queries;

 import org.apache.lucene.index.*;
 import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -81,7 +82,7 @@ public class TermsFilter extends Filter {
         br.copyBytes(term.bytes());
         if (termsEnum.seekCeil(br) == TermsEnum.SeekStatus.FOUND) {
           docs = termsEnum.docs(acceptDocs, docs, false);
-          while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
             result.set(docs.docID());
           }
         }

@@ -69,7 +69,7 @@ public class BoostedQuery extends Query {
     public BoostedWeight(IndexSearcher searcher) throws IOException {
       this.searcher = searcher;
       this.qWeight = q.createWeight(searcher);
-      this.fcontext = boostVal.newContext(searcher);
+      this.fcontext = ValueSource.newContext(searcher);
       boostVal.createWeight(fcontext,searcher);
     }

@@ -67,7 +67,7 @@ public class FunctionQuery extends Query {

     public FunctionWeight(IndexSearcher searcher) throws IOException {
       this.searcher = searcher;
-      this.context = func.newContext(searcher);
+      this.context = ValueSource.newContext(searcher);
       func.createWeight(context, searcher);
     }

@@ -17,6 +17,7 @@ package org.apache.lucene.queryparser.ext;
  * limitations under the License.
  */
 import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserBase;

 import java.util.HashMap;
 import java.util.Map;
@@ -139,7 +140,7 @@ public class Extensions {
    * a backslash character.
    */
   public String escapeExtensionField(String extfield) {
-    return QueryParser.escape(extfield);
+    return QueryParserBase.escape(extfield);
   }

   /**

@@ -88,7 +88,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
     qp.setPhraseSlop(0);

     // non-default operator:
-    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    qp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
     assertEquals("+(multi multi2) +foo", qp.parse("multi foo").toString());

   }

@@ -117,7 +117,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     assertEquals("(b:one t:one) f:two", q.toString());

     // AND mode:
-    mfqp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    mfqp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
     q = mfqp.parse("one two");
     assertEquals("+(b:one t:one) +(b:two t:two)", q.toString());
     q = mfqp.parse("\"aa bb cc\" \"dd ee\"");

@@ -32,7 +32,7 @@ public class TestQueryParser extends QueryParserTestBase {
     if (a == null)
       a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
-    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
+    qp.setDefaultOperator(QueryParserBase.OR_OPERATOR);
     return qp;
   }
 }

@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserBase;
 import org.apache.lucene.queryparser.util.QueryParserTestBase;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -47,7 +48,7 @@ public class TestExtendableQueryParser extends QueryParserTestBase {
     QueryParser qp = extensions == null ? new ExtendableQueryParser(
         TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
         TEST_VERSION_CURRENT, "field", a, extensions);
-    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
+    qp.setDefaultOperator(QueryParserBase.OR_OPERATOR);
     return qp;
   }

@@ -38,6 +38,7 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.queryparser.classic.CharStream;
 import org.apache.lucene.queryparser.classic.ParseException;
 import org.apache.lucene.queryparser.classic.QueryParser;
+import org.apache.lucene.queryparser.classic.QueryParserBase;
 import org.apache.lucene.queryparser.classic.QueryParserTokenManager;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -160,7 +161,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {

   public void assertEscapedQueryEquals(String query, Analyzer a, String result)
     throws Exception {
-    String escapedQuery = QueryParser.escape(query);
+    String escapedQuery = QueryParserBase.escape(query);
     if (!escapedQuery.equals(result)) {
       fail("Query /" + query + "/ yielded /" + escapedQuery
            + "/, expecting /" + result + "/");
@@ -200,7 +201,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
     if (a == null)
       a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
-    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
+    qp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
     return qp.parse(query);
   }

@@ -382,11 +383,11 @@ public abstract class QueryParserTestBase extends LuceneTestCase {

     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random));
     // make sure OR is the default:
-    assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(QueryParser.AND_OPERATOR);
-    assertEquals(QueryParser.AND_OPERATOR, qp.getDefaultOperator());
-    qp.setDefaultOperator(QueryParser.OR_OPERATOR);
-    assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
+    assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator());
+    qp.setDefaultOperator(QueryParserBase.AND_OPERATOR);
+    assertEquals(QueryParserBase.AND_OPERATOR, qp.getDefaultOperator());
+    qp.setDefaultOperator(QueryParserBase.OR_OPERATOR);
+    assertEquals(QueryParserBase.OR_OPERATOR, qp.getDefaultOperator());
   }

   public void testPunct() throws Exception {

@@ -412,7 +412,7 @@ public class TestSpellChecker extends LuceneTestCase {
     assertEquals(4, searchers.size());
     int num_field2 = this.numdoc();
     assertEquals(num_field2, num_field1 + 1);
-    int numThreads = 5 + this.random.nextInt(5);
+    int numThreads = 5 + LuceneTestCase.random.nextInt(5);
     ExecutorService executor = Executors.newFixedThreadPool(numThreads);
     SpellCheckWorker[] workers = new SpellCheckWorker[numThreads];
     for (int i = 0; i < numThreads; i++) {

@@ -19,7 +19,9 @@ package org.apache.solr.handler.dataimport;
 import com.sun.mail.imap.IMAPMessage;

 import org.apache.tika.Tika;
+import org.apache.tika.metadata.HttpHeaders;
 import org.apache.tika.metadata.Metadata;
+import org.apache.tika.metadata.TikaMetadataKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -169,8 +171,8 @@ public class MailEntityProcessor extends EntityProcessorBase {
       InputStream is = part.getInputStream();
       String fileName = part.getFileName();
       Metadata md = new Metadata();
-      md.set(Metadata.CONTENT_TYPE, ctype.getBaseType().toLowerCase(Locale.ENGLISH));
-      md.set(Metadata.RESOURCE_NAME_KEY, fileName);
+      md.set(HttpHeaders.CONTENT_TYPE, ctype.getBaseType().toLowerCase(Locale.ENGLISH));
+      md.set(TikaMetadataKeys.RESOURCE_NAME_KEY, fileName);
       String content = tika.parseToString(is, md);
       if (disp != null && disp.equalsIgnoreCase(Part.ATTACHMENT)) {
         if (row.get(ATTACHMENT) == null)

@@ -34,6 +34,7 @@ import org.xml.sax.helpers.DefaultHandler;

 import javax.xml.transform.OutputKeys;
 import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.sax.SAXTransformerFactory;
 import javax.xml.transform.sax.TransformerHandler;
 import javax.xml.transform.stream.StreamResult;
@@ -142,7 +143,7 @@ public class TikaEntityProcessor extends EntityProcessorBase {
   private static ContentHandler getHtmlHandler(Writer writer)
           throws TransformerConfigurationException {
     SAXTransformerFactory factory = (SAXTransformerFactory)
-            SAXTransformerFactory.newInstance();
+            TransformerFactory.newInstance();
     TransformerHandler handler = factory.newTransformerHandler();
     handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "html");
     handler.setResult(new StreamResult(writer));
@@ -185,7 +186,7 @@ public class TikaEntityProcessor extends EntityProcessorBase {
   private static ContentHandler getXmlContentHandler(Writer writer)
           throws TransformerConfigurationException {
     SAXTransformerFactory factory = (SAXTransformerFactory)
-            SAXTransformerFactory.newInstance();
+            TransformerFactory.newInstance();
     TransformerHandler handler = factory.newTransformerHandler();
     handler.getTransformer().setOutputProperty(OutputKeys.METHOD, "xml");
     handler.setResult(new StreamResult(writer));

@@ -211,7 +211,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
     tmpdir.delete();
     tmpdir.mkdir();
     tmpdir.deleteOnExit();
-    TestFileListEntityProcessor.createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"),
+    AbstractDataImportHandlerTestCase.createFile(tmpdir, "x.xsl", xsl.getBytes("UTF-8"),
             false);
     Map entityAttrs = createMap("name", "e",
             XPathEntityProcessor.USE_SOLR_ADD_SCHEMA, "true", "xsl", ""

@@ -36,7 +36,9 @@ import org.apache.solr.update.AddUpdateCommand;
 import org.apache.solr.update.processor.UpdateRequestProcessor;
 import org.apache.tika.config.TikaConfig;
 import org.apache.tika.exception.TikaException;
+import org.apache.tika.metadata.HttpHeaders;
 import org.apache.tika.metadata.Metadata;
+import org.apache.tika.metadata.TikaMetadataKeys;
 import org.apache.tika.mime.MediaType;
 import org.apache.tika.parser.AutoDetectParser;
 import org.apache.tika.parser.DefaultParser;
@@ -150,11 +152,11 @@ public class ExtractingDocumentLoader extends ContentStreamLoader {
       // then Tika can make use of it in guessing the appropriate MIME type:
       String resourceName = req.getParams().get(ExtractingParams.RESOURCE_NAME, null);
       if (resourceName != null) {
-        metadata.add(Metadata.RESOURCE_NAME_KEY, resourceName);
+        metadata.add(TikaMetadataKeys.RESOURCE_NAME_KEY, resourceName);
       }
       // Provide stream's content type as hint for auto detection
       if(stream.getContentType() != null) {
-        metadata.add(Metadata.CONTENT_TYPE, stream.getContentType());
+        metadata.add(HttpHeaders.CONTENT_TYPE, stream.getContentType());
       }

       InputStream inputStream = null;
@@ -167,7 +169,7 @@ public class ExtractingDocumentLoader extends ContentStreamLoader {
         // HtmlParser and TXTParser regard Metadata.CONTENT_ENCODING in metadata
         String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType());
         if(charset != null){
-          metadata.add(Metadata.CONTENT_ENCODING, charset);
+          metadata.add(HttpHeaders.CONTENT_ENCODING, charset);
         }

         String xpathExpr = params.get(ExtractingParams.XPATH_EXPRESSION);

@@ -24,6 +24,7 @@ import org.apache.solr.schema.DateField;
 import org.apache.solr.schema.IndexSchema;
 import org.apache.solr.schema.SchemaField;
 import org.apache.tika.metadata.Metadata;
+import org.apache.tika.metadata.TikaMetadataKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.xml.sax.Attributes;
@@ -191,7 +192,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara
     if (sf==null && unknownFieldPrefix.length() > 0) {
       name = unknownFieldPrefix + name;
       sf = schema.getFieldOrNull(name);
-    } else if (sf == null && defaultField.length() > 0 && name.equals(Metadata.RESOURCE_NAME_KEY) == false /*let the fall through below handle this*/){
+    } else if (sf == null && defaultField.length() > 0 && name.equals(TikaMetadataKeys.RESOURCE_NAME_KEY) == false /*let the fall through below handle this*/){
       name = defaultField;
       sf = schema.getFieldOrNull(name);
     }
@@ -201,7 +202,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara
     // ExtractingDocumentLoader.load(). You shouldn't have to define a mapping for this
     // field just because you specified a resource.name parameter to the handler, should
     // you?
-    if (sf == null && unknownFieldPrefix.length()==0 && name == Metadata.RESOURCE_NAME_KEY) {
+    if (sf == null && unknownFieldPrefix.length()==0 && name == TikaMetadataKeys.RESOURCE_NAME_KEY) {
       return;
     }

@@ -25,6 +25,7 @@ import org.apache.solr.request.SolrQueryRequest;
 import org.apache.velocity.Template;
 import org.apache.velocity.VelocityContext;
 import org.apache.velocity.app.VelocityEngine;
+import org.apache.velocity.runtime.RuntimeConstants;
 import org.apache.velocity.tools.generic.*;

 import java.io.*;
@@ -117,14 +118,14 @@ public class VelocityResponseWriter implements QueryResponseWriter {
     if (template_root != null) {
       baseDir = new File(template_root);
     }
-    engine.setProperty(VelocityEngine.FILE_RESOURCE_LOADER_PATH, baseDir.getAbsolutePath());
+    engine.setProperty(RuntimeConstants.FILE_RESOURCE_LOADER_PATH, baseDir.getAbsolutePath());
     engine.setProperty("params.resource.loader.instance", new SolrParamResourceLoader(request));
     SolrVelocityResourceLoader resourceLoader =
       new SolrVelocityResourceLoader(request.getCore().getSolrConfig().getResourceLoader());
     engine.setProperty("solr.resource.loader.instance", resourceLoader);

     // TODO: Externalize Velocity properties
-    engine.setProperty(VelocityEngine.RESOURCE_LOADER, "params,file,solr");
+    engine.setProperty(RuntimeConstants.RESOURCE_LOADER, "params,file,solr");
     String propFile = request.getParams().get("v.properties");
     try {
       if (propFile == null)

@@ -150,7 +150,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
       // the CMD_GET_FILE_LIST command.
       //
       core.getDeletionPolicy().setReserveDuration(commitPoint.getGeneration(), reserveCommitDuration);
-      rsp.add(CMD_INDEX_VERSION, core.getDeletionPolicy().getCommitTimestamp(commitPoint));
+      rsp.add(CMD_INDEX_VERSION, IndexDeletionPolicyWrapper.getCommitTimestamp(commitPoint));
       rsp.add(GENERATION, commitPoint.getGeneration());
     } else {
       // This happens when replication is not configured to happen after startup and no commit/optimize
@@ -229,7 +229,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
     for (IndexCommit c : commits.values()) {
       try {
         NamedList<Object> nl = new NamedList<Object>();
-        nl.add("indexVersion", core.getDeletionPolicy().getCommitTimestamp(c));
+        nl.add("indexVersion", IndexDeletionPolicyWrapper.getCommitTimestamp(c));
         nl.add(GENERATION, c.getGeneration());
         nl.add(CMD_GET_FILE_LIST, c.getFileNames());
         l.add(nl);

@@ -27,6 +27,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
@@ -399,7 +400,7 @@ public class LukeRequestHandler extends RequestHandlerBase
           false);
       if (docsEnum != null) {
         int docId;
-        if ((docId = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+        if ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
           return reader.document(docId);
         }
       }

@@ -821,7 +821,7 @@ public class SimpleFacets {
       }

       final String gap = required.getFieldParam(f,FacetParams.FACET_DATE_GAP);
-      final DateMathParser dmp = new DateMathParser(ft.UTC, Locale.US);
+      final DateMathParser dmp = new DateMathParser(DateField.UTC, Locale.US);

       final int minCount = params.getFieldInt(f,FacetParams.FACET_MINCOUNT, 0);

@@ -349,8 +349,8 @@ class SpatialDistanceQuery extends ExtendedQueryBase implements PostFilter {

     public SpatialWeight(IndexSearcher searcher) throws IOException {
       this.searcher = searcher;
-      this.latContext = latSource.newContext(searcher);
-      this.lonContext = lonSource.newContext(searcher);
+      this.latContext = ValueSource.newContext(searcher);
+      this.lonContext = ValueSource.newContext(searcher);
       latSource.createWeight(latContext, searcher);
       lonSource.createWeight(lonContext, searcher);
     }

@@ -103,25 +103,25 @@ class ExtendedDismaxQParser extends QParser {
     final String minShouldMatch =
       DisMaxQParser.parseMinShouldMatch(req.getSchema(), solrParams);

-    queryFields = U.parseFieldBoosts(solrParams.getParams(DMP.QF));
+    queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF));
     if (0 == queryFields.size()) {
       queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f);
     }

     // Boosted phrase of the full query string
     Map<String,Float> phraseFields =
-      U.parseFieldBoosts(solrParams.getParams(DMP.PF));
+      SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.PF));
     // Boosted Bi-Term Shingles from the query string
     Map<String,Float> phraseFields2 =
-      U.parseFieldBoosts(solrParams.getParams("pf2"));
+      SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf2"));
     // Boosted Tri-Term Shingles from the query string
     Map<String,Float> phraseFields3 =
-      U.parseFieldBoosts(solrParams.getParams("pf3"));
+      SolrPluginUtils.parseFieldBoosts(solrParams.getParams("pf3"));

-    float tiebreaker = solrParams.getFloat(DMP.TIE, 0.0f);
+    float tiebreaker = solrParams.getFloat(DisMaxParams.TIE, 0.0f);

-    int pslop = solrParams.getInt(DMP.PS, 0);
-    int qslop = solrParams.getInt(DMP.QS, 0);
+    int pslop = solrParams.getInt(DisMaxParams.PS, 0);
+    int qslop = solrParams.getInt(DisMaxParams.QS, 0);

     // remove stopwords from mandatory "matching" component?
     boolean stopwords = solrParams.getBool("stopwords", true);
@@ -137,7 +137,7 @@ class ExtendedDismaxQParser extends QParser {
     altUserQuery = null;
     if( userQuery == null || userQuery.length() < 1 ) {
       // If no query is specified, we may have an alternate
-      String altQ = solrParams.get( DMP.ALTQ );
+      String altQ = solrParams.get( DisMaxParams.ALTQ );
       if (altQ != null) {
         altQParser = subQuery(altQ, null);
         altUserQuery = altQParser.getQuery();
@@ -248,7 +248,7 @@ class ExtendedDismaxQParser extends QParser {

     if (parsedUserQuery != null && doMinMatched) {
       if (parsedUserQuery instanceof BooleanQuery) {
-        U.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
+        SolrPluginUtils.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
       }
     }

@@ -285,8 +285,8 @@ class ExtendedDismaxQParser extends QParser {

       if (parsedUserQuery instanceof BooleanQuery) {
         BooleanQuery t = new BooleanQuery();
-        U.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
-        U.setMinShouldMatch(t, minShouldMatch);
+        SolrPluginUtils.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
+        SolrPluginUtils.setMinShouldMatch(t, minShouldMatch);
         parsedUserQuery = t;
       }
     }
@@ -326,7 +326,7 @@ class ExtendedDismaxQParser extends QParser {


     /* * * Boosting Query * * */
-    boostParams = solrParams.getParams(DMP.BQ);
+    boostParams = solrParams.getParams(DisMaxParams.BQ);
     //List<Query> boostQueries = U.parseQueryStrings(req, boostParams);
     boostQueries=null;
     if (boostParams!=null && boostParams.length>0) {
@@ -345,7 +345,7 @@ class ExtendedDismaxQParser extends QParser {

     /* * * Boosting Functions * * */

-    String[] boostFuncs = solrParams.getParams(DMP.BF);
+    String[] boostFuncs = solrParams.getParams(DisMaxParams.BF);
     if (null != boostFuncs && 0 != boostFuncs.length) {
       for (String boostFunc : boostFuncs) {
         if(null == boostFunc || "".equals(boostFunc)) continue;

@@ -36,6 +36,7 @@ import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.queries.function.FunctionValues;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.solr.core.SolrCore;
@@ -278,7 +279,7 @@ public class FileFloatSource extends ValueSource {

       docsEnum = termsEnum.docs(null, docsEnum, false);
       int doc;
-      while ((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+      while ((doc = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
         vals[doc] = fval;
       }
     }

@@ -21,6 +21,7 @@ import java.io.StringReader;
 import java.util.HashMap;
 import java.util.Map;

+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.solr.SolrTestCaseJ4;
@@ -211,12 +212,12 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 {

     TokenStream ts = factoryDefault.create(
             new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
-    BaseTokenTestCase.assertTokenStreamContents(ts,
+    BaseTokenStreamTestCase.assertTokenStreamContents(ts,
             new String[] { "I", "borrowed", "5", "400", "00", "540000", "at", "25", "interest", "rate", "interestrate" });

     ts = factoryDefault.create(
             new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
-    BaseTokenTestCase.assertTokenStreamContents(ts,
+    BaseTokenStreamTestCase.assertTokenStreamContents(ts,
             new String[] { "foo", "bar", "foobar" });


@@ -229,13 +230,13 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 {

     ts = factoryCustom.create(
             new MockTokenizer(new StringReader(testText), MockTokenizer.WHITESPACE, false));
-    BaseTokenTestCase.assertTokenStreamContents(ts,
+    BaseTokenStreamTestCase.assertTokenStreamContents(ts,
            new String[] { "I", "borrowed", "$5,400.00", "at", "25%", "interest", "rate", "interestrate" });

     /* test custom behavior with a char > 0x7F, because we had to make a larger byte[] */
     ts = factoryCustom.create(
             new MockTokenizer(new StringReader("foo\u200Dbar"), MockTokenizer.WHITESPACE, false));
-    BaseTokenTestCase.assertTokenStreamContents(ts,
+    BaseTokenStreamTestCase.assertTokenStreamContents(ts,
             new String[] { "foo\u200Dbar" });
   }
 }

@@ -20,6 +20,7 @@ package org.apache.solr.cloud;
 import java.io.File;
 import java.util.concurrent.atomic.AtomicInteger;

+import junit.framework.Assert;
 import junit.framework.TestCase;

 import org.apache.solr.common.cloud.SolrZkClient;
@@ -91,7 +92,7 @@ public class ZkSolrClientTest extends AbstractSolrTestCase {

     try {
       zkClient.makePath("collections/collection2", false);
-      TestCase.fail("Server should be down here");
+      Assert.fail("Server should be down here");
     } catch (KeeperException.ConnectionLossException e) {

     }

@@ -71,7 +71,7 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{
         + System.getProperty("file.separator") + "data");
     dataDir.mkdirs();

-    solrConfig = h.createConfig("solrconfig.xml");
+    solrConfig = TestHarness.createConfig("solrconfig.xml");
     h = new TestHarness( dataDir.getAbsolutePath(),
         solrConfig,
         "schema12.xml");