mirror of https://github.com/apache/lucene.git
LUCENE-2383: some small cleanups after landing flex
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@931668 13f79535-47bb-0310-9956-ffa450edef68
parent 1c058e28cb
commit 0a5e6806f3
@@ -178,7 +178,13 @@ API Changes
   DocsEnum, DocsAndPositionsEnum). One big difference is that field
   and terms are now enumerated separately: a TermsEnum provides a
   BytesRef (wraps a byte[]) per term within a single field, not a
-  Term.
+  Term. Another is that when asking for a Docs/AndPositionsEnum, you
+  now specify the skipDocs explicitly (typically this will be the
+  deleted docs, but in general you can provide any Bits).
+
+* LUCENE-1458, LUCENE-2111: IndexReader now directly exposes its
+  deleted docs (getDeletedDocs), providing a new Bits interface to
+  directly query by doc ID.

 Bug fixes

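A minimal sketch of the enumeration flow described above, assuming an open IndexReader named "reader" and an indexed field "body" (both placeholders); every call used here (fields(), terms(), iterator(), docs(skipDocs, reuse), getDeletedDocs(), NO_MORE_DOCS) also appears in the DocumentsWriter hunk further down:

    // Fields and terms are enumerated separately under the flex API.
    Fields fields = reader.fields();
    if (fields != null) {                    // a reader may have no postings at all
      Terms terms = fields.terms("body");    // one field at a time
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator();
        BytesRef term;                       // reused byte[] wrapper, one value per term
        while ((term = termsEnum.next()) != null) {
          // skipDocs is passed explicitly; the deleted docs are the typical
          // choice, but any Bits implementation is accepted
          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), null);
          if (docsEnum != null) {
            int docID;
            while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
              // process docID
            }
          }
        }
      }
    }
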
@@ -315,8 +321,10 @@ New features

 * LUCENE-1458, LUCENE-2111: With flexible indexing it is now possible
   for an application to create its own postings codec, to alter how
-  fields, terms, docs and positions are encoded into the idnex. The
-  standard codec is the default codec.
+  fields, terms, docs and positions are encoded into the index. The
+  standard codec is the default codec. Both IndexWriter and
+  IndexReader accept a CodecProvider class to obtain codecs for newly
+  written segments as well as existing segments opened for reading.

 * LUCENE-1458, LUCENE-2111: Some experimental codecs have been added
   for flexible indexing, including pulsing codec (inlines
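A rough usage sketch for the CodecProvider entry above (not part of this commit's diff; "dir" is an assumed Directory, and passing a custom provider in place of getDefault() is the extension point the CheckIndex change below exposes):

    // Obtain codecs through a provider instead of hard-wiring the standard codec.
    CodecProvider codecs = CodecProvider.getDefault();   // resolves the standard codec
    CheckIndex checker = new CheckIndex(dir);            // assumed: dir is an index Directory
    CheckIndex.Status status = checker.checkIndex(null, codecs);  // null = check all segments
    if (!status.clean) {
      System.out.println("index has problems");
    }
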
@@ -18,8 +18,6 @@ package org.apache.lucene.search;
  */

 import java.io.IOException;
-import java.text.Collator;
-import java.util.Locale;

 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;

@@ -66,9 +64,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     Query q = new TermQuery(new Term("body","body"));

     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q, FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;

@@ -213,9 +209,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     Query q = new TermQuery(new Term("body","body"));

     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -305,9 +299,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {

     // test id, bounded on both ends

-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -397,9 +389,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {

     // test id, bounded on both ends

-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -529,7 +519,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     assertEquals("infinity special case", 0, result.length);
   }

-  // test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
+  // test using a sparse index (with deleted docs).
   public void testSparseIndex() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), T, IndexWriter.MaxFieldLength.LIMITED);

@@ -550,27 +540,21 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     assertTrue(reader.hasDeletions());

     ScoreDoc[] result;
-    FieldCacheRangeFilter fcrf;
     Query q = new TermQuery(new Term("body","body"));

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
   }

@@ -282,7 +282,7 @@ public class CheckIndex {
     return checkIndex(null);
   }

-  protected Status checkIndex(List<String> onlySegments) throws IOException {
+  public Status checkIndex(List<String> onlySegments) throws IOException {
     return checkIndex(onlySegments, CodecProvider.getDefault());
   }

@@ -298,7 +298,7 @@ public class CheckIndex {
    * <p><b>WARNING</b>: make sure
    * you only call this when the index is not opened by any
    * writer. */
-  protected Status checkIndex(List<String> onlySegments, CodecProvider codecs) throws IOException {
+  public Status checkIndex(List<String> onlySegments, CodecProvider codecs) throws IOException {
     NumberFormat nf = NumberFormat.getInstance();
     SegmentInfos sis = new SegmentInfos();
     Status result = new Status();
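With both overloads public after this change, external tools can run segment-scoped checks; a sketch under the javadoc's own caveat that no writer may hold the index open ("dir" and the segment name "_0" are placeholders, and java.util.Arrays is assumed imported):

    CheckIndex checker = new CheckIndex(dir);   // assumed: no IndexWriter is open on dir
    // The one-arg overload shown above delegates to CodecProvider.getDefault().
    CheckIndex.Status status = checker.checkIndex(Arrays.asList("_0"));
    if (!status.clean) {
      System.out.println("segment _0 has problems");
    }
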
@@ -1050,58 +1050,60 @@ final class DocumentsWriter {

     // Delete by term
     if (deletesFlushed.terms.size() > 0) {
       try {
         Fields fields = reader.fields();
+        if (fields == null) {
+          // This reader has no postings
+          return false;
+        }
+
         TermsEnum termsEnum = null;

         String currentField = null;
         BytesRef termRef = new BytesRef();
         DocsEnum docs = null;

         for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
           Term term = entry.getKey();
           // Since we visit terms sorted, we gain performance
           // by re-using the same TermsEnum and seeking only
           // forwards
           if (term.field() != currentField) {
             assert currentField == null || currentField.compareTo(term.field()) < 0;
             currentField = term.field();
             Terms terms = fields.terms(currentField);
             if (terms != null) {
               termsEnum = terms.iterator();
             } else {
               termsEnum = null;
             }
           }

           if (termsEnum == null) {
             continue;
           }
           assert checkDeleteTerm(term);

           termRef.copy(term.text());

           if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) {
             DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);

             if (docsEnum != null) {
               docs = docsEnum;
               int limit = entry.getValue().getNum();
               while (true) {
                 final int docID = docs.nextDoc();
                 if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
                   break;
                 }
                 reader.deleteDocument(docID);
                 any = true;
               }
             }
           }
         }
       } finally {
         //docs.close();
       }
     }

     // Delete by docID
     for (Integer docIdInt : deletesFlushed.docIDs) {
       int docID = docIdInt.intValue();

@@ -77,8 +77,11 @@ public final class MultiFields extends Fields {
       new ReaderUtil.Gather(r) {
         @Override
         protected void add(int base, IndexReader r) throws IOException {
-          fields.add(r.fields());
-          slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+          final Fields f = r.fields();
+          if (f != null) {
+            fields.add(f);
+            slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+          }
         }
       }.run();

@@ -572,11 +572,14 @@ final class SegmentMerger {
       docBase = new ReaderUtil.Gather(readers.get(i)) {
         @Override
         protected void add(int base, IndexReader r) throws IOException {
-          subReaders.add(r);
-          fields.add(r.fields());
-          slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
-          bits.add(r.getDeletedDocs());
-          bitsStarts.add(base);
+          final Fields f = r.fields();
+          if (f != null) {
+            subReaders.add(r);
+            fields.add(f);
+            slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+            bits.add(r.getDeletedDocs());
+            bitsStarts.add(base);
+          }
         }
       }.run(docBase);
     }

@@ -26,9 +26,6 @@ import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.CodecProvider;

 /**
- * This class is not meant for public usage; it's only
- * public in order to expose access across packages. It's
- * used internally when updating the index.
  * @lucene.experimental
  */
 public class SegmentWriteState {

@@ -66,17 +66,13 @@ public class SingleIntIndexInput extends IntIndexInput {

   class Index extends IntIndexInput.Index {
     private long fp;
-    // nocmmit: only for asserts
-    boolean first = true;

     @Override
     public void read(IndexInput indexIn, boolean absolute)
       throws IOException {
       if (absolute) {
         fp = indexIn.readVLong();
-        first = false;
       } else {
-        assert !first;
         fp += indexIn.readVLong();
       }
     }

@@ -84,7 +80,6 @@ public class SingleIntIndexInput extends IntIndexInput {
     @Override
     public void set(IntIndexInput.Index other) {
       fp = ((Index) other).fp;
-      first = false;
     }

     @Override
@@ -100,7 +95,6 @@ public class SingleIntIndexInput extends IntIndexInput {
     @Override
     public Object clone() {
       Index other = new Index();
-      other.first = first;
       other.fp = fp;
       return other;
     }

@@ -504,7 +504,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {

   static abstract class FieldCacheDocIdSet extends DocIdSet {
     private final IndexReader reader;
-    private boolean canIgnoreDeletedDocs;
+    private final boolean canIgnoreDeletedDocs;

     FieldCacheDocIdSet(IndexReader reader, boolean canIgnoreDeletedDocs) {
       this.reader = reader;
@@ -518,68 +518,89 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
     abstract boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException;

     /**
-     * this DocIdSet is cacheable, if it can ignore deletions
+     * this DocIdSet is always cacheable (does not go back
+     * to the reader for iteration)
      */
     @Override
     public boolean isCacheable() {
-      return canIgnoreDeletedDocs || !reader.hasDeletions();
+      return true;
     }

     @Override
     public DocIdSetIterator iterator() throws IOException {
-      // Synchronization needed because deleted docs BitVector
-      // can change after call to hasDeletions until TermDocs creation.
-      // We only use an iterator with termDocs, when this was requested (e.g.
-      // range contains 0)
-      // and the index has deletions
-      final Bits skipDocs;
-      synchronized (reader) {
-        if (isCacheable()) {
-          skipDocs = null;
-        } else {
-          skipDocs = MultiFields.getDeletedDocs(reader);
-        }
-      }
-      final int maxDoc = reader.maxDoc();
-
-      // a DocIdSetIterator generating docIds by
-      // incrementing a variable & checking skipDocs -
-      return new DocIdSetIterator() {
-        private int doc = -1;
-        @Override
-        public int docID() {
-          return doc;
-        }
-
-        @Override
-        public int nextDoc() {
-          try {
-            do {
-              doc++;
-            } while ((skipDocs != null && doc < maxDoc && skipDocs.get(doc))
-                     || !matchDoc(doc));
-            return doc;
-          } catch (ArrayIndexOutOfBoundsException e) {
-            return doc = NO_MORE_DOCS;
-          }
-        }
-
-        @Override
-        public int advance(int target) {
-          try {
-            doc = target;
-            while (!matchDoc(doc)) {
-              doc++;
-            }
-            return doc;
-          } catch (ArrayIndexOutOfBoundsException e) {
-            return doc = NO_MORE_DOCS;
-          }
-        }
-      };
+
+      final Bits skipDocs = canIgnoreDeletedDocs ? null : MultiFields.getDeletedDocs(reader);
+
+      if (skipDocs == null) {
+        // Specialization optimization disregard deletions
+        return new DocIdSetIterator() {
+          private int doc = -1;
+          @Override
+          public int docID() {
+            return doc;
+          }
+
+          @Override
+          public int nextDoc() {
+            try {
+              do {
+                doc++;
+              } while (!matchDoc(doc));
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
+
+          @Override
+          public int advance(int target) {
+            try {
+              doc = target;
+              while (!matchDoc(doc)) {
+                doc++;
+              }
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
+        };
+      } else {
+        // Must consult deletions
+
+        final int maxDoc = reader.maxDoc();
+
+        // a DocIdSetIterator generating docIds by
+        // incrementing a variable & checking skipDocs -
+        return new DocIdSetIterator() {
+          private int doc = -1;
+          @Override
+          public int docID() {
+            return doc;
+          }
+
+          @Override
+          public int nextDoc() {
+            do {
+              doc++;
+              if (doc >= maxDoc) {
+                return doc = NO_MORE_DOCS;
+              }
+            } while (skipDocs.get(doc) || !matchDoc(doc));
+            return doc;
+          }
+
+          @Override
+          public int advance(int target) {
+            for(doc=target;doc<maxDoc;doc++) {
+              if (!skipDocs.get(doc) && matchDoc(doc)) {
+                return doc;
+              }
+            }
+            return doc = NO_MORE_DOCS;
+          }
+        };
+      }
     }

   }
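A small sketch of the behavior this hunk establishes, mirroring the assertions in the tests below ("reader" is an assumed open IndexReader with sub-readers; the field name and bounds are placeholders): the filter's DocIdSet now always reports itself cacheable, and deletions are honored inside the iterator instead of through isCacheable().

    FieldCacheRangeFilter<Integer> filter =
        FieldCacheRangeFilter.newIntRange("id", 10, 20, true, true);
    DocIdSet set = filter.getDocIdSet(reader.getSequentialSubReaders()[0]);
    assert set.isCacheable();                 // now true even when the reader has deletions
    DocIdSetIterator iter = set.iterator();   // consults deleted docs internally
    int doc;
    while ((doc = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      // doc matches the range and is not deleted
    }
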
@@ -160,7 +160,7 @@ final class TermScorer extends Scorer {
     // not found in readahead cache, seek underlying stream
     int newDoc = docsEnum.advance(target);
     //System.out.println("ts.advance docsEnum=" + docsEnum);
-    if (newDoc != DocsEnum.NO_MORE_DOCS) {
+    if (newDoc != NO_MORE_DOCS) {
       doc = newDoc;
       freq = docsEnum.freq();
     } else {

@@ -79,12 +79,6 @@ final public class UnicodeUtil {
   public int[] offsets = new int[10];
   public int length;

-  /*
-  public String toString() {
-    return new String(result, 0, length);
-  }
-  */
-
   public void setLength(int newLength) {
     if (result.length < newLength)
       result = ArrayUtil.grow(result, newLength);

@@ -1570,7 +1570,6 @@ public class TestIndexReader extends LuceneTestCase
   // LUCENE-1579: Ensure that on a cloned reader, segments
   // reuse the doc values arrays in FieldCache
   public void testFieldCacheReuseAfterClone() throws Exception {
-    //Codec.DEBUG = true;
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();

@@ -66,9 +64,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     Query q = new TermQuery(new Term("body","body"));

     // test id, bounded on both ends
-    FieldCacheRangeFilter<String> fcrf;
-    result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q, FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;

@@ -213,9 +211,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     Query q = new TermQuery(new Term("body","body"));

     // test id, bounded on both ends
-    FieldCacheRangeFilter<Short> fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -305,9 +301,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {

     // test id, bounded on both ends

-    FieldCacheRangeFilter<Integer> fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -397,9 +391,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {

     // test id, bounded on both ends

-    FieldCacheRangeFilter<Long> fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);

     result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;

@@ -529,7 +521,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     assertEquals("infinity special case", 0, result.length);
   }

-  // test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
+  // test using a sparse index (with deleted docs).
   public void testSparseIndex() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));

@@ -550,27 +542,21 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
     assertTrue(reader.hasDeletions());

     ScoreDoc[] result;
-    FieldCacheRangeFilter<Byte> fcrf;
     Query q = new TermQuery(new Term("body","body"));

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);

-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
   }