LUCENE-4961: Filters should return null if they don't accept documents

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1476185 13f79535-47bb-0310-9956-ffa450edef68
Simon Willnauer 2013-04-26 13:30:06 +00:00
parent 08a6f78510
commit 2bcdaaa114
20 changed files with 139 additions and 128 deletions
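At a glance, the contract change: where a Filter used to signal "no matches" with the shared DocIdSet.EMPTY_DOCIDSET constant, it now returns null, so callers can skip the reader without ever touching an iterator. A minimal sketch of a filter honoring the new contract (hypothetical SingleDocFilter, not part of this patch; written against the Lucene 4.x API):

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

/** Hypothetical filter accepting a single document id, or nothing at all. */
public class SingleDocFilter extends Filter {
  private final int docId;

  public SingleDocFilter(int docId) {
    this.docId = docId;
  }

  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    final int maxDoc = context.reader().maxDoc();
    if (docId < 0 || docId >= maxDoc) {
      // pre-LUCENE-4961 idiom: return DocIdSet.EMPTY_DOCIDSET;
      return null; // new contract: null means "this filter accepts no documents here"
    }
    final FixedBitSet bits = new FixedBitSet(maxDoc); // FixedBitSet is itself a DocIdSet
    bits.set(docId);
    return bits; // a production filter would also honor acceptDocs, e.g. via BitsFilteredDocIdSet.wrap
  }
}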

CachingWrapperFilter.java

@@ -24,7 +24,6 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader; // javadocs
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
@@ -47,25 +46,28 @@ public class CachingWrapperFilter extends Filter {
this.filter = filter;
}
- /** Provide the DocIdSet to be cached, using the DocIdSet provided
- * by the wrapped Filter.
- * <p>This implementation returns the given {@link DocIdSet}, if {@link DocIdSet#isCacheable}
- * returns <code>true</code>, else it copies the {@link DocIdSetIterator} into
- * a {@link FixedBitSet}.
+ /**
+ * Provide the DocIdSet to be cached, using the DocIdSet provided
+ * by the wrapped Filter. <p>This implementation returns the given {@link DocIdSet},
+ * if {@link DocIdSet#isCacheable} returns <code>true</code>, else it copies the
+ * {@link DocIdSetIterator} into a {@link FixedBitSet}.
+ * <p>Note: This method returns {@linkplain #EMPTY_DOCIDSET} if the given docIdSet
+ * is <code>null</code> or if {@link DocIdSet#iterator()} returns <code>null</code>. The empty
+ * instance is used as a placeholder in the cache instead of the <code>null</code> value.
*/
protected DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicReader reader) throws IOException {
if (docIdSet == null) {
// this is better than returning null, as the nonnull result can be cached
- return DocIdSet.EMPTY_DOCIDSET;
+ return EMPTY_DOCIDSET;
} else if (docIdSet.isCacheable()) {
return docIdSet;
} else {
final DocIdSetIterator it = docIdSet.iterator();
// null is allowed to be returned by iterator(),
- // in this case we wrap with the empty set,
+ // in this case we wrap with the sentinel set,
// which is cacheable.
if (it == null) {
- return DocIdSet.EMPTY_DOCIDSET;
+ return EMPTY_DOCIDSET;
} else {
final FixedBitSet bits = new FixedBitSet(reader.maxDoc());
bits.or(it);
@@ -91,9 +93,9 @@ public class CachingWrapperFilter extends Filter {
cache.put(key, docIdSet);
}
- return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
+ return docIdSet == EMPTY_DOCIDSET ? null : BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
}
@Override
public String toString() {
return "CachingWrapperFilter("+filter+")";
@@ -110,4 +112,24 @@ public class CachingWrapperFilter extends Filter {
public int hashCode() {
return (filter.hashCode() ^ 0x1117BF25);
}
+ /** An empty {@code DocIdSet} instance */
+ protected static final DocIdSet EMPTY_DOCIDSET = new DocIdSet() {
+ @Override
+ public DocIdSetIterator iterator() {
+ return DocIdSetIterator.empty();
+ }
+ @Override
+ public boolean isCacheable() {
+ return true;
+ }
+ // we explicitly provide no random access, as this filter is 100% sparse and iterator exits faster
+ @Override
+ public Bits bits() {
+ return null;
+ }
+ };
}
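The private sentinel exists because the cache (a WeakHashMap) cannot tell a cached empty result apart from a missing entry if null were stored; the sentinel is what gets cached, and the line above translates it back to null on the way out. The same pattern in isolation, as a generic sketch with nothing Lucene-specific:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

/** Generic sketch of the cache-sentinel pattern used by CachingWrapperFilter. */
public class SentinelCache<K, V> {
  private final Object empty = new Object(); // plays the role of EMPTY_DOCIDSET
  private final Map<K, Object> cache = new HashMap<>();

  @SuppressWarnings("unchecked")
  public V get(K key, Function<K, V> compute) {
    Object v = cache.get(key);
    if (v == null) {                      // genuinely absent, not "cached as empty"
      V computed = compute.apply(key);    // may legitimately be null
      v = computed == null ? empty : computed;
      cache.put(key, v);                  // the negative result is now cacheable
    }
    return v == empty ? null : (V) v;     // translate back at the boundary, as getDocIdSet does
  }
}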

DocIdSet.java

@@ -26,56 +26,8 @@ import org.apache.lucene.util.Bits;
*/
public abstract class DocIdSet {
- /** An empty {@code DocIdSet} instance for easy use, e.g. in Filters that hit no documents. */
- public static final DocIdSet EMPTY_DOCIDSET = new DocIdSet() {
- @Override
- public DocIdSetIterator iterator() {
- return new DocIdSetIterator() {
- boolean exhausted = false;
- @Override
- public int advance(int target) {
- assert !exhausted;
- assert target >= 0;
- exhausted = true;
- return NO_MORE_DOCS;
- }
- @Override
- public int docID() {
- return exhausted ? NO_MORE_DOCS : -1;
- }
- @Override
- public int nextDoc() {
- assert !exhausted;
- exhausted = true;
- return NO_MORE_DOCS;
- }
- @Override
- public long cost() {
- return 0;
- }
- };
- }
- @Override
- public boolean isCacheable() {
- return true;
- }
- // we explicitely provide no random access, as this filter is 100% sparse and iterator exits faster
- @Override
- public Bits bits() {
- return null;
- }
- };
/** Provides a {@link DocIdSetIterator} to access the set.
- * This implementation can return <code>null</code> or
- * <code>{@linkplain #EMPTY_DOCIDSET}.iterator()</code> if there
+ * This implementation can return <code>null</code> if there
* are no docs that match. */
public abstract DocIdSetIterator iterator() throws IOException;
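With the shared constant gone, consumers of this API have two null conventions to honor: getDocIdSet itself may return null, and iterator() on a non-null set may still return null. A defensive consumer, sketched against the 4.x API (the FilterCounter class is hypothetical):

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

public final class FilterCounter {
  /** Counts the documents a filter accepts, honoring both null conventions. */
  public static int count(Filter filter, AtomicReaderContext context, Bits acceptDocs) throws IOException {
    DocIdSet set = filter.getDocIdSet(context, acceptDocs);
    if (set == null) {
      return 0; // the filter accepts no documents on this reader
    }
    DocIdSetIterator it = set.iterator();
    if (it == null) {
      return 0; // a set exists, but it has nothing to iterate
    }
    int count = 0;
    while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}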

DocIdSetIterator.java

@@ -28,6 +28,37 @@ import java.io.IOException;
*/
public abstract class DocIdSetIterator {
+ /** An empty {@code DocIdSetIterator} instance */
+ public static final DocIdSetIterator empty() {
+ return new DocIdSetIterator() {
+ boolean exhausted = false;
+ @Override
+ public int advance(int target) {
+ assert !exhausted;
+ assert target >= 0;
+ exhausted = true;
+ return NO_MORE_DOCS;
+ }
+ @Override
+ public int docID() {
+ return exhausted ? NO_MORE_DOCS : -1;
+ }
+ @Override
+ public int nextDoc() {
+ assert !exhausted;
+ exhausted = true;
+ return NO_MORE_DOCS;
+ }
+ @Override
+ public long cost() {
+ return 0;
+ }
+ };
+ }
/**
* When returned by {@link #nextDoc()}, {@link #advance(int)} and
* {@link #docID()} it means there are no more docs in the iterator.
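This factory is the drop-in replacement for the old DocIdSet.EMPTY_DOCIDSET.iterator() at the call sites further down. Assuming the implementation above, its lifecycle looks like this (a sketch; run with -ea so the asserts fire):

import org.apache.lucene.search.DocIdSetIterator;

public class EmptyIteratorDemo {
  public static void main(String[] args) throws Exception {
    DocIdSetIterator it = DocIdSetIterator.empty();
    assert it.docID() == -1;                              // not positioned yet
    assert it.cost() == 0;                                // matches nothing
    assert it.nextDoc() == DocIdSetIterator.NO_MORE_DOCS; // exhausted on the first call
    assert it.docID() == DocIdSetIterator.NO_MORE_DOCS;   // and stays exhausted
  }
}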

DocTermOrdsRangeFilter.java

@@ -90,7 +90,7 @@ public abstract class DocTermOrdsRangeFilter extends Filter {
}
if (inclusiveUpperPoint < 0 || inclusiveLowerPoint > inclusiveUpperPoint) {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;

DocTermOrdsRewriteMethod.java

@@ -143,7 +143,7 @@ public final class DocTermOrdsRewriteMethod extends MultiTermQuery.RewriteMethod
termSet.set(termsEnum.ord());
} while (termsEnum.next() != null);
} else {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {

FieldCacheRangeFilter.java

@@ -121,7 +121,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
if (inclusiveUpperPoint < 0 || inclusiveLowerPoint > inclusiveUpperPoint) {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
@@ -178,7 +178,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
if (inclusiveUpperPoint < 0 || inclusiveLowerPoint > inclusiveUpperPoint) {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
assert inclusiveLowerPoint >= 0 && inclusiveUpperPoint >= 0;
@@ -216,7 +216,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
final byte i = lowerVal.byteValue();
if (!includeLower && i == Byte.MAX_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveLowerPoint = (byte) (includeLower ? i : (i + 1));
} else {
inclusiveLowerPoint = Byte.MIN_VALUE;
@@ -224,14 +224,14 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
final byte i = upperVal.byteValue();
if (!includeUpper && i == Byte.MIN_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveUpperPoint = (byte) (includeUpper ? i : (i - 1));
} else {
inclusiveUpperPoint = Byte.MAX_VALUE;
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Bytes values = FieldCache.DEFAULT.getBytes(context.reader(), field, (FieldCache.ByteParser) parser, false);
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@@ -267,7 +267,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
short i = lowerVal.shortValue();
if (!includeLower && i == Short.MAX_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveLowerPoint = (short) (includeLower ? i : (i + 1));
} else {
inclusiveLowerPoint = Short.MIN_VALUE;
@@ -275,14 +275,14 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
short i = upperVal.shortValue();
if (!includeUpper && i == Short.MIN_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveUpperPoint = (short) (includeUpper ? i : (i - 1));
} else {
inclusiveUpperPoint = Short.MAX_VALUE;
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Shorts values = FieldCache.DEFAULT.getShorts(context.reader(), field, (FieldCache.ShortParser) parser, false);
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@@ -318,7 +318,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
int i = lowerVal.intValue();
if (!includeLower && i == Integer.MAX_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveLowerPoint = includeLower ? i : (i + 1);
} else {
inclusiveLowerPoint = Integer.MIN_VALUE;
@@ -326,14 +326,14 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
int i = upperVal.intValue();
if (!includeUpper && i == Integer.MIN_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveUpperPoint = includeUpper ? i : (i - 1);
} else {
inclusiveUpperPoint = Integer.MAX_VALUE;
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Ints values = FieldCache.DEFAULT.getInts(context.reader(), field, (FieldCache.IntParser) parser, false);
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@@ -369,7 +369,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
long i = lowerVal.longValue();
if (!includeLower && i == Long.MAX_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveLowerPoint = includeLower ? i : (i + 1L);
} else {
inclusiveLowerPoint = Long.MIN_VALUE;
@@ -377,14 +377,14 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
long i = upperVal.longValue();
if (!includeUpper && i == Long.MIN_VALUE)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
inclusiveUpperPoint = includeUpper ? i : (i - 1L);
} else {
inclusiveUpperPoint = Long.MAX_VALUE;
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Longs values = FieldCache.DEFAULT.getLongs(context.reader(), field, (FieldCache.LongParser) parser, false);
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@@ -422,7 +422,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
float f = lowerVal.floatValue();
if (!includeUpper && f > 0.0f && Float.isInfinite(f))
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
int i = NumericUtils.floatToSortableInt(f);
inclusiveLowerPoint = NumericUtils.sortableIntToFloat( includeLower ? i : (i + 1) );
} else {
@@ -431,7 +431,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
float f = upperVal.floatValue();
if (!includeUpper && f < 0.0f && Float.isInfinite(f))
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
int i = NumericUtils.floatToSortableInt(f);
inclusiveUpperPoint = NumericUtils.sortableIntToFloat( includeUpper ? i : (i - 1) );
} else {
@@ -439,7 +439,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), field, (FieldCache.FloatParser) parser, false);
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
@@ -477,7 +477,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (lowerVal != null) {
double f = lowerVal.doubleValue();
if (!includeUpper && f > 0.0 && Double.isInfinite(f))
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
long i = NumericUtils.doubleToSortableLong(f);
inclusiveLowerPoint = NumericUtils.sortableLongToDouble( includeLower ? i : (i + 1L) );
} else {
@@ -486,7 +486,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
if (upperVal != null) {
double f = upperVal.doubleValue();
if (!includeUpper && f < 0.0 && Double.isInfinite(f))
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
long i = NumericUtils.doubleToSortableLong(f);
inclusiveUpperPoint = NumericUtils.sortableLongToDouble( includeUpper ? i : (i - 1L) );
} else {
@@ -494,7 +494,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
if (inclusiveLowerPoint > inclusiveUpperPoint)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
final FieldCache.Doubles values = FieldCache.DEFAULT.getDoubles(context.reader(), field, (FieldCache.DoubleParser) parser, false);
// ignore deleted docs if range doesn't contain 0
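All six numeric variants above follow the same recipe: fold exclusive bounds into inclusive ones (i + 1, i - 1), return null when an exclusive bound sits at the type's extreme (the adjustment would overflow), and return null when the resulting range is inverse. The int case in isolation (hypothetical helper, not part of the patch):

/** Hypothetical stand-alone version of the int-range guard used above.
 *  Returns the inclusive [lower, upper] bounds, or null when nothing can match. */
public final class RangeBounds {
  public static int[] inclusiveIntBounds(Integer lowerVal, boolean includeLower,
                                         Integer upperVal, boolean includeUpper) {
    int lo;
    if (lowerVal != null) {
      int i = lowerVal;
      if (!includeLower && i == Integer.MAX_VALUE) {
        return null; // exclusive lower bound at MAX_VALUE: i + 1 would overflow
      }
      lo = includeLower ? i : i + 1;
    } else {
      lo = Integer.MIN_VALUE; // open lower end
    }
    int hi;
    if (upperVal != null) {
      int i = upperVal;
      if (!includeUpper && i == Integer.MIN_VALUE) {
        return null; // exclusive upper bound at MIN_VALUE: i - 1 would underflow
      }
      hi = includeUpper ? i : i - 1;
    } else {
      hi = Integer.MAX_VALUE; // open upper end
    }
    return lo > hi ? null : new int[] { lo, hi }; // inverse range matches nothing
  }
}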

FieldCacheRewriteMethod.java

@@ -146,7 +146,7 @@ public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod
}
} while (termsEnum.next() != null);
} else {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {

Filter.java

@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.AtomicReader; // javadocs
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.util.Bits;
/**
@@ -53,8 +52,9 @@ public abstract class Filter {
* but possibly filtering other documents)
*
* @return a DocIdSet that provides the documents which should be permitted or
- * prohibited in search results. <b>NOTE:</b> null can be returned if
- * no documents will be accepted by this Filter.
+ * prohibited in search results. <b>NOTE:</b> <code>null</code> should be returned if
+ * the filter doesn't accept any documents; otherwise internal optimizations might not
+ * apply in the case an <i>empty</i> {@link DocIdSet} is returned.
*/
public abstract DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException;
}
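The optimization the note refers to: a null result lets a caller drop an entire segment without allocating, wrapping, or draining an iterator, all of which an empty DocIdSet would still force. A schematic per-segment consumer (hypothetical PerSegmentWalk helper, 4.x API):

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;

public final class PerSegmentWalk {
  /** Collects the global doc ids a filter accepts, skipping segments cheaply. */
  public static void collect(List<AtomicReaderContext> leaves, Filter filter,
                             List<Integer> out) throws IOException {
    for (AtomicReaderContext leaf : leaves) {
      DocIdSet set = filter.getDocIdSet(leaf, leaf.reader().getLiveDocs());
      if (set == null) {
        continue; // whole segment skipped: no iterator, no loop
      }
      DocIdSetIterator it = set.iterator();
      if (it == null) {
        continue; // second null convention, same meaning
      }
      for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        out.add(leaf.docBase + doc); // rebase into the composite reader's id space
      }
    }
  }
}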

FilteredQuery.java

@@ -102,9 +102,9 @@ public class FilteredQuery extends Query {
Explanation inner = weight.explain (ir, i);
Filter f = FilteredQuery.this.filter;
DocIdSet docIdSet = f.getDocIdSet(ir, ir.reader().getLiveDocs());
- DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSet.EMPTY_DOCIDSET.iterator() : docIdSet.iterator();
+ DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSetIterator.empty() : docIdSet.iterator();
if (docIdSetIterator == null) {
- docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
+ docIdSetIterator = DocIdSetIterator.empty();
}
if (docIdSetIterator.advance(i) == i) {
return inner;

MultiTermQueryWrapperFilter.java

@@ -88,13 +88,13 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
final Fields fields = reader.fields();
if (fields == null) {
// reader has no fields
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
final Terms terms = fields.terms(query.field);
if (terms == null) {
// field does not exist
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
final TermsEnum termsEnum = query.getTermsEnum(terms);
@@ -116,7 +116,7 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filte
return bitSet;
} else {
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
}
}
}

TestCachingWrapperFilter.java

@@ -80,7 +80,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
// the caching filter should return the empty set constant
- assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));
reader.close();
dir.close();
@@ -108,7 +108,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
// the caching filter should return the empty set constant
- assertSame(DocIdSet.EMPTY_DOCIDSET, cacher.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull(cacher.getDocIdSet(context, context.reader().getLiveDocs()));
reader.close();
dir.close();
@@ -120,13 +120,20 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs());
- assertTrue(cachedSet.isCacheable());
- assertEquals(shouldCacheable, originalSet.isCacheable());
- //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
- if (originalSet.isCacheable()) {
- assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
+ if (originalSet == null) {
+ assertNull(cachedSet);
+ }
+ if (cachedSet == null) {
+ assertTrue(originalSet == null || originalSet.iterator() == null);
} else {
- assertTrue("Cached DocIdSet must be an FixedBitSet if the original one was not cacheable", cachedSet instanceof FixedBitSet || cachedSet == DocIdSet.EMPTY_DOCIDSET);
+ assertTrue(cachedSet.isCacheable());
+ assertEquals(shouldCacheable, originalSet.isCacheable());
+ //System.out.println("Original: "+originalSet.getClass().getName()+" -- cached: "+cachedSet.getClass().getName());
+ if (originalSet.isCacheable()) {
+ assertEquals("Cached DocIdSet must be of same class like uncached, if cacheable", originalSet.getClass(), cachedSet.getClass());
+ } else {
+ assertTrue("Cached DocIdSet must be a FixedBitSet if the original one was not cacheable", cachedSet instanceof FixedBitSet || cachedSet == null);
+ }
}
}

TestNumericRangeQuery32.java

@@ -200,13 +200,11 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getContext();
NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
- assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull("An inverse range should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
- assertSame("A exclusive range starting with Integer.MAX_VALUE should return the EMPTY_DOCIDSET instance",
- DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull("An exclusive range starting with Integer.MAX_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, null, Integer.MIN_VALUE, false, false);
- assertSame("A exclusive range ending with Integer.MIN_VALUE should return the EMPTY_DOCIDSET instance",
- DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull("An exclusive range ending with Integer.MIN_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
}
@Test

TestNumericRangeQuery64.java

@@ -214,14 +214,14 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getContext();
NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
- assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET,
+ assertNull("An inverse range should return the null instance",
f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newLongRange("field8", 8, Long.MAX_VALUE, null, false, false);
- assertSame("A exclusive range starting with Long.MAX_VALUE should return the EMPTY_DOCIDSET instance",
- DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull("An exclusive range starting with Long.MAX_VALUE should return the null instance",
+ f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newLongRange("field8", 8, null, Long.MIN_VALUE, false, false);
- assertSame("A exclusive range ending with Long.MIN_VALUE should return the EMPTY_DOCIDSET instance",
- DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
+ assertNull("An exclusive range ending with Long.MIN_VALUE should return the null instance",
+ f.getDocIdSet(context, context.reader().getLiveDocs()));
}
@Test

ToParentBlockJoinFieldComparator.java

@@ -93,7 +93,7 @@ public abstract class ToParentBlockJoinFieldComparator extends FieldComparator<O
}
private static boolean isEmpty(DocIdSet set) {
- return set == null || set == DocIdSet.EMPTY_DOCIDSET;
+ return set == null;
}
private static FixedBitSet toFixedBitSet(DocIdSetIterator iterator, int numBits) throws IOException {

ToParentBlockJoinQuery.java

@@ -181,7 +181,7 @@ public class ToParentBlockJoinQuery extends Query {
// acceptDocs when we score:
final DocIdSet parents = parentsFilter.getDocIdSet(readerContext, null);
- if (parents == null || parents == DocIdSet.EMPTY_DOCIDSET) {
+ if (parents == null) {
// No matches
return null;
}

BooleanFilter.java

@@ -67,7 +67,7 @@ public class BooleanFilter extends Filter implements Iterable<FilterClause> {
}
}
if (hasShouldClauses && res == null)
- return DocIdSet.EMPTY_DOCIDSET;
+ return null;
for (final FilterClause fc : clauses) {
if (fc.getOccur() == Occur.MUST_NOT) {
@@ -87,7 +87,7 @@ public class BooleanFilter extends Filter implements Iterable<FilterClause> {
if (fc.getOccur() == Occur.MUST) {
final DocIdSetIterator disi = getDISI(fc.getFilter(), context);
if (disi == null) {
- return DocIdSet.EMPTY_DOCIDSET; // no documents can match
+ return null; // no documents can match
}
if (res == null) {
res = new FixedBitSet(reader.maxDoc());
@@ -98,14 +98,14 @@ public class BooleanFilter extends Filter implements Iterable<FilterClause> {
}
}
- return res != null ? BitsFilteredDocIdSet.wrap(res, acceptDocs) : DocIdSet.EMPTY_DOCIDSET;
+ return BitsFilteredDocIdSet.wrap(res, acceptDocs);
}
private static DocIdSetIterator getDISI(Filter filter, AtomicReaderContext context)
throws IOException {
// we dont pass acceptDocs, we will filter at the end using an additional filter
final DocIdSet set = filter.getDocIdSet(context, null);
- return (set == null || set == DocIdSet.EMPTY_DOCIDSET) ? null : set.iterator();
+ return set == null ? null : set.iterator();
}
/**

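The same conventions end to end in a composite filter: a null operand empties the whole conjunction (the MUST short-circuit above), acceptDocs is applied once at the end, and BitsFilteredDocIdSet.wrap is assumed to pass a null set through unchanged. A hypothetical two-clause AND, not part of the patch:

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

/** Hypothetical AND of two filters, following BooleanFilter's null conventions. */
public class AndFilter extends Filter {
  private final Filter a, b;

  public AndFilter(Filter a, Filter b) {
    this.a = a;
    this.b = b;
  }

  @Override
  public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
    DocIdSetIterator aIt = iterator(a, context);
    if (aIt == null) return null; // an empty clause empties the conjunction
    DocIdSetIterator bIt = iterator(b, context);
    if (bIt == null) return null;

    FixedBitSet bits = new FixedBitSet(context.reader().maxDoc());
    bits.or(aIt);  // load the first clause
    bits.and(bIt); // intersect with the second
    return BitsFilteredDocIdSet.wrap(bits, acceptDocs);
  }

  private static DocIdSetIterator iterator(Filter f, AtomicReaderContext context) throws IOException {
    // acceptDocs applied once at the end, mirroring getDISI above
    DocIdSet set = f.getDocIdSet(context, null);
    return set == null ? null : set.iterator();
  }
}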
ChainedFilter.java

@@ -116,11 +116,11 @@ public class ChainedFilter extends Filter {
// we dont pass acceptDocs, we will filter at the end using an additional filter
DocIdSet docIdSet = filter.getDocIdSet(context, null);
if (docIdSet == null) {
- return DocIdSet.EMPTY_DOCIDSET.iterator();
+ return DocIdSetIterator.empty();
} else {
DocIdSetIterator iter = docIdSet.iterator();
if (iter == null) {
- return DocIdSet.EMPTY_DOCIDSET.iterator();
+ return DocIdSetIterator.empty();
} else {
return iter;
}
@@ -223,11 +223,11 @@ public class ChainedFilter extends Filter {
} else {
DocIdSetIterator disi;
if (dis == null) {
- disi = DocIdSet.EMPTY_DOCIDSET.iterator();
+ disi = DocIdSetIterator.empty();
} else {
disi = dis.iterator();
if (disi == null) {
- disi = DocIdSet.EMPTY_DOCIDSET.iterator();
+ disi = DocIdSetIterator.empty();
}
}

BooleanFilterTest.java

@@ -128,11 +128,13 @@ public class BooleanFilterTest extends LuceneTestCase {
private void tstFilterCard(String mes, int expected, Filter filt)
throws Exception {
- // BooleanFilter never returns null DIS or null DISI!
- DocIdSetIterator disi = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator();
+ final DocIdSet docIdSet = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs());
int actual = 0;
- while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- actual++;
+ if (docIdSet != null) {
+ DocIdSetIterator disi = docIdSet.iterator();
+ while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ actual++;
+ }
}
assertEquals(mes, expected, actual);
}

JoinQParserPlugin.java (Solr, class JoinQuery)

@@ -252,8 +252,7 @@ class JoinQuery extends Query {
// Although this set only includes live docs, other filters can be pushed down to queries.
DocIdSet readerSet = filter.getDocIdSet(context, acceptDocs);
- if (readerSet == null) readerSet=DocIdSet.EMPTY_DOCIDSET;
- return new JoinScorer(this, readerSet.iterator(), getBoost());
+ return new JoinScorer(this, readerSet == null ? DocIdSetIterator.empty() : readerSet.iterator(), getBoost());
}
@@ -519,7 +518,7 @@ class JoinQuery extends Query {
public JoinScorer(Weight w, DocIdSetIterator iter, float score) throws IOException {
super(w);
this.score = score;
- this.iter = iter==null ? DocIdSet.EMPTY_DOCIDSET.iterator() : iter;
+ this.iter = iter==null ? DocIdSetIterator.empty() : iter;
}
@Override

SolrConstantScoreQuery.java

@@ -161,11 +161,11 @@ public class SolrConstantScoreQuery extends ConstantScoreQuery implements Extend
this.acceptDocs = acceptDocs;
DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(w.context, context, acceptDocs) : filter.getDocIdSet(context, acceptDocs);
if (docIdSet == null) {
- docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
+ docIdSetIterator = DocIdSetIterator.empty();
} else {
DocIdSetIterator iter = docIdSet.iterator();
if (iter == null) {
- docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
+ docIdSetIterator = DocIdSetIterator.empty();
} else {
docIdSetIterator = iter;
}