LUCENE-6300: Remove multi-term filters.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1662682 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Adrien Grand 2015-02-27 14:12:02 +00:00
parent e98b031bda
commit 544f5bf1e7
74 changed files with 570 additions and 1811 deletions

View File

@ -172,6 +172,10 @@ API Changes
* LUCENE-6286: Removed IndexSearcher methods that take a Filter object.
A BooleanQuery with a filter clause must be used instead. (Adrien Grand)
* LUCENE-6300: PrefixFilter, TermRangeFilter and NumericRangeFilter have been
removed. Use PrefixQuery, TermRangeQuery and NumericRangeQuery instead.
(Adrien Grand)
Other
* LUCENE-6248: Remove unused odd constants from StandardSyntaxParser.jj

View File

@ -72,7 +72,7 @@ public class EnwikiQueryMaker extends AbstractQueryMaker implements
private static Query[] getPrebuiltQueries(String field) {
WildcardQuery wcq = new WildcardQuery(new Term(field, "fo*"));
wcq .setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
wcq .setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
// be wary of unanalyzed text
return new Query[] {
new SpanFirstQuery(new SpanTermQuery(new Term(field, "ford")), 5),

View File

@ -27,7 +27,6 @@ import org.apache.lucene.document.DoubleField; // for javadocs
import org.apache.lucene.document.FloatField; // for javadocs
import org.apache.lucene.document.IntField; // for javadocs
import org.apache.lucene.document.LongField; // for javadocs
import org.apache.lucene.search.NumericRangeFilter; // for javadocs
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
@ -40,7 +39,7 @@ import org.apache.lucene.util.NumericUtils;
/**
* <b>Expert:</b> This class provides a {@link TokenStream}
* for indexing numeric values that can be used by {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
* NumericRangeQuery}.
*
* <p>Note that for simple usage, {@link IntField}, {@link
* LongField}, {@link FloatField} or {@link DoubleField} is

View File

@ -19,7 +19,6 @@ package org.apache.lucene.document;
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
@ -52,8 +51,8 @@ import org.apache.lucene.util.NumericUtils;
* FloatField}.
*
* <p>To perform range querying or filtering against a
* <code>DoubleField</code>, use {@link NumericRangeQuery} or {@link
* NumericRangeFilter}. To sort according to a
* <code>DoubleField</code>, use {@link NumericRangeQuery}.
* To sort according to a
* <code>DoubleField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>DoubleField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
@ -86,7 +85,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
* NumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one

View File

@ -19,7 +19,6 @@ package org.apache.lucene.document;
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
@ -52,8 +51,8 @@ import org.apache.lucene.util.NumericUtils;
* DoubleField}.
*
* <p>To perform range querying or filtering against a
* <code>FloatField</code>, use {@link NumericRangeQuery} or {@link
* NumericRangeFilter}. To sort according to a
* <code>FloatField</code>, use {@link NumericRangeQuery}.
* To sort according to a
* <code>FloatField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>FloatField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
@ -86,7 +85,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
* NumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one

View File

@ -19,7 +19,6 @@ package org.apache.lucene.document;
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
@ -52,8 +51,8 @@ import org.apache.lucene.util.NumericUtils;
* DoubleField}.
*
* <p>To perform range querying or filtering against a
* <code>IntField</code>, use {@link NumericRangeQuery} or {@link
* NumericRangeFilter}. To sort according to a
* <code>IntField</code>, use {@link NumericRangeQuery}.
* To sort according to a
* <code>IntField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#INT}. <code>IntField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
@ -86,7 +85,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
* NumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one

View File

@ -19,7 +19,6 @@ package org.apache.lucene.document;
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
@ -62,8 +61,8 @@ import org.apache.lucene.util.NumericUtils;
* <code>long</code> value.
*
* <p>To perform range querying or filtering against a
* <code>LongField</code>, use {@link NumericRangeQuery} or {@link
* NumericRangeFilter}. To sort according to a
* <code>LongField</code>, use {@link NumericRangeQuery}.
* To sort according to a
* <code>LongField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LongField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
@ -96,7 +95,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
* NumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one

View File

@ -24,7 +24,7 @@ import org.apache.lucene.index.Terms; // javadocs only
/** Add this {@link Attribute} to a {@link TermsEnum} returned by {@link MultiTermQuery#getTermsEnum(Terms,AttributeSource)}
* and update the boost on each returned term. This enables to control the boost factor
* for each matching term in {@link MultiTermQuery#SCORING_BOOLEAN_QUERY_REWRITE} or
* for each matching term in {@link MultiTermQuery#SCORING_BOOLEAN_REWRITE} or
* {@link TopTermsRewrite} mode.
* {@link FuzzyQuery} is using this to take the edit distance into account.
* <p><b>Please note:</b> This attribute is intended to be added only by the TermsEnum

View File

@ -1,180 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LongBitSet;
/**
* Rewrites MultiTermQueries into a filter, using DocTermOrds for term enumeration.
* <p>
* This can be used to perform these queries against an unindexed docvalues field.
* @lucene.experimental
*/
public final class DocTermOrdsRewriteMethod extends MultiTermQuery.RewriteMethod {
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) {
// Wrap the multi-term query in a doc-values-backed filter; the score is constant
// because per-term scoring information is discarded by this rewrite.
Query result = new ConstantScoreQuery(new MultiTermQueryDocTermOrdsWrapperFilter(query));
result.setBoost(query.getBoost());
return result;
}
// Filter that accepts a document when any of its SortedSet doc-values ords is in
// the set of ords matched by the wrapped MultiTermQuery.
static class MultiTermQueryDocTermOrdsWrapperFilter extends Filter {
protected final MultiTermQuery query;
/**
* Wrap a {@link MultiTermQuery} as a Filter.
*/
protected MultiTermQueryDocTermOrdsWrapperFilter(MultiTermQuery query) {
this.query = query;
}
@Override
public String toString(String field) {
// query.toString should be ok for the filter, too, if the query boost is 1.0f
return query.toString(field);
}
@Override
public final boolean equals(final Object o) {
// Equality is delegated to the wrapped query; only same-class wrappers can be equal.
if (o==this) return true;
if (o==null) return false;
if (this.getClass().equals(o.getClass())) {
return this.query.equals( ((MultiTermQueryDocTermOrdsWrapperFilter)o).query );
}
return false;
}
@Override
public final int hashCode() {
// Must stay consistent with equals(), which delegates to the wrapped query.
return query.hashCode();
}
/** Returns the field name for this query */
public final String getField() { return query.getField(); }
/**
* Returns a DocIdSet with documents that should be permitted in search
* results.
*/
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final SortedSetDocValues docTermOrds = DocValues.getSortedSet(context.reader(), query.field);
// Cannot use FixedBitSet because we require long index (ord):
final LongBitSet termSet = new LongBitSet(docTermOrds.getValueCount());
// Present the doc-values terms dictionary to the query as a Terms instance so the
// query can drive its own term intersection; index statistics are unavailable for
// doc values, hence the -1/false answers below.
TermsEnum termsEnum = query.getTermsEnum(new Terms() {
@Override
public TermsEnum iterator(TermsEnum reuse) {
return docTermOrds.termsEnum();
}
@Override
public long getSumTotalTermFreq() {
return -1;
}
@Override
public long getSumDocFreq() {
return -1;
}
@Override
public int getDocCount() {
return -1;
}
@Override
public long size() {
return -1;
}
@Override
public boolean hasFreqs() {
return false;
}
@Override
public boolean hasOffsets() {
return false;
}
@Override
public boolean hasPositions() {
return false;
}
@Override
public boolean hasPayloads() {
return false;
}
});
assert termsEnum != null;
if (termsEnum.next() != null) {
// fill into a bitset
do {
termSet.set(termsEnum.ord());
} while (termsEnum.next() != null);
} else {
// no term matched: nothing can match, signalled by a null DocIdSet
return null;
}
// Accept a document when any of its ords is in the matched-term bitset.
return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
@Override
protected final boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException {
docTermOrds.setDocument(doc);
long ord;
// TODO: we could track max bit set and early terminate (since they come in sorted order)
while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
if (termSet.get(ord)) {
return true;
}
}
return false;
}
};
}
}
@Override
public boolean equals(Object obj) {
// All instances of this rewrite method are stateless and interchangeable.
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
return true;
}
@Override
public int hashCode() {
// Arbitrary constant, consistent with equals() above.
return 877;
}
}

View File

@ -18,11 +18,12 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
@ -38,19 +39,19 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) {
Query result = new ConstantScoreQuery(new MultiTermQueryDocValuesWrapperFilter(query));
Query result = new ConstantScoreQuery(new MultiTermQueryDocValuesWrapper(query));
result.setBoost(query.getBoost());
return result;
}
static class MultiTermQueryDocValuesWrapperFilter extends Filter {
static class MultiTermQueryDocValuesWrapper extends Query {
protected final MultiTermQuery query;
/**
* Wrap a {@link MultiTermQuery} as a Filter.
*/
protected MultiTermQueryDocValuesWrapperFilter(MultiTermQuery query) {
protected MultiTermQueryDocValuesWrapper(MultiTermQuery query) {
this.query = query;
}
@ -65,28 +66,26 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
if (o==this) return true;
if (o==null) return false;
if (this.getClass().equals(o.getClass())) {
return this.query.equals( ((MultiTermQueryDocValuesWrapperFilter)o).query );
final MultiTermQueryDocValuesWrapper that = (MultiTermQueryDocValuesWrapper) o;
return this.query.equals(that.query) && this.getBoost() == that.getBoost();
}
return false;
}
@Override
public final int hashCode() {
return query.hashCode();
return Objects.hash(getClass(), query, getBoost());
}
/** Returns the field name for this query */
public final String getField() { return query.getField(); }
/**
* Returns a DocIdSet with documents that should be permitted in search
* results.
*/
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final SortedDocValues fcsi = DocValues.getSorted(context.reader(), query.field);
// Cannot use FixedBitSet because we require long index (ord):
final LongBitSet termSet = new LongBitSet(fcsi.getValueCount());
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
Scorer scorer(LeafReaderContext context, Bits acceptDocs, float score) throws IOException {
final SortedSetDocValues fcsi = DocValues.getSortedSet(context.reader(), query.field);
TermsEnum termsEnum = query.getTermsEnum(new Terms() {
@Override
@ -136,26 +135,80 @@ public final class DocValuesRewriteMethod extends MultiTermQuery.RewriteMethod {
});
assert termsEnum != null;
if (termsEnum.next() != null) {
if (termsEnum.next() == null) {
// no matching terms
return null;
}
// fill into a bitset
// Cannot use FixedBitSet because we require long index (ord):
final LongBitSet termSet = new LongBitSet(fcsi.getValueCount());
do {
long ord = termsEnum.ord();
if (ord >= 0) {
termSet.set(ord);
}
} while (termsEnum.next() != null);
} else {
return null;
}
return new DocValuesDocIdSet(context.reader().maxDoc(), acceptDocs) {
final DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator() {
@Override
protected final boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException {
int ord = fcsi.getOrd(doc);
if (ord == -1) {
public DocIdSetIterator approximation() {
return approximation;
}
@Override
public boolean matches() throws IOException {
final int doc = approximation.docID();
if (acceptDocs != null && acceptDocs.get(doc) == false) {
return false;
}
return termSet.get(ord);
fcsi.setDocument(doc);
for (long ord = fcsi.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = fcsi.nextOrd()) {
if (termSet.get(ord)) {
return true;
}
}
return false;
}
};
final DocIdSetIterator disi = TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator);
return new Scorer(this) {
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
return twoPhaseIterator;
}
@Override
public float score() throws IOException {
return score;
}
@Override
public int freq() throws IOException {
return 1;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
};
}

View File

@ -39,17 +39,17 @@ import org.apache.lucene.util.AttributeSource;
* matched.
*
* <p><b>NOTE</b>: if {@link #setRewriteMethod} is either
* {@link #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} or {@link
* #SCORING_BOOLEAN_QUERY_REWRITE}, you may encounter a
* {@link #CONSTANT_SCORE_BOOLEAN_REWRITE} or {@link
* #SCORING_BOOLEAN_REWRITE}, you may encounter a
* {@link BooleanQuery.TooManyClauses} exception during
* searching, which happens when the number of terms to be
* searched exceeds {@link
* BooleanQuery#getMaxClauseCount()}. Setting {@link
* #setRewriteMethod} to {@link #CONSTANT_SCORE_FILTER_REWRITE}
* #setRewriteMethod} to {@link #CONSTANT_SCORE_REWRITE}
* prevents this.
*
* <p>The recommended rewrite method is {@link
* #CONSTANT_SCORE_FILTER_REWRITE}: it doesn't spend CPU
* #CONSTANT_SCORE_REWRITE}: it doesn't spend CPU
* computing unhelpful scores, and is the most
* performant rewrite method given the query. If you
* need scoring (like {@link FuzzyQuery}, use
@ -58,12 +58,12 @@ import org.apache.lucene.util.AttributeSource;
* and not hit this limitation.
*
* Note that org.apache.lucene.queryparser.classic.QueryParser produces
* MultiTermQueries using {@link #CONSTANT_SCORE_FILTER_REWRITE}
* MultiTermQueries using {@link #CONSTANT_SCORE_REWRITE}
* by default.
*/
public abstract class MultiTermQuery extends Query {
protected final String field;
protected RewriteMethod rewriteMethod = CONSTANT_SCORE_FILTER_REWRITE;
protected RewriteMethod rewriteMethod = CONSTANT_SCORE_REWRITE;
/** Abstract class that defines how the query is rewritten. */
public static abstract class RewriteMethod {
@ -89,10 +89,10 @@ public abstract class MultiTermQuery extends Query {
* exception.
*
* @see #setRewriteMethod */
public static final RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new RewriteMethod() {
public static final RewriteMethod CONSTANT_SCORE_REWRITE = new RewriteMethod() {
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) {
Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter<>(query));
Query result = new MultiTermQueryConstantScoreWrapper<>(query);
result.setBoost(query.getBoost());
return result;
}
@ -104,16 +104,16 @@ public abstract class MultiTermQuery extends Query {
* query. Note that typically such scores are
* meaningless to the user, and require non-trivial CPU
* to compute, so it's almost always better to use {@link
* #CONSTANT_SCORE_FILTER_REWRITE} instead.
* #CONSTANT_SCORE_REWRITE} instead.
*
* <p><b>NOTE</b>: This rewrite method will hit {@link
* BooleanQuery.TooManyClauses} if the number of terms
* exceeds {@link BooleanQuery#getMaxClauseCount}.
*
* @see #setRewriteMethod */
public final static RewriteMethod SCORING_BOOLEAN_QUERY_REWRITE = ScoringRewrite.SCORING_BOOLEAN_QUERY_REWRITE;
public final static RewriteMethod SCORING_BOOLEAN_REWRITE = ScoringRewrite.SCORING_BOOLEAN_REWRITE;
/** Like {@link #SCORING_BOOLEAN_QUERY_REWRITE} except
/** Like {@link #SCORING_BOOLEAN_REWRITE} except
* scores are not computed. Instead, each matching
* document receives a constant score equal to the
* query's boost.
@ -123,7 +123,7 @@ public abstract class MultiTermQuery extends Query {
* exceeds {@link BooleanQuery#getMaxClauseCount}.
*
* @see #setRewriteMethod */
public final static RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = ScoringRewrite.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
public final static RewriteMethod CONSTANT_SCORE_BOOLEAN_REWRITE = ScoringRewrite.CONSTANT_SCORE_BOOLEAN_REWRITE;
/**
* A rewrite method that first translates each term into

View File

@ -0,0 +1,150 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
/**
* A wrapper for {@link MultiTermQuery}, that exposes its
* functionality as a constant-score {@link Query}.
* <P>
* <code>MultiTermQueryConstantScoreWrapper</code> is not designed to
* be used by itself: it provides the functionality behind
* {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}.
*/
final class MultiTermQueryConstantScoreWrapper<Q extends MultiTermQuery> extends Query {
protected final Q query;
/**
* Wrap a {@link MultiTermQuery} as a constant-score query.
*/
protected MultiTermQueryConstantScoreWrapper(Q query) {
this.query = query;
}
@Override
public String toString(String field) {
// query.toString should be ok for the wrapper, too, if the query boost is 1.0f
return query.toString(field);
}
@Override
@SuppressWarnings({"rawtypes"})
public final boolean equals(final Object o) {
// Two wrappers are equal when they wrap equal queries and carry the same boost.
if (o==this) return true;
if (o==null) return false;
if (this.getClass().equals(o.getClass())) {
final MultiTermQueryConstantScoreWrapper that = (MultiTermQueryConstantScoreWrapper) o;
return this.query.equals(that.query) && this.getBoost() == that.getBoost();
}
return false;
}
@Override
public final int hashCode() {
// Consistent with equals(): class identity, wrapped query and boost.
return Objects.hash(getClass(), query, getBoost());
}
@Override
public void extractTerms(Set<Term> terms) {
// no-op
// The set of matching terms is potentially unbounded, so none are reported.
}
/** Returns the field name for this query */
public final String getField() { return query.getField(); }
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
Scorer scorer(LeafReaderContext context, Bits acceptDocs, float score) throws IOException {
final Terms terms = context.reader().terms(query.field);
if (terms == null) {
// field does not exist
return null;
}
// Let the wrapped query enumerate its matching terms, and OR every term's
// postings into one bitset of matching documents.
final TermsEnum termsEnum = query.getTermsEnum(terms);
assert termsEnum != null;
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
PostingsEnum docs = null;
while (termsEnum.next() != null) {
docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.NONE);
builder.or(docs);
}
final BitDocIdSet set = builder.build();
if (set == null) {
// no term matched any document
return null;
}
final DocIdSetIterator disi = set.iterator();
if (disi == null) {
return null;
}
// Constant-score scorer over the precomputed bitset; freq is reported as 1.
return new Scorer(this) {
@Override
public float score() throws IOException {
return score;
}
@Override
public int freq() throws IOException {
return 1;
}
@Override
public int docID() {
return disi.docID();
}
@Override
public int nextDoc() throws IOException {
return disi.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return disi.advance(target);
}
@Override
public long cost() {
return disi.cost();
}
};
}
};
}
}

View File

@ -1,102 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
/**
* A wrapper for {@link MultiTermQuery}, that exposes its
* functionality as a {@link Filter}.
* <P>
* <code>MultiTermQueryWrapperFilter</code> is not designed to
* be used by itself. Normally you subclass it to provide a Filter
* counterpart for a {@link MultiTermQuery} subclass.
* <P>
* For example, {@link TermRangeFilter} and {@link PrefixFilter} extend
* <code>MultiTermQueryWrapperFilter</code>.
* This class also provides the functionality behind
* {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE};
* this is why it is not abstract.
*/
public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filter {
protected final Q query;
/**
* Wrap a {@link MultiTermQuery} as a Filter.
*/
protected MultiTermQueryWrapperFilter(Q query) {
this.query = query;
}
@Override
public String toString(String field) {
// query.toString should be ok for the filter, too, if the query boost is 1.0f
return query.toString(field);
}
@Override
@SuppressWarnings({"unchecked","rawtypes"})
public final boolean equals(final Object o) {
// Equality is delegated entirely to the wrapped query.
if (o==this) return true;
if (o==null) return false;
if (this.getClass().equals(o.getClass())) {
return this.query.equals( ((MultiTermQueryWrapperFilter)o).query );
}
return false;
}
@Override
public final int hashCode() {
// Consistent with equals(), which delegates to the wrapped query.
return query.hashCode();
}
/** Returns the field name for this query */
public final String getField() { return query.getField(); }
/**
* Returns a DocIdSet with documents that should be permitted in search
* results.
*/
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
final Terms terms = context.reader().terms(query.field);
if (terms == null) {
// field does not exist
return null;
}
// Enumerate the query's matching terms and OR every term's postings into one
// bitset of accepted documents.
final TermsEnum termsEnum = query.getTermsEnum(terms);
assert termsEnum != null;
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
PostingsEnum docs = null;
while (termsEnum.next() != null) {
docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.NONE);
builder.or(docs);
}
// May be null when no term matched any document.
return builder.build();
}
}

View File

@ -1,197 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
import org.apache.lucene.document.DoubleField; // for javadocs
import org.apache.lucene.document.FloatField; // for javadocs
import org.apache.lucene.document.IntField; // for javadocs
import org.apache.lucene.document.LongField; // for javadocs
import org.apache.lucene.util.NumericUtils; // for javadocs
/**
* A {@link Filter} that only accepts numeric values within
* a specified range. To use this, you must first index the
* numeric values using {@link IntField}, {@link
* FloatField}, {@link LongField} or {@link DoubleField} (expert: {@link
* NumericTokenStream}).
*
* <p>You create a new NumericRangeFilter with the static
* factory methods, eg:
*
* <pre class="prettyprint">
* Filter f = NumericRangeFilter.newFloatRange("weight", 0.03f, 0.10f, true, true);
* </pre>
*
* accepts all documents whose float valued "weight" field
* ranges from 0.03 to 0.10, inclusive.
* See {@link NumericRangeQuery} for details on how Lucene
* indexes and searches numeric valued fields.
*
* @since 2.9
**/
public final class NumericRangeFilter<T extends Number> extends MultiTermQueryWrapperFilter<NumericRangeQuery<T>> {
// Instances are created only through the static factory methods, which build
// the NumericRangeQuery that backs this filter.
private NumericRangeFilter(final NumericRangeQuery<T> query) {
super(query);
}
/**
* Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>long</code>
* range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeFilter<Long> newLongRange(final String field, final int precisionStep,
    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
) {
  // Build the equivalent NumericRangeQuery and expose it through the filter wrapper.
  final NumericRangeQuery<Long> range =
      NumericRangeQuery.newLongRange(field, precisionStep, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(range);
}
/**
* Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>long</code>
* range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeFilter<Long> newLongRange(final String field,
    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
) {
  // Uses the query's default precisionStep; see NumericUtils.PRECISION_STEP_DEFAULT.
  final NumericRangeQuery<Long> range =
      NumericRangeQuery.newLongRange(field, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(range);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching an {@code int}
 * range, using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Integer> newIntRange(final String field, final int precisionStep,
    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
) {
  // Delegate range semantics entirely to the equivalent query and wrap it.
  final NumericRangeQuery<Integer> wrapped =
      NumericRangeQuery.newIntRange(field, precisionStep, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching an {@code int}
 * range, using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Integer> newIntRange(final String field,
    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
) {
  // Same as the precisionStep overload, but relying on the query's default step.
  final NumericRangeQuery<Integer> wrapped =
      NumericRangeQuery.newIntRange(field, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching a {@code double}
 * range, using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value.
 * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
 * with {@code min == max == Double.NaN}. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Double> newDoubleRange(final String field, final int precisionStep,
    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
) {
  // Delegate range semantics (including NaN handling) to the equivalent query.
  final NumericRangeQuery<Double> wrapped =
      NumericRangeQuery.newDoubleRange(field, precisionStep, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching a {@code double}
 * range, using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value.
 * {@link Double#NaN} will never match a half-open range; to hit {@code NaN} use a query
 * with {@code min == max == Double.NaN}. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Double> newDoubleRange(final String field,
    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
) {
  // Same as the precisionStep overload, but relying on the query's default step.
  final NumericRangeQuery<Double> wrapped =
      NumericRangeQuery.newDoubleRange(field, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching a {@code float}
 * range, using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value.
 * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
 * with {@code min == max == Float.NaN}. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Float> newFloatRange(final String field, final int precisionStep,
    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
) {
  // Delegate range semantics (including NaN handling) to the equivalent query.
  final NumericRangeQuery<Float> wrapped =
      NumericRangeQuery.newFloatRange(field, precisionStep, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/**
 * Factory that creates a {@code NumericRangeFilter} matching a {@code float}
 * range, using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
 * Half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) are expressed by
 * passing {@code null} for the min or max value.
 * {@link Float#NaN} will never match a half-open range; to hit {@code NaN} use a query
 * with {@code min == max == Float.NaN}. With inclusive set to {@code false}
 * the bounds themselves are excluded; with it set to {@code true} boundary documents
 * match as well.
 */
public static NumericRangeFilter<Float> newFloatRange(final String field,
    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
) {
  // Same as the precisionStep overload, but relying on the query's default step.
  final NumericRangeQuery<Float> wrapped =
      NumericRangeQuery.newFloatRange(field, min, max, minInclusive, maxInclusive);
  return new NumericRangeFilter<>(wrapped);
}
/** Returns {@code true} if the lower endpoint is inclusive. */
public boolean includesMin() {
  // Pure delegation to the wrapped query.
  return query.includesMin();
}
/** Returns {@code true} if the upper endpoint is inclusive. */
public boolean includesMax() {
  // Pure delegation to the wrapped query.
  return query.includesMax();
}
/** Returns the lower value of this range filter. */
public T getMin() {
  // Pure delegation to the wrapped query.
  return query.getMin();
}
/** Returns the upper value of this range filter. */
public T getMax() {
  // Pure delegation to the wrapped query.
  return query.getMax();
}
/** Returns the precision step. */
public int getPrecisionStep() {
  // Pure delegation to the wrapped query.
  return query.getPrecisionStep();
}
}

View File

@ -41,9 +41,7 @@ import org.apache.lucene.index.Term; // for javadocs
* numeric values using {@link IntField}, {@link
* FloatField}, {@link LongField} or {@link DoubleField} (expert: {@link
* NumericTokenStream}). If your terms are instead textual,
* you should use {@link TermRangeQuery}. {@link
* NumericRangeFilter} is the filter equivalent of this
* query.</p>
* you should use {@link TermRangeQuery}.</p>
*
* <p>You create a new NumericRangeQuery with the static
* factory methods, eg:
@ -72,7 +70,7 @@ import org.apache.lucene.index.Term; // for javadocs
* details.
*
* <p>This query defaults to {@linkplain
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}.
* MultiTermQuery#CONSTANT_SCORE_REWRITE}.
* With precision steps of &le;4, this query can be run with
* one of the BooleanQuery rewrite methods without changing
* BooleanQuery's default max clause count.

View File

@ -1,47 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.Term;
/**
 * A Filter that restricts search results to values that have a matching prefix in a given
 * field.
 */
public class PrefixFilter extends MultiTermQueryWrapperFilter<PrefixQuery> {

  /** Creates a filter matching all documents whose term in {@code prefix}'s field starts with its text. */
  public PrefixFilter(Term prefix) {
    super(new PrefixQuery(prefix));
  }

  /** Returns the prefix term this filter matches against. */
  public Term getPrefix() {
    return query.getPrefix();
  }

  /** Prints a user-readable version of this filter. */
  @Override
  public String toString(String field) {
    // Equivalent to the StringBuilder form: "PrefixFilter(" + term + ")".
    return "PrefixFilter(" + getPrefix().toString() + ")";
  }
}

View File

@ -29,7 +29,7 @@ import org.apache.lucene.util.ToStringUtils;
* is built by QueryParser for input like <code>app*</code>.
*
* <p>This query uses the {@link
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
* MultiTermQuery#CONSTANT_SCORE_REWRITE}
* rewrite method. */
public class PrefixQuery extends MultiTermQuery {
private Term prefix;

View File

@ -97,7 +97,7 @@ public abstract class Query implements Cloneable {
*/
public void extractTerms(Set<Term> terms) {
// needs to be implemented by query subclasses
throw new UnsupportedOperationException();
throw new UnsupportedOperationException(getClass().getName());
}
/** Returns a clone of this query. */

View File

@ -45,14 +45,14 @@ public abstract class ScoringRewrite<Q extends Query> extends TermCollectingRewr
* query. Note that typically such scores are
* meaningless to the user, and require non-trivial CPU
* to compute, so it's almost always better to use {@link
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} instead.
* MultiTermQuery#CONSTANT_SCORE_REWRITE} instead.
*
* <p><b>NOTE</b>: This rewrite method will hit {@link
* BooleanQuery.TooManyClauses} if the number of terms
* exceeds {@link BooleanQuery#getMaxClauseCount}.
*
* @see MultiTermQuery#setRewriteMethod */
public final static ScoringRewrite<BooleanQuery> SCORING_BOOLEAN_QUERY_REWRITE = new ScoringRewrite<BooleanQuery>() {
public final static ScoringRewrite<BooleanQuery> SCORING_BOOLEAN_REWRITE = new ScoringRewrite<BooleanQuery>() {
@Override
protected BooleanQuery getTopLevelQuery() {
return new BooleanQuery(true);
@ -73,7 +73,7 @@ public abstract class ScoringRewrite<Q extends Query> extends TermCollectingRewr
}
};
/** Like {@link #SCORING_BOOLEAN_QUERY_REWRITE} except
/** Like {@link #SCORING_BOOLEAN_REWRITE} except
* scores are not computed. Instead, each matching
* document receives a constant score equal to the
* query's boost.
@ -83,10 +83,10 @@ public abstract class ScoringRewrite<Q extends Query> extends TermCollectingRewr
* exceeds {@link BooleanQuery#getMaxClauseCount}.
*
* @see MultiTermQuery#setRewriteMethod */
public final static RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = new RewriteMethod() {
public final static RewriteMethod CONSTANT_SCORE_BOOLEAN_REWRITE = new RewriteMethod() {
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
final BooleanQuery bq = SCORING_BOOLEAN_QUERY_REWRITE.rewrite(reader, query);
final BooleanQuery bq = SCORING_BOOLEAN_REWRITE.rewrite(reader, query);
// strip the scores off
final Query result = new ConstantScoreQuery(bq);
result.setBoost(query.getBoost());

View File

@ -1,85 +0,0 @@
package org.apache.lucene.search;
import org.apache.lucene.util.BytesRef;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * A Filter that restricts search results to a range of term
 * values in a given field.
 *
 * <p>This filter matches documents whose terms fall into the supplied range,
 * compared in the terms' natural byte order. It is not intended
 * for numerical ranges; use {@link NumericRangeFilter} instead.
 * @since 2.9
 */
public class TermRangeFilter extends MultiTermQueryWrapperFilter<TermRangeQuery> {

  /**
   * @param fieldName The field this range applies to
   * @param lowerTerm The lower bound on this range
   * @param upperTerm The upper bound on this range
   * @param includeLower Does this range include the lower bound?
   * @param includeUpper Does this range include the upper bound?
   * @throws IllegalArgumentException if both terms are null or if
   *  lowerTerm is null and includeLower is true (similar for upperTerm
   *  and includeUpper)
   */
  public TermRangeFilter(String fieldName, BytesRef lowerTerm, BytesRef upperTerm,
                         boolean includeLower, boolean includeUpper) {
    // Argument validation happens inside TermRangeQuery's constructor.
    super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper));
  }

  /**
   * Factory that creates a new TermRangeFilter using Strings for term text.
   */
  public static TermRangeFilter newStringRange(String field, String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
    return new TermRangeFilter(field, toBytesRef(lowerTerm), toBytesRef(upperTerm), includeLower, includeUpper);
  }

  // Converts a term String to its BytesRef form, preserving null (open-ended bound).
  private static BytesRef toBytesRef(String term) {
    return term == null ? null : new BytesRef(term);
  }

  /**
   * Constructs a filter for field <code>fieldName</code> matching
   * less than or equal to <code>upperTerm</code>.
   */
  public static TermRangeFilter Less(String fieldName, BytesRef upperTerm) {
    return new TermRangeFilter(fieldName, null, upperTerm, false, true);
  }

  /**
   * Constructs a filter for field <code>fieldName</code> matching
   * greater than or equal to <code>lowerTerm</code>.
   */
  public static TermRangeFilter More(String fieldName, BytesRef lowerTerm) {
    return new TermRangeFilter(fieldName, lowerTerm, null, true, false);
  }

  /** Returns the lower value of this range filter. */
  public BytesRef getLowerTerm() {
    return query.getLowerTerm();
  }

  /** Returns the upper value of this range filter. */
  public BytesRef getUpperTerm() {
    return query.getUpperTerm();
  }

  /** Returns {@code true} if the lower endpoint is inclusive. */
  public boolean includesLower() {
    return query.includesLower();
  }

  /** Returns {@code true} if the upper endpoint is inclusive. */
  public boolean includesUpper() {
    return query.includesUpper();
  }
}

View File

@ -35,7 +35,7 @@ import org.apache.lucene.util.ToStringUtils;
* for numerical ranges; use {@link NumericRangeQuery} instead.
*
* <p>This query uses the {@link
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
* MultiTermQuery#CONSTANT_SCORE_REWRITE}
* rewrite method.
* @since 2.9
*/

View File

@ -43,7 +43,7 @@ public final class UsageTrackingFilterCachingPolicy implements FilterCachingPoli
// This does not measure the cost of iterating over the filter (for this we
// already have the DocIdSetIterator#cost API) but the cost to build the
// DocIdSet in the first place
return filter instanceof MultiTermQueryWrapperFilter;
return filter instanceof QueryWrapperFilter && ((QueryWrapperFilter) filter).getQuery() instanceof MultiTermQuery;
}
static boolean isCheapToCache(DocIdSet set) {

View File

@ -35,7 +35,7 @@ import org.apache.lucene.util.automaton.Operations;
* a Wildcard term should not start with the wildcard <code>*</code>
*
* <p>This query uses the {@link
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
* MultiTermQuery#CONSTANT_SCORE_REWRITE}
* rewrite method.
*
* @see AutomatonQuery

View File

@ -28,7 +28,6 @@ import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
/**
@ -57,7 +56,7 @@ import org.apache.lucene.search.NumericRangeQuery; // for javadocs
* <p>For easy usage, the trie algorithm is implemented for indexing inside
* {@link NumericTokenStream} that can index <code>int</code>, <code>long</code>,
* <code>float</code>, and <code>double</code>. For querying,
* {@link NumericRangeQuery} and {@link NumericRangeFilter} implement the query part
* {@link NumericRangeQuery} implements the query part
* for the same data types.
*
* <p>This class can also be used, to generate lexicographically sortable (according to
@ -74,7 +73,7 @@ public final class NumericUtils {
/**
* The default precision step used by {@link LongField},
* {@link DoubleField}, {@link NumericTokenStream}, {@link
* NumericRangeQuery}, and {@link NumericRangeFilter}.
* NumericRangeQuery}.
*/
public static final int PRECISION_STEP_DEFAULT = 16;

View File

@ -96,13 +96,13 @@ public class TestAutomatonQuery extends LuceneTestCase {
throws IOException {
AutomatonQuery query = new AutomatonQuery(newTerm("bogus"), automaton);
query.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
}

View File

@ -102,13 +102,13 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
throws IOException {
AutomatonQuery query = new AutomatonQuery(newTerm("bogus"), automaton);
query.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
assertEquals(expected, automatonQueryNrHits(query));
}

View File

@ -166,7 +166,7 @@ public class TestBooleanQuery extends LuceneTestCase {
BooleanQuery query = new BooleanQuery(); // Query: +foo -ba*
query.add(new TermQuery(new Term("field", "foo")), BooleanClause.Occur.MUST);
WildcardQuery wildcardQuery = new WildcardQuery(new Term("field", "ba*"));
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
query.add(wildcardQuery, BooleanClause.Occur.MUST_NOT);
MultiReader multireader = new MultiReader(reader1, reader2);

View File

@ -105,7 +105,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
/** test null docidset */
public void testEmpty3() throws Exception {
Filter expected = new PrefixFilter(new Term("bogusField", "bogusVal"));
Filter expected = new QueryWrapperFilter(new PrefixQuery(new Term("bogusField", "bogusVal")));
Filter actual = new CachingWrapperFilter(expected, MAYBE_CACHE_POLICY);
assertFilterEquals(expected, actual);
}
@ -266,9 +266,18 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
// not cacheable:
assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false);
// returns default empty docidset, always cacheable:
assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true);
assertDocIdSetCacheable(reader, new Filter() {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) {
return null;
}
@Override
public String toString(String field) {
return "cacheableFilter";
}
}, true);
// is cacheable:
assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", 10, 20, true, true), false);
assertDocIdSetCacheable(reader, new QueryWrapperFilter(NumericRangeQuery.newIntRange("test", 10, 20, true, true)), false);
// a fixedbitset filter is always cacheable
assertDocIdSetCacheable(reader, new Filter() {
@Override

View File

@ -25,7 +25,6 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@ -37,7 +36,7 @@ public class TestConstantScoreQuery extends LuceneTestCase {
public void testCSQ() throws Exception {
final Query q1 = new ConstantScoreQuery(new TermQuery(new Term("a", "b")));
final Query q2 = new ConstantScoreQuery(new TermQuery(new Term("a", "c")));
final Query q3 = new ConstantScoreQuery(TermRangeFilter.newStringRange("a", "b", "c", true, true));
final Query q3 = new ConstantScoreQuery(TermRangeQuery.newStringRange("a", "b", "c", true, true));
QueryUtils.check(q1);
QueryUtils.check(q2);
QueryUtils.checkEqual(q1,q1);

View File

@ -57,15 +57,15 @@ public class TestDateFilter extends LuceneTestCase {
// filter that should preserve matches
// DateFilter df1 = DateFilter.Before("datefield", now);
TermRangeFilter df1 = TermRangeFilter.newStringRange("datefield", DateTools
Filter df1 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now - 2000, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now, DateTools.Resolution.MILLISECOND), false, true);
.timeToString(now, DateTools.Resolution.MILLISECOND), false, true));
// filter that should discard matches
// DateFilter df2 = DateFilter.Before("datefield", now - 999999);
TermRangeFilter df2 = TermRangeFilter.newStringRange("datefield", DateTools
Filter df2 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(0, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now - 2000, DateTools.Resolution.MILLISECOND), true,
false);
false));
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
@ -120,16 +120,16 @@ public class TestDateFilter extends LuceneTestCase {
// filter that should preserve matches
// DateFilter df1 = DateFilter.After("datefield", now);
TermRangeFilter df1 = TermRangeFilter.newStringRange("datefield", DateTools
Filter df1 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now, DateTools.Resolution.MILLISECOND), DateTools
.timeToString(now + 999999, DateTools.Resolution.MILLISECOND), true,
false);
false));
// filter that should discard matches
// DateFilter df2 = DateFilter.After("datefield", now + 999999);
TermRangeFilter df2 = TermRangeFilter.newStringRange("datefield", DateTools
Filter df2 = new QueryWrapperFilter(TermRangeQuery.newStringRange("datefield", DateTools
.timeToString(now + 999999, DateTools.Resolution.MILLISECOND),
DateTools.timeToString(now + 999999999,
DateTools.Resolution.MILLISECOND), false, true);
DateTools.Resolution.MILLISECOND), false, true));
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));

View File

@ -39,9 +39,9 @@ import org.apache.lucene.util.automaton.RegExp;
import org.apache.lucene.util.UnicodeUtil;
/**
* Tests the DocTermOrdsRewriteMethod
* Tests the DocValuesRewriteMethod
*/
public class TestDocTermOrdsRewriteMethod extends LuceneTestCase {
public class TestDocValuesRewriteMethod extends LuceneTestCase {
protected IndexSearcher searcher1;
protected IndexSearcher searcher2;
private IndexReader reader;
@ -115,7 +115,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase {
*/
protected void assertSame(String regexp) throws IOException {
RegexpQuery docValues = new RegexpQuery(new Term(fieldName, regexp), RegExp.NONE);
docValues.setRewriteMethod(new DocTermOrdsRewriteMethod());
docValues.setRewriteMethod(new DocValuesRewriteMethod());
RegexpQuery inverted = new RegexpQuery(new Term(fieldName, regexp), RegExp.NONE);
TopDocs invertedDocs = searcher1.search(inverted, 25);
@ -131,9 +131,9 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase {
assertEquals(a1, a2);
assertFalse(a1.equals(b));
a1.setRewriteMethod(new DocTermOrdsRewriteMethod());
a2.setRewriteMethod(new DocTermOrdsRewriteMethod());
b.setRewriteMethod(new DocTermOrdsRewriteMethod());
a1.setRewriteMethod(new DocValuesRewriteMethod());
a2.setRewriteMethod(new DocValuesRewriteMethod());
b.setRewriteMethod(new DocValuesRewriteMethod());
assertEquals(a1, a2);
assertFalse(a1.equals(b));
QueryUtils.check(a1);

View File

@ -34,7 +34,7 @@ public class TestFieldCacheRewriteMethod extends TestRegexpRandom2 {
fieldCache.setRewriteMethod(new DocValuesRewriteMethod());
RegexpQuery filter = new RegexpQuery(new Term(fieldName, regexp), RegExp.NONE);
filter.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
filter.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
TopDocs fieldCacheDocs = searcher1.search(fieldCache, 25);
TopDocs filterDocs = searcher2.search(filter, 25);

View File

@ -320,21 +320,21 @@ public class TestFilteredQuery extends LuceneTestCase {
public void testEqualsHashcode() throws Exception {
// some tests before, if the used queries and filters work:
assertEquals(new PrefixFilter(new Term("field", "o")), new PrefixFilter(new Term("field", "o")));
assertFalse(new PrefixFilter(new Term("field", "a")).equals(new PrefixFilter(new Term("field", "o"))));
assertEquals(new PrefixQuery(new Term("field", "o")), new PrefixQuery(new Term("field", "o")));
assertFalse(new PrefixQuery(new Term("field", "a")).equals(new PrefixQuery(new Term("field", "o"))));
QueryUtils.checkHashEquals(new TermQuery(new Term("field", "one")));
QueryUtils.checkUnequal(
new TermQuery(new Term("field", "one")), new TermQuery(new Term("field", "two"))
);
// now test FilteredQuery equals/hashcode:
QueryUtils.checkHashEquals(new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o"))));
QueryUtils.checkHashEquals(new FilteredQuery(new TermQuery(new Term("field", "one")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "o")))));
QueryUtils.checkUnequal(
new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o"))),
new FilteredQuery(new TermQuery(new Term("field", "two")), new PrefixFilter(new Term("field", "o")))
new FilteredQuery(new TermQuery(new Term("field", "one")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "o")))),
new FilteredQuery(new TermQuery(new Term("field", "two")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "o"))))
);
QueryUtils.checkUnequal(
new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "a"))),
new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o")))
new FilteredQuery(new TermQuery(new Term("field", "one")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "a")))),
new FilteredQuery(new TermQuery(new Term("field", "one")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "o"))))
);
}
@ -352,7 +352,7 @@ public class TestFilteredQuery extends LuceneTestCase {
// pass
}
try {
new FilteredQuery(null, new PrefixFilter(new Term("field", "o")));
new FilteredQuery(null, new QueryWrapperFilter(new PrefixQuery(new Term("field", "o"))));
fail("Should throw IllegalArgumentException");
} catch (IllegalArgumentException iae) {
// pass
@ -390,13 +390,13 @@ public class TestFilteredQuery extends LuceneTestCase {
}
public void testRewrite() throws Exception {
assertRewrite(new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o")), randomFilterStrategy()), FilteredQuery.class);
assertRewrite(new FilteredQuery(new PrefixQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o")), randomFilterStrategy()), FilteredQuery.class);
assertRewrite(new FilteredQuery(new TermQuery(new Term("field", "one")), new CachingWrapperFilter(new QueryWrapperFilter(new PrefixQuery(new Term("field", "o")))), randomFilterStrategy()), FilteredQuery.class);
assertRewrite(new FilteredQuery(new PrefixQuery(new Term("field", "one")), new CachingWrapperFilter(new QueryWrapperFilter(new PrefixQuery(new Term("field", "o")))), randomFilterStrategy()), FilteredQuery.class);
}
public void testGetFilterStrategy() {
FilterStrategy randomFilterStrategy = randomFilterStrategy();
FilteredQuery filteredQuery = new FilteredQuery(new TermQuery(new Term("field", "one")), new PrefixFilter(new Term("field", "o")), randomFilterStrategy);
FilteredQuery filteredQuery = new FilteredQuery(new TermQuery(new Term("field", "one")), new QueryWrapperFilter(new PrefixQuery(new Term("field", "o"))), randomFilterStrategy);
assertSame(randomFilterStrategy, filteredQuery.getFilterStrategy());
}

View File

@ -87,7 +87,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** macro for readability */
public static Query csrq(String f, String l, String h, boolean il, boolean ih) {
TermRangeQuery query = TermRangeQuery.newStringRange(f, l, h, il, ih);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
if (VERBOSE) {
System.out.println("TEST: query=" + query);
}
@ -106,14 +106,14 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
/** macro for readability */
public static Query cspq(Term prefix) {
PrefixQuery query = new PrefixQuery(prefix);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
return query;
}
/** macro for readability */
public static Query cswcq(Term wild) {
WildcardQuery query = new WildcardQuery(wild);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
query.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
return query;
}
@ -152,7 +152,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result[i].score, SCORE_COMP_THRESH);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), 1000).scoreDocs;
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE), 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
@ -160,7 +160,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result[i].score, SCORE_COMP_THRESH);
}
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), 1000).scoreDocs;
result = search.search(csrq("data", "1", "6", T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 6, numHits);
for (int i = 0; i < numHits; i++) {
@ -193,7 +193,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
bq = new BooleanQuery();
bq.add(dummyTerm, BooleanClause.Occur.SHOULD); // hits one doc
bq.add(csrq("data", "#", "#", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE), BooleanClause.Occur.SHOULD); // hits no docs
bq.add(csrq("data", "#", "#", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE), BooleanClause.Occur.SHOULD); // hits no docs
result = search.search(bq, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 1, numHits);
@ -204,7 +204,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
bq = new BooleanQuery();
bq.add(dummyTerm, BooleanClause.Occur.SHOULD); // hits one doc
bq.add(csrq("data", "#", "#", T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), BooleanClause.Occur.SHOULD); // hits no docs
bq.add(csrq("data", "#", "#", T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), BooleanClause.Occur.SHOULD); // hits no docs
result = search.search(bq, 1000).scoreDocs;
numHits = result.length;
assertEquals("wrong number of results", 1, numHits);
@ -264,9 +264,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
Assert.assertEquals(0, hits[1].doc);
assertTrue(hits[0].score > hits[1].score);
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #0
q1 = csrq("data", "A", "A", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE); // matches document #0
q1.setBoost(.1f);
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE); // matches document #1
q2 = csrq("data", "Z", "Z", T, T, MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE); // matches document #1
bq = new BooleanQuery(true);
bq.add(q1, BooleanClause.Occur.SHOULD);
bq.add(q2, BooleanClause.Occur.SHOULD);
@ -347,37 +347,37 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result = search.search(csrq("id", minIP, maxIP, T, T), numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F), numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, maxIP, T, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T), numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, maxIP, F, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F), numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T), numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", medIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T), numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id
@ -405,49 +405,49 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
result = search.search(csrq("id", minIP, minIP, F, F), numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, minIP, F, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F), numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", medIP, medIP, F, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F), numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", maxIP, maxIP, F, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(csrq("id", minIP, minIP, T, T), numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", minIP, minIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T), numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", null, minIP, F, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T), numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", maxIP, maxIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F), numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", maxIP, null, T, F, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T), numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE), numDocs).scoreDocs;
result = search.search(csrq("id", medIP, medIP, T, T, MultiTermQuery.CONSTANT_SCORE_REWRITE), numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}

View File

@ -131,9 +131,9 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase {
}
public void testRewritesWithDuplicateTerms() throws Exception {
checkDuplicateTerms(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
checkDuplicateTerms(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
checkDuplicateTerms(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
checkDuplicateTerms(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
// use a large PQ here to only test duplicate terms and dont mix up when all scores are equal
checkDuplicateTerms(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(1024));
@ -187,7 +187,7 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase {
}
public void testBoosts() throws Exception {
checkBoosts(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
checkBoosts(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
// use a large PQ here to only test boosts and dont mix up when all scores are equal
checkBoosts(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(1024));
@ -225,10 +225,10 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase {
}
public void testMaxClauseLimitations() throws Exception {
checkMaxClauseLimitation(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
checkMaxClauseLimitation(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
checkMaxClauseLimitation(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
checkMaxClauseLimitation(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
checkNoMaxClauseLimitation(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
checkNoMaxClauseLimitation(MultiTermQuery.CONSTANT_SCORE_REWRITE);
checkNoMaxClauseLimitation(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(1024));
checkNoMaxClauseLimitation(new MultiTermQuery.TopTermsBoostOnlyBooleanQueryRewrite(1024));
}

View File

@ -150,25 +150,20 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
int count=3000;
int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange(field, precisionStep, lower, upper, true, true);
for (byte i=0; i<3; i++) {
for (byte i=0; i<2; i++) {
TopDocs topDocs;
String type;
switch (i) {
case 0:
type = " (constant score filter rewrite)";
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
break;
case 1:
type = " (constant score boolean rewrite)";
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
break;
case 2:
type = " (filter)";
topDocs = searcher.search(new FilteredQuery(new MatchAllDocsQuery(), f), noDocs, Sort.INDEXORDER);
break;
default:
return;
}
@ -197,17 +192,6 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
testRange(2);
}
@Test
public void testInverseRange() throws Exception {
LeafReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getContext();
NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
assertNull("A inverse range should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
assertNull("A exclusive range starting with Integer.MAX_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, null, Integer.MIN_VALUE, false, false);
assertNull("A exclusive range ending with Integer.MIN_VALUE should return the null instance", f.getDocIdSet(context, context.reader().getLiveDocs()));
}
@Test
public void testOneMatchQuery() throws Exception {
NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
@ -547,11 +531,6 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
Filter tf=NumericRangeFilter.newFloatRange(field, precisionStep,
NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
tTopDocs = searcher.search(new FilteredQuery(new MatchAllDocsQuery(), tf), 1);
assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
}
@Test

View File

@ -159,25 +159,20 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
int count=3000;
long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange(field, precisionStep, lower, upper, true, true);
for (byte i=0; i<3; i++) {
for (byte i=0; i<2; i++) {
TopDocs topDocs;
String type;
switch (i) {
case 0:
type = " (constant score filter rewrite)";
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
break;
case 1:
type = " (constant score boolean rewrite)";
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
break;
case 2:
type = " (filter)";
topDocs = searcher.search(new FilteredQuery(new MatchAllDocsQuery(), f), noDocs, Sort.INDEXORDER);
break;
default:
return;
}
@ -211,20 +206,6 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
testRange(2);
}
@Test
public void testInverseRange() throws Exception {
LeafReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getContext();
NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
assertNull("A inverse range should return the null instance",
f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newLongRange("field8", 8, Long.MAX_VALUE, null, false, false);
assertNull("A exclusive range starting with Long.MAX_VALUE should return the null instance",
f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newLongRange("field8", 8, null, Long.MIN_VALUE, false, false);
assertNull("A exclusive range ending with Long.MIN_VALUE should return the null instance",
f.getDocIdSet(context, context.reader().getLiveDocs()));
}
@Test
public void testOneMatchQuery() throws Exception {
NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
@ -584,11 +565,6 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
Filter tf=NumericRangeFilter.newDoubleRange(field, precisionStep,
NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
tTopDocs = searcher.search(new FilteredQuery(new MatchAllDocsQuery(), tf), 1);
assertEquals("Returned count of range filter must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
}
@Test

View File

@ -1,107 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.Document;
/**
 * Unit tests for the {@link PrefixFilter} class, always wrapped in a
 * {@link ConstantScoreQuery}. Verifies hit counts for prefixes that match
 * all values, a middle subset, the first and last values, no values
 * (before, between and after the indexed terms), the empty prefix
 * (matches everything), and a field that does not exist.
 */
public class TestPrefixFilter extends LuceneTestCase {
public void testPrefixFilter() throws Exception {
Directory directory = newDirectory();
// Four hierarchical category paths; the prefix filters below select subsets of them.
String[] categories = new String[] {"/Computers/Linux",
"/Computers/Mac/One",
"/Computers/Mac/Two",
"/Computers/Windows"};
RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
// Index one document per category value.
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(newStringField("category", categories[i], Field.Store.YES));
writer.addDocument(doc);
}
IndexReader reader = writer.getReader();
// PrefixFilter combined with ConstantScoreQuery: "/Computers" is a prefix of all four values
PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
Query query = new ConstantScoreQuery(filter);
IndexSearcher searcher = newSearcher(reader);
ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
assertEquals(4, hits.length);
// test middle of values: matches the two "/Computers/Mac/..." docs
filter = new PrefixFilter(new Term("category", "/Computers/Mac"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(2, hits.length);
// test start of values
filter = new PrefixFilter(new Term("category", "/Computers/Linux"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(1, hits.length);
// test end of values
filter = new PrefixFilter(new Term("category", "/Computers/Windows"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(1, hits.length);
// test non-existent prefix
filter = new PrefixFilter(new Term("category", "/Computers/ObsoleteOS"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(0, hits.length);
// test non-existent prefix that sorts before all indexed values
filter = new PrefixFilter(new Term("category", "/Computers/AAA"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(0, hits.length);
// test non-existent prefix that sorts after all indexed values
filter = new PrefixFilter(new Term("category", "/Computers/ZZZ"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(0, hits.length);
// test zero length prefix: every value matches the empty prefix
filter = new PrefixFilter(new Term("category", ""));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(4, hits.length);
// test non existent field: no terms, so no hits
filter = new PrefixFilter(new Term("nonexistantfield", "/Computers"));
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, 1000).scoreDocs;
assertEquals(0, hits.length);
writer.close();
reader.close();
directory.close();
}
}

View File

@ -1,213 +0,0 @@
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.junit.Test;
/**
 * A basic 'positive' Unit test class for the TermRangeFilter class.
 *
 * <p>
 * NOTE: at the moment, this class only tests for 'positive' results, it does
 * not verify the results to ensure there are no 'false positives', nor does it
 * adequately test 'negative' results. It also does not test that garbage in
 * results in an Exception.
 *
 * <p>Relies on the index built by {@link BaseTestRangeFilter}: one document per
 * id in {@code [minId, maxId]}, with zero-padded string forms produced by
 * {@code pad(...)} so lexicographic term order matches numeric order.
 * {@code T}/{@code F} are the base class' shorthands for the
 * includeLower/includeUpper booleans.
 */
public class TestTermRangeFilter extends BaseTestRangeFilter {
/**
 * Exercises ranges over the sequential "id" field: fully bounded,
 * half-open (null endpoint = unbounded), and degenerate single-value /
 * empty ranges, checking only the expected hit counts.
 */
@Test
public void testRangeFilterId() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = newSearcher(reader);
int medId = ((maxId - minId) / 2);
String minIP = pad(minId);
String maxIP = pad(maxId);
String medIP = pad(medId);
int numDocs = reader.numDocs();
// Sanity: exactly one doc per id in [minId, maxId].
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
// Every doc contains "body"; the filter alone determines the hit count.
Query q = new TermQuery(new Term("body", "body"));
// test id, bounded on both ends
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, maxIP, T, T)),
numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, maxIP, T, F)),
numDocs).scoreDocs;
assertEquals("all but last", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, maxIP, F, T)),
numDocs).scoreDocs;
assertEquals("all but first", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, maxIP, F, F)),
numDocs).scoreDocs;
assertEquals("all but ends", numDocs - 2, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", medIP, maxIP, T, T)),
numDocs).scoreDocs;
assertEquals("med and up", 1 + maxId - medId, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, medIP, T, T)),
numDocs).scoreDocs;
assertEquals("up to med", 1 + medId - minId, result.length);
// unbounded id (a null endpoint means the range is open on that side)
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, null, T, F)),
numDocs).scoreDocs;
assertEquals("min and up", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", null, maxIP, F, T)),
numDocs).scoreDocs;
assertEquals("max and down", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, null, F, F)),
numDocs).scoreDocs;
assertEquals("not min, but up", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", null, maxIP, F, F)),
numDocs).scoreDocs;
assertEquals("not max, but down", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", medIP, maxIP, T, F)),
numDocs).scoreDocs;
assertEquals("med and up, not max", maxId - medId, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, medIP, F, T)),
numDocs).scoreDocs;
assertEquals("not min, up to med", medId - minId, result.length);
// very small sets (single-value ranges; exclusive bounds make them empty)
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, minIP, F, F)),
numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", medIP, medIP, F, F)),
numDocs).scoreDocs;
assertEquals("med,med,F,F", 0, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", maxIP, maxIP, F, F)),
numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", minIP, minIP, T, T)),
numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", null, minIP, F, T)),
numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", maxIP, maxIP, T, T)),
numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", maxIP, null, T, F)),
numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("id", medIP, medIP, T, T)),
numDocs).scoreDocs;
assertEquals("med,med,T,T", 1, result.length);
}
/**
 * Same bounded/unbounded/degenerate range coverage as
 * {@link #testRangeFilterId()}, but over the random "rand" field, whose
 * extremes are taken from the test index metadata (minR/maxR).
 */
@Test
public void testRangeFilterRand() throws IOException {
IndexReader reader = signedIndexReader;
IndexSearcher search = newSearcher(reader);
String minRP = pad(signedIndexDir.minR);
String maxRP = pad(signedIndexDir.maxR);
int numDocs = reader.numDocs();
assertEquals("num of docs", numDocs, 1 + maxId - minId);
ScoreDoc[] result;
Query q = new TermQuery(new Term("body", "body"));
// test extremes, bounded on both ends
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, maxRP, T, T)),
numDocs).scoreDocs;
assertEquals("find all", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, maxRP, T, F)),
numDocs).scoreDocs;
assertEquals("all but biggest", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, maxRP, F, T)),
numDocs).scoreDocs;
assertEquals("all but smallest", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, maxRP, F, F)),
numDocs).scoreDocs;
assertEquals("all but extremes", numDocs - 2, result.length);
// unbounded (null endpoint = open range on that side)
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, null, T, F)),
numDocs).scoreDocs;
assertEquals("smallest and up", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", null, maxRP, F, T)),
numDocs).scoreDocs;
assertEquals("biggest and down", numDocs, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, null, F, F)),
numDocs).scoreDocs;
assertEquals("not smallest, but up", numDocs - 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", null, maxRP, F, F)),
numDocs).scoreDocs;
assertEquals("not biggest, but down", numDocs - 1, result.length);
// very small sets (single-value ranges; exclusive bounds make them empty)
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, minRP, F, F)),
numDocs).scoreDocs;
assertEquals("min,min,F,F", 0, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", maxRP, maxRP, F, F)),
numDocs).scoreDocs;
assertEquals("max,max,F,F", 0, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", minRP, minRP, T, T)),
numDocs).scoreDocs;
assertEquals("min,min,T,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", null, minRP, F, T)),
numDocs).scoreDocs;
assertEquals("nul,min,F,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", maxRP, maxRP, T, T)),
numDocs).scoreDocs;
assertEquals("max,max,T,T", 1, result.length);
result = search.search(new FilteredQuery(q, TermRangeFilter.newStringRange("rand", maxRP, null, T, F)),
numDocs).scoreDocs;
assertEquals("max,nul,T,T", 1, result.length);
}
}

View File

@ -36,8 +36,8 @@ public class TestUsageTrackingFilterCachingPolicy extends LuceneTestCase {
}
public void testCostlyFilter() {
assertTrue(UsageTrackingFilterCachingPolicy.isCostly(new PrefixFilter(new Term("field", "prefix"))));
assertTrue(UsageTrackingFilterCachingPolicy.isCostly(NumericRangeFilter.newIntRange("intField", 8, 1, 1000, true, true)));
assertTrue(UsageTrackingFilterCachingPolicy.isCostly(new QueryWrapperFilter(new PrefixQuery(new Term("field", "prefix")))));
assertTrue(UsageTrackingFilterCachingPolicy.isCostly(new QueryWrapperFilter(NumericRangeQuery.newIntRange("intField", 8, 1, 1000, true, true))));
assertFalse(UsageTrackingFilterCachingPolicy.isCostly(new QueryWrapperFilter(new TermQuery(new Term("field", "value")))));
}

View File

@ -75,19 +75,19 @@ public class TestWildcard
MultiTermQuery wq = new WildcardQuery(new Term("field", "nowildcard"));
assertMatches(searcher, wq, 1);
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
wq.setBoost(0.1F);
Query q = searcher.rewrite(wq);
assertTrue(q instanceof TermQuery);
assertEquals(q.getBoost(), wq.getBoost(), 0);
wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
wq.setBoost(0.2F);
q = searcher.rewrite(wq);
assertTrue(q instanceof ConstantScoreQuery);
assertTrue(q instanceof MultiTermQueryConstantScoreWrapper);
assertEquals(q.getBoost(), wq.getBoost(), 0.1);
wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
wq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
wq.setBoost(0.4F);
q = searcher.rewrite(wq);
assertTrue(q instanceof ConstantScoreQuery);
@ -105,7 +105,7 @@ public class TestWildcard
IndexSearcher searcher = newSearcher(reader);
MultiTermQuery wq = new WildcardQuery(new Term("field", ""));
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
wq.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertMatches(searcher, wq, 0);
Query q = searcher.rewrite(wq);
assertTrue(q instanceof BooleanQuery);

View File

@ -48,7 +48,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.SortField;
@ -184,7 +183,7 @@ public class DistanceFacetsExample implements Closeable {
BooleanQuery f = new BooleanQuery();
// Add latitude range filter:
f.add(NumericRangeFilter.newDoubleRange("latitude", Math.toDegrees(minLat), Math.toDegrees(maxLat), true, true),
f.add(NumericRangeQuery.newDoubleRange("latitude", Math.toDegrees(minLat), Math.toDegrees(maxLat), true, true),
BooleanClause.Occur.FILTER);
// Add longitude range filter:
@ -198,7 +197,7 @@ public class DistanceFacetsExample implements Closeable {
BooleanClause.Occur.SHOULD);
f.add(lonF, BooleanClause.Occur.MUST);
} else {
f.add(NumericRangeFilter.newDoubleRange("longitude", Math.toDegrees(minLng), Math.toDegrees(maxLng), true, true),
f.add(NumericRangeQuery.newDoubleRange("longitude", Math.toDegrees(minLng), Math.toDegrees(maxLng), true, true),
BooleanClause.Occur.FILTER);
}

View File

@ -26,6 +26,7 @@ import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredDocIdSet;
import org.apache.lucene.util.Bits;
/** Represents a range over long values.
@ -113,53 +114,34 @@ public final class LongRange extends Range {
final int maxDoc = context.reader().maxDoc();
final Bits fastMatchBits;
final DocIdSet fastMatchDocs;
if (fastMatchFilter != null) {
DocIdSet dis = fastMatchFilter.getDocIdSet(context, null);
if (dis == null) {
fastMatchDocs = fastMatchFilter.getDocIdSet(context, null);
if (fastMatchDocs == null) {
// No documents match
return null;
}
fastMatchBits = dis.bits();
if (fastMatchBits == null) {
throw new IllegalArgumentException("fastMatchFilter does not implement DocIdSet.bits");
}
} else {
fastMatchBits = null;
fastMatchDocs = new DocIdSet() {
@Override
public long ramBytesUsed() {
return 0;
}
@Override
public DocIdSetIterator iterator() throws IOException {
return DocIdSetIterator.all(maxDoc);
}
};
}
return new DocIdSet() {
return new FilteredDocIdSet(fastMatchDocs) {
@Override
public Bits bits() {
return new Bits() {
@Override
public boolean get(int docID) {
protected boolean match(int docID) {
if (acceptDocs != null && acceptDocs.get(docID) == false) {
return false;
}
if (fastMatchBits != null && fastMatchBits.get(docID) == false) {
return false;
}
return accept(values.longVal(docID));
}
@Override
public int length() {
return maxDoc;
}
};
}
@Override
public DocIdSetIterator iterator() {
throw new UnsupportedOperationException("this filter can only be accessed via bits()");
}
@Override
public long ramBytesUsed() {
return 0L;
}
};
}
};

View File

@ -77,34 +77,39 @@ public class LongRangeFacetCounts extends RangeFacetCounts {
FunctionValues fv = valueSource.getValues(Collections.emptyMap(), hits.context);
totCount += hits.totalHits;
Bits bits;
final DocIdSetIterator fastMatchDocs;
if (fastMatchFilter != null) {
DocIdSet dis = fastMatchFilter.getDocIdSet(hits.context, null);
if (dis == null) {
// No documents match
continue;
}
bits = dis.bits();
if (bits == null) {
throw new IllegalArgumentException("fastMatchFilter does not implement DocIdSet.bits");
}
fastMatchDocs = dis.iterator();
} else {
bits = null;
fastMatchDocs = null;
}
DocIdSetIterator docs = hits.bits.iterator();
int doc;
while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (bits != null && bits.get(doc) == false) {
doc++;
for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
if (fastMatchDocs != null) {
int fastMatchDoc = fastMatchDocs.docID();
if (fastMatchDoc < doc) {
fastMatchDoc = fastMatchDocs.advance(doc);
}
if (doc != fastMatchDoc) {
doc = docs.advance(fastMatchDoc);
continue;
}
}
// Skip missing docs:
if (fv.exists(doc)) {
counter.add(fv.longVal(doc));
} else {
missingCount++;
}
doc = docs.nextDoc();
}
}

View File

@ -22,7 +22,7 @@ import org.apache.lucene.facet.DrillSideways; // javadocs
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilteredQuery; // javadocs
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.NumericRangeQuery;
/** Base class for a single labeled range.
*
@ -48,7 +48,7 @@ public abstract class Range {
* FilteredQuery#QUERY_FIRST_FILTER_STRATEGY}. If the
* {@link ValueSource} is static, e.g. an indexed numeric
* field, then it may be more efficient to use {@link
* NumericRangeFilter}. The provided fastMatchFilter,
* NumericRangeQuery}. The provided fastMatchFilter,
* if non-null, will first be consulted, and only if
* that is set for each document will the range then be
* checked. */
@ -61,7 +61,7 @@ public abstract class Range {
* {@link FilteredQuery} using its {@link
* FilteredQuery#QUERY_FIRST_FILTER_STRATEGY}. If the
* {@link ValueSource} is static, e.g. an indexed numeric
* field, then it may be more efficient to use {@link NumericRangeFilter}. */
* field, then it may be more efficient to use {@link NumericRangeQuery}. */
public Filter getFilter(ValueSource valueSource) {
return getFilter(null, valueSource);
}

View File

@ -31,8 +31,8 @@ import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
import org.apache.lucene.facet.DrillSideways;
import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
import org.apache.lucene.facet.FacetField;
import org.apache.lucene.facet.FacetResult;
import org.apache.lucene.facet.FacetTestCase;
@ -44,10 +44,10 @@ import org.apache.lucene.facet.MultiFacets;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
@ -62,7 +62,6 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.FilterCachingPolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.store.Directory;
@ -481,9 +480,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Filter fastMatchFilter;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchFilter = NumericRangeFilter.newLongRange("field", minValue, maxValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newLongRange("field", minValue, maxValue, true, true));
} else {
fastMatchFilter = NumericRangeFilter.newLongRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newLongRange("field", minAcceptedValue, maxAcceptedValue, true, true));
}
} else {
fastMatchFilter = null;
@ -505,11 +504,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
if (random().nextBoolean()) {
ddq.add("field", NumericRangeFilter.newLongRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
} else {
ddq.add("field", NumericRangeQuery.newLongRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
}
} else {
ddq.add("field", range.getFilter(fastMatchFilter, vs));
}
@ -640,9 +635,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Filter fastMatchFilter;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchFilter = NumericRangeFilter.newFloatRange("field", minValue, maxValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newFloatRange("field", minValue, maxValue, true, true));
} else {
fastMatchFilter = NumericRangeFilter.newFloatRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newFloatRange("field", minAcceptedValue, maxAcceptedValue, true, true));
}
} else {
fastMatchFilter = null;
@ -664,11 +659,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
if (random().nextBoolean()) {
ddq.add("field", NumericRangeFilter.newFloatRange("field", (float) range.min, (float) range.max, range.minInclusive, range.maxInclusive));
} else {
ddq.add("field", NumericRangeQuery.newFloatRange("field", (float) range.min, (float) range.max, range.minInclusive, range.maxInclusive));
}
} else {
ddq.add("field", range.getFilter(fastMatchFilter, vs));
}
@ -783,9 +774,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Filter fastMatchFilter;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchFilter = NumericRangeFilter.newDoubleRange("field", minValue, maxValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newDoubleRange("field", minValue, maxValue, true, true));
} else {
fastMatchFilter = NumericRangeFilter.newDoubleRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchFilter = new QueryWrapperFilter(NumericRangeQuery.newDoubleRange("field", minAcceptedValue, maxAcceptedValue, true, true));
}
} else {
fastMatchFilter = null;
@ -807,11 +798,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
if (random().nextBoolean()) {
ddq.add("field", NumericRangeFilter.newDoubleRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
} else {
ddq.add("field", NumericRangeQuery.newDoubleRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
}
} else {
ddq.add("field", range.getFilter(fastMatchFilter, vs));
}

View File

@ -230,7 +230,7 @@ public class WeightedSpanTermExtractor {
return;
}
MultiTermQuery copy = (MultiTermQuery) query.clone();
copy.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
copy.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
origQuery = copy;
}
final IndexReader reader = getLeafContext().reader();

View File

@ -17,8 +17,6 @@ package org.apache.lucene.search.highlight;
* limitations under the License.
*/
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
@ -31,6 +29,9 @@ import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CachingTokenFilter;
@ -61,7 +62,6 @@ import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiPhraseQuery;
@ -73,7 +73,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.WildcardQuery;
@ -926,7 +925,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void run() throws Exception {
numHighlights = 0;
FuzzyQuery fuzzyQuery = new FuzzyQuery(new Term(FIELD_NAME, "kinnedy"), 2);
fuzzyQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
fuzzyQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
doSearching(fuzzyQuery);
doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this, true);
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
@ -944,7 +943,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void run() throws Exception {
numHighlights = 0;
WildcardQuery wildcardQuery = new WildcardQuery(new Term(FIELD_NAME, "k?nnedy"));
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
doSearching(wildcardQuery);
doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
@ -962,7 +961,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
public void run() throws Exception {
numHighlights = 0;
WildcardQuery wildcardQuery = new WildcardQuery(new Term(FIELD_NAME, "k*dy"));
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
doSearching(wildcardQuery);
doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
@ -989,7 +988,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
new BytesRef("kannedy"),
new BytesRef("kznnedy"),
true, true);
rangeQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
rangeQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
query = rangeQuery;
doSearching(query);
@ -1008,7 +1007,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
numHighlights = 0;
query = new WildcardQuery(new Term(FIELD_NAME, "ken*"));
((WildcardQuery)query).setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
((WildcardQuery)query).setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
searcher = newSearcher(reader);
// can't rewrite ConstantScore if you want to highlight it -
// it rewrites to ConstantScoreQuery which cannot be highlighted
@ -1149,13 +1148,14 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
@Override
public void run() throws Exception {
numHighlights = 0;
TermRangeFilter rf = TermRangeFilter.newStringRange("contents", "john", "john", true, true);
SpanQuery clauses[] = { new SpanTermQuery(new Term("contents", "john")),
new SpanTermQuery(new Term("contents", "kennedy")), };
SpanNearQuery snq = new SpanNearQuery(clauses, 1, true);
FilteredQuery fq = new FilteredQuery(snq, rf);
BooleanQuery bq = new BooleanQuery();
bq.add(snq, Occur.MUST);
bq.add(TermRangeQuery.newStringRange("contents", "john", "john", true, true), Occur.FILTER);
doSearching(fq);
doSearching(bq);
doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
// Currently highlights "John" and "Kennedy" separately
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
@ -1172,13 +1172,14 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
@Override
public void run() throws Exception {
numHighlights = 0;
TermRangeFilter rf = TermRangeFilter.newStringRange("contents", "john", "john", true, true);
PhraseQuery pq = new PhraseQuery();
pq.add(new Term("contents", "john"));
pq.add(new Term("contents", "kennedy"));
FilteredQuery fq = new FilteredQuery(pq, rf);
BooleanQuery bq = new BooleanQuery();
bq.add(pq, Occur.MUST);
bq.add(TermRangeQuery.newStringRange("contents", "john", "john", true, true), Occur.FILTER);
doSearching(fq);
doSearching(bq);
doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
// Currently highlights "John" and "Kennedy" separately
assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
@ -1198,7 +1199,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
BooleanQuery booleanQuery = new BooleanQuery();
booleanQuery.add(new TermQuery(new Term(FIELD_NAME, "john")), Occur.SHOULD);
PrefixQuery prefixQuery = new PrefixQuery(new Term(FIELD_NAME, "kenn"));
prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
prefixQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
booleanQuery.add(prefixQuery, Occur.SHOULD);
doSearching(booleanQuery);

View File

@ -276,7 +276,7 @@ public class TestBlockJoin extends LuceneTestCase {
MultiTermQuery qc = NumericRangeQuery.newIntRange("year", 2007, 2007, true, true);
// Hacky: this causes the query to need 2 rewrite
// iterations:
qc.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
qc.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
BitDocIdSetFilter parentsFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("docType", "resume"))));

View File

@ -24,7 +24,8 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.Bits;
@ -72,13 +73,13 @@ public class PKIndexSplitter {
*/
public PKIndexSplitter(Directory input, Directory dir1, Directory dir2, Term midTerm) {
this(input, dir1, dir2,
new TermRangeFilter(midTerm.field(), null, midTerm.bytes(), true, false));
new QueryWrapperFilter(new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false)));
}
public PKIndexSplitter(Directory input, Directory dir1,
Directory dir2, Term midTerm, IndexWriterConfig config1, IndexWriterConfig config2) {
this(input, dir1, dir2,
new TermRangeFilter(midTerm.field(), null, midTerm.bytes(), true, false), config1, config2);
new QueryWrapperFilter(new TermRangeQuery(midTerm.field(), null, midTerm.bytes(), true, false)), config1, config2);
}
public void split() throws IOException {

View File

@ -64,7 +64,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer
Operator operator = OR_OPERATOR;
boolean lowercaseExpandedTerms = true;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_REWRITE;
boolean allowLeadingWildcard = false;
protected String field;
@ -272,7 +272,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer
}
/**
* By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
* By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_REWRITE}
* when creating a {@link PrefixQuery}, {@link WildcardQuery} or {@link TermRangeQuery}. This implementation is generally preferable because it
* a) Runs faster b) Does not have the scarcity of terms unduly influence score
* c) avoids any {@link TooManyClauses} exception.

View File

@ -106,7 +106,7 @@ public class ComplexPhraseQueryParser extends QueryParser {
// QueryParser is not guaranteed threadsafe anyway so this temporary
// state change should not
// present an issue
setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
return super.parse(query);
} finally {
setMultiTermRewriteMethod(oldMethod);
@ -186,7 +186,7 @@ public class ComplexPhraseQueryParser extends QueryParser {
// Must use old-style RangeQuery in order to produce a BooleanQuery
// that can be turned into SpanOr clause
TermRangeQuery rangeQuery = TermRangeQuery.newStringRange(field, part1, part2, startInclusive, endInclusive);
rangeQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
rangeQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
return rangeQuery;
}
return super.newRangeQuery(field, part1, part2, startInclusive, endInclusive);

View File

@ -73,7 +73,7 @@ public interface CommonQueryParserConfiguration {
/**
* By default, it uses
* {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} when creating a
* {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a
* prefix, wildcard and range queries. This implementation is generally
* preferable because it a) Runs faster b) Does not have the scarcity of terms
* unduly influence score c) avoids any {@link TooManyListenersException}

View File

@ -256,7 +256,7 @@ public class StandardQueryParser extends QueryParserHelper implements CommonQuer
/**
* By default, it uses
* {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} when creating a
* {@link MultiTermQuery#CONSTANT_SCORE_REWRITE} when creating a
* prefix, wildcard and range queries. This implementation is generally
* preferable because it a) Runs faster b) Does not have the scarcity of terms
* unduly influence score c) avoids any {@link TooManyListenersException}

View File

@ -208,7 +208,7 @@ public class StandardQueryConfigHandler extends QueryConfigHandler {
set(ConfigurationKeys.FIELD_BOOST_MAP, new LinkedHashMap<String, Float>());
set(ConfigurationKeys.FUZZY_CONFIG, new FuzzyConfig());
set(ConfigurationKeys.LOCALE, Locale.getDefault());
set(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD, MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
set(ConfigurationKeys.MULTI_TERM_REWRITE_METHOD, MultiTermQuery.CONSTANT_SCORE_REWRITE);
set(ConfigurationKeys.FIELD_DATE_RESOLUTION_MAP, new HashMap<CharSequence, DateTools.Resolution>());
}

View File

@ -30,7 +30,7 @@ import org.apache.lucene.search.MultiTermQuery;
/**
* This processor instates the default
* {@link org.apache.lucene.search.MultiTermQuery.RewriteMethod},
* {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}, for multi-term
* {@link MultiTermQuery#CONSTANT_SCORE_REWRITE}, for multi-term
* query nodes.
*/
public class MultiTermRewriteMethodProcessor extends QueryNodeProcessorImpl {

View File

@ -64,8 +64,6 @@ public class CoreParser implements QueryBuilder {
this.analyzer = analyzer;
this.parser = parser;
filterFactory = new FilterBuilderFactory();
filterFactory.addBuilder("RangeFilter", new RangeFilterBuilder());
filterFactory.addBuilder("NumericRangeFilter", new NumericRangeFilterBuilder());
queryFactory = new QueryBuilderFactory();
queryFactory.addBuilder("TermQuery", new TermQueryBuilder());
@ -73,6 +71,7 @@ public class CoreParser implements QueryBuilder {
queryFactory.addBuilder("MatchAllDocsQuery", new MatchAllDocsQueryBuilder());
queryFactory.addBuilder("BooleanQuery", new BooleanQueryBuilder(queryFactory));
queryFactory.addBuilder("NumericRangeQuery", new NumericRangeQueryBuilder());
queryFactory.addBuilder("RangeQuery", new RangeQueryBuilder());
queryFactory.addBuilder("DisjunctionMaxQuery", new DisjunctionMaxQueryBuilder(queryFactory));
if (parser != null) {
queryFactory.addBuilder("UserQuery", new UserInputQueryBuilder(parser));
@ -80,7 +79,7 @@ public class CoreParser implements QueryBuilder {
queryFactory.addBuilder("UserQuery", new UserInputQueryBuilder(defaultField, analyzer));
}
queryFactory.addBuilder("FilteredQuery", new FilteredQueryBuilder(filterFactory, queryFactory));
queryFactory.addBuilder("ConstantScoreQuery", new ConstantScoreQueryBuilder(filterFactory));
queryFactory.addBuilder("ConstantScoreQuery", new ConstantScoreQueryBuilder(queryFactory));
filterFactory.addBuilder("CachedFilter", new CachedFilterBuilder(queryFactory,
filterFactory, maxNumCachedFilters));

View File

@ -1,11 +1,11 @@
package org.apache.lucene.queryparser.xml.builders;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.FilterBuilderFactory;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.queryparser.xml.QueryBuilderFactory;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.w3c.dom.Element;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -29,17 +29,17 @@ import org.w3c.dom.Element;
*/
public class ConstantScoreQueryBuilder implements QueryBuilder {
private final FilterBuilderFactory filterFactory;
private final QueryBuilderFactory queryFactory;
public ConstantScoreQueryBuilder(FilterBuilderFactory filterFactory) {
this.filterFactory = filterFactory;
public ConstantScoreQueryBuilder(QueryBuilderFactory queryFactory) {
this.queryFactory = queryFactory;
}
@Override
public Query getQuery(Element e) throws ParserException {
Element filterElem = DOMUtils.getFirstChildOrFail(e);
Element queryElem = DOMUtils.getFirstChildOrFail(e);
Query q = new ConstantScoreQuery(filterFactory.getFilter(filterElem));
Query q = new ConstantScoreQuery(queryFactory.getQuery(queryElem));
q.setBoost(DOMUtils.getAttribute(e, "boost", 1.0f));
return q;
}

View File

@ -1,168 +0,0 @@
package org.apache.lucene.queryparser.xml.builders;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.FilterBuilder;
import org.apache.lucene.queryparser.xml.ParserException;
import org.w3c.dom.Element;
import java.io.IOException;
/**
* Creates a {@link NumericRangeFilter}. The table below specifies the required
* attributes and the defaults if optional attributes are omitted. For more
* detail on what each of the attributes actually do, consult the documentation
* for {@link NumericRangeFilter}:
* <table summary="supported attributes">
* <tr>
* <th>Attribute name</th>
* <th>Values</th>
* <th>Required</th>
* <th>Default</th>
* </tr>
* <tr>
* <td>fieldName</td>
* <td>String</td>
* <td>Yes</td>
* <td>N/A</td>
* </tr>
* <tr>
* <td>lowerTerm</td>
* <td>Specified by <tt>type</tt></td>
* <td>Yes</td>
* <td>N/A</td>
* </tr>
* <tr>
* <td>upperTerm</td>
* <td>Specified by <tt>type</tt></td>
* <td>Yes</td>
* <td>N/A</td>
* </tr>
* <tr>
* <td>type</td>
* <td>int, long, float, double</td>
* <td>No</td>
* <td>int</td>
* </tr>
* <tr>
* <td>includeLower</td>
* <td>true, false</td>
* <td>No</td>
* <td>true</td>
* </tr>
* <tr>
* <td>includeUpper</td>
* <td>true, false</td>
* <td>No</td>
* <td>true</td>
* </tr>
* <tr>
* <td>precisionStep</td>
* <td>Integer</td>
* <td>No</td>
* <td>4</td>
* </tr>
* </table>
* <p>
* If an error occurs parsing the supplied <tt>lowerTerm</tt> or
* <tt>upperTerm</tt> into the numeric type specified by <tt>type</tt>, then the
* error will be silently ignored and the resulting filter will not match any
* documents.
*/
// NOTE(review): this class is shown here as part of a file-DELETION hunk in commit
// LUCENE-6300 (NumericRangeFilter was removed; NumericRangeQuery replaces it).
// Builds a NumericRangeFilter from an XML <NumericRangeFilter .../> element.
public class NumericRangeFilterBuilder implements FilterBuilder {
// Shared sentinel returned when lowerTerm/upperTerm fail to parse and
// strictMode is off; its getDocIdSet always returns null, i.e. matches nothing.
private static final NoMatchFilter NO_MATCH_FILTER = new NoMatchFilter();
// When true, number-parse failures raise ParserException instead of
// silently degrading to NO_MATCH_FILTER.
private boolean strictMode = false;
/**
* Specifies how this {@link NumericRangeFilterBuilder} will handle errors.
* <p>
* If this is set to true, {@link #getFilter(Element)} will throw a
* {@link ParserException} if it is unable to parse the lowerTerm or upperTerm
* into the appropriate numeric type. If this is set to false, then this
* exception will be silently ignored and the resulting filter will not match
* any documents.
* <p>
* Defaults to false.
*/
public void setStrictMode(boolean strictMode) {
this.strictMode = strictMode;
}
// Builds the filter from the element's attributes. fieldName, lowerTerm and
// upperTerm are required (getAttributeWithInheritanceOrFail /
// getAttributeOrFail throw if absent); includeLower/includeUpper default to
// true, precisionStep to NumericUtils.PRECISION_STEP_DEFAULT, type to "int".
@Override
public Filter getFilter(Element e) throws ParserException {
String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
String lowerTerm = DOMUtils.getAttributeOrFail(e, "lowerTerm");
String upperTerm = DOMUtils.getAttributeOrFail(e, "upperTerm");
boolean lowerInclusive = DOMUtils.getAttribute(e, "includeLower", true);
boolean upperInclusive = DOMUtils.getAttribute(e, "includeUpper", true);
int precisionStep = DOMUtils.getAttribute(e, "precisionStep", NumericUtils.PRECISION_STEP_DEFAULT);
String type = DOMUtils.getAttribute(e, "type", "int");
try {
Filter filter;
// Dispatch on the (case-insensitive) "type" attribute; each branch parses
// both bounds with the corresponding boxed-type valueOf, which may throw
// NumberFormatException handled below.
if (type.equalsIgnoreCase("int")) {
filter = NumericRangeFilter.newIntRange(field, precisionStep, Integer
.valueOf(lowerTerm), Integer.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("long")) {
filter = NumericRangeFilter.newLongRange(field, precisionStep, Long
.valueOf(lowerTerm), Long.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("double")) {
filter = NumericRangeFilter.newDoubleRange(field, precisionStep, Double
.valueOf(lowerTerm), Double.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("float")) {
filter = NumericRangeFilter.newFloatRange(field, precisionStep, Float
.valueOf(lowerTerm), Float.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else {
// Unknown type attribute is always an error, regardless of strictMode.
throw new ParserException("type attribute must be one of: [long, int, double, float]");
}
return filter;
} catch (NumberFormatException nfe) {
// Unparseable bound: fail loudly only in strict mode; otherwise return
// the match-nothing sentinel (documented behavior of this builder).
if (strictMode) {
throw new ParserException("Could not parse lowerTerm or upperTerm into a number", nfe);
}
return NO_MATCH_FILTER;
}
}
// Filter that matches no documents: a null DocIdSet is the Lucene Filter
// contract for "no matches in this segment".
static class NoMatchFilter extends Filter {
@Override
public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException {
return null;
}
@Override
public String toString(String field) {
return "NoMatchFilter()";
}
}
}

View File

@ -3,12 +3,6 @@
*/
package org.apache.lucene.queryparser.xml.builders;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.FilterBuilder;
import org.apache.lucene.queryparser.xml.ParserException;
import org.w3c.dom.Element;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -26,21 +20,27 @@ import org.w3c.dom.Element;
* limitations under the License.
*/
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.w3c.dom.Element;
/**
* Builder for {@link TermRangeFilter}
* Builder for {@link TermRangeQuery}
*/
public class RangeFilterBuilder implements FilterBuilder {
public class RangeQueryBuilder implements QueryBuilder {
@Override
public Filter getFilter(Element e) throws ParserException {
public Query getQuery(Element e) throws ParserException {
String fieldName = DOMUtils.getAttributeWithInheritance(e, "fieldName");
String lowerTerm = e.getAttribute("lowerTerm");
String upperTerm = e.getAttribute("upperTerm");
boolean includeLower = DOMUtils.getAttribute(e, "includeLower", true);
boolean includeUpper = DOMUtils.getAttribute(e, "includeUpper", true);
return TermRangeFilter.newStringRange(fieldName, lowerTerm, upperTerm, includeLower, includeUpper);
return TermRangeQuery.newStringRange(fieldName, lowerTerm, upperTerm, includeLower, includeUpper);
}
}

View File

@ -321,15 +321,15 @@ public class TestQPHelper extends LuceneTestCase {
StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
Query q = qp.parse("foo*bar", "field");
assertTrue(q instanceof WildcardQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
q = qp.parse("foo*", "field");
assertTrue(q instanceof PrefixQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
q = qp.parse("[a TO z]", "field");
assertTrue(q instanceof TermRangeQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((MultiTermQuery) q).getRewriteMethod());
}
public void testCJK() throws Exception {
@ -665,12 +665,12 @@ public class TestQPHelper extends LuceneTestCase {
public void testRange() throws Exception {
assertQueryEquals("[ a TO z]", null, "[a TO z]");
assertEquals(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
StandardQueryParser qp = new StandardQueryParser();
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]", "field")).getRewriteMethod());
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]", "field")).getRewriteMethod());
// test open ranges
assertQueryEquals("[ a TO * ]", null, "[a TO *]");
@ -1153,12 +1153,12 @@ public class TestQPHelper extends LuceneTestCase {
assertEquals(q, qp.parse("/[A-Z][123]/", df));
q.setBoost(0.5f);
assertEquals(q, qp.parse("/[A-Z][123]/^0.5", df));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertTrue(qp.parse("/[A-Z][123]/^0.5", df) instanceof RegexpQuery);
assertEquals(q, qp.parse("/[A-Z][123]/^0.5", df));
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE, ((RegexpQuery)qp.parse("/[A-Z][123]/^0.5", df)).getRewriteMethod());
qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE, ((RegexpQuery)qp.parse("/[A-Z][123]/^0.5", df)).getRewriteMethod());
qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
Query escaped = new RegexpQuery(new Term("field", "[a-z]\\/[123]"));
assertEquals(escaped, qp.parse("/[a-z]\\/[123]/", df));

View File

@ -571,12 +571,12 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
assertQueryEquals("[ a TO z}", null, "[a TO z}");
assertQueryEquals("{ a TO z]", null, "{a TO z]");
assertEquals(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]")).getRewriteMethod());
assertEquals(MultiTermQuery.CONSTANT_SCORE_REWRITE, ((TermRangeQuery)getQuery("[ a TO z]")).getRewriteMethod());
CommonQueryParserConfiguration qp = getParserConfig( new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)getQuery("[ a TO z]", qp)).getRewriteMethod());
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE,((TermRangeQuery)getQuery("[ a TO z]", qp)).getRewriteMethod());
// test open ranges
assertQueryEquals("[ a TO * ]", null, "[a TO *]");
@ -982,12 +982,12 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
assertEquals(q, getQuery("/[A-Z][123]/",qp));
q.setBoost(0.5f);
assertEquals(q, getQuery("/[A-Z][123]/^0.5",qp));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
assertTrue(getQuery("/[A-Z][123]/^0.5",qp) instanceof RegexpQuery);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE, ((RegexpQuery)getQuery("/[A-Z][123]/^0.5",qp)).getRewriteMethod());
assertEquals(MultiTermQuery.SCORING_BOOLEAN_REWRITE, ((RegexpQuery)getQuery("/[A-Z][123]/^0.5",qp)).getRewriteMethod());
assertEquals(q, getQuery("/[A-Z][123]/^0.5",qp));
qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
qp.setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
Query escaped = new RegexpQuery(new Term("field", "[a-z]\\/[123]"));
assertEquals(escaped, getQuery("/[a-z]\\/[123]/",qp));

View File

@ -16,5 +16,5 @@
limitations under the License.
-->
<ConstantScoreQuery>
<RangeFilter fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</ConstantScoreQuery>

View File

@ -15,13 +15,11 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<FilteredQuery>
<Query>
<BooleanQuery>
<Clause occurs="must">
<MatchAllDocsQuery/>
</Query>
<Filter>
<RangeFilter fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Filter>
</FilteredQuery>
</Clause>
<Clause occurs="filter">
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>

View File

@ -1,37 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<FilteredQuery>
<Query>
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery >sumitomo</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
</BooleanQuery>
</Query>
<Filter>
<NumericRangeFilter fieldName="date2" lowerTerm="19870409" upperTerm="19870412"/>
</Filter>
</FilteredQuery>

View File

@ -15,23 +15,4 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<FilteredQuery>
<Query>
<BooleanQuery fieldName="contents">
<Clause occurs="should">
<TermQuery>merger</TermQuery>
</Clause>
<Clause occurs="mustnot">
<TermQuery >sumitomo</TermQuery>
</Clause>
<Clause occurs="must">
<TermQuery>bank</TermQuery>
</Clause>
</BooleanQuery>
</Query>
<Filter>
<RangeFilter fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Filter>
</FilteredQuery>
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>

View File

@ -121,9 +121,9 @@ public class TestParser extends LuceneTestCase {
assertEquals(1, ndq.getDisjuncts().size());
}
public void testRangeFilterQueryXML() throws ParserException, IOException {
Query q = parse("RangeFilterQuery.xml");
dumpResults("RangeFilter", q, 5);
public void testRangeQueryXML() throws ParserException, IOException {
Query q = parse("RangeQuery.xml");
dumpResults("RangeQuery", q, 5);
}
public void testUserQueryXML() throws ParserException, IOException {
@ -194,11 +194,6 @@ public class TestParser extends LuceneTestCase {
assertEquals("DuplicateFilterQuery should produce 1 result ", 1, h);
}
public void testNumericRangeFilterQueryXML() throws ParserException, IOException {
Query q = parse("NumericRangeFilterQuery.xml");
dumpResults("NumericRangeFilter", q, 5);
}
public void testNumericRangeQueryQueryXML() throws ParserException, IOException {
Query q = parse("NumericRangeQueryQuery.xml");
dumpResults("NumericRangeQuery", q, 5);

View File

@ -15,12 +15,11 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<FilteredQuery>
<Query>
<BooleanQuery>
<Clause occurs="must">
<UserQuery>"Bank of England"</UserQuery>
</Query>
<Filter>
<RangeFilter fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Filter>
</FilteredQuery>
</Clause>
<Clause occurs="filter">
<RangeQuery fieldName="date" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>

View File

@ -1,216 +0,0 @@
package org.apache.lucene.queryparser.xml.builders;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.NumericRangeFilter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.queryparser.xml.ParserException;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
public class TestNumericRangeFilterBuilder extends LuceneTestCase {
public void testGetFilterHandleNumericParseErrorStrict() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(true);
String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
Document doc = getDocumentFromString(xml);
try {
filterBuilder.getFilter(doc.getDocumentElement());
} catch (ParserException e) {
return;
}
fail("Expected to throw " + ParserException.class);
}
public void testGetFilterHandleNumericParseError() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(false);
String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='NaN'/>";
Document doc = getDocumentFromString(xml);
Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
Directory ramDir = newDirectory();
IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(null));
writer.commit();
try {
LeafReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(ramDir));
try {
assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
}
finally {
reader.close();
}
}
finally {
writer.commit();
writer.close();
ramDir.close();
}
}
@SuppressWarnings({"unchecked","rawtypes"})
public void testGetFilterInt() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(true);
String xml = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10'/>";
Document doc = getDocumentFromString(xml);
Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
assertTrue(filter instanceof NumericRangeFilter<?>);
NumericRangeFilter<Integer> numRangeFilter = (NumericRangeFilter<Integer>) filter;
assertEquals(Integer.valueOf(-1), numRangeFilter.getMin());
assertEquals(Integer.valueOf(10), numRangeFilter.getMax());
assertEquals("AGE", numRangeFilter.getField());
assertTrue(numRangeFilter.includesMin());
assertTrue(numRangeFilter.includesMax());
String xml2 = "<NumericRangeFilter fieldName='AGE' type='int' lowerTerm='-1' upperTerm='10' includeUpper='false'/>";
Document doc2 = getDocumentFromString(xml2);
Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
assertTrue(filter2 instanceof NumericRangeFilter);
NumericRangeFilter<Integer> numRangeFilter2 = (NumericRangeFilter) filter2;
assertEquals(Integer.valueOf(-1), numRangeFilter2.getMin());
assertEquals(Integer.valueOf(10), numRangeFilter2.getMax());
assertEquals("AGE", numRangeFilter2.getField());
assertTrue(numRangeFilter2.includesMin());
assertFalse(numRangeFilter2.includesMax());
}
@SuppressWarnings({"unchecked","rawtypes"})
public void testGetFilterLong() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(true);
String xml = "<NumericRangeFilter fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000'/>";
Document doc = getDocumentFromString(xml);
Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
assertTrue(filter instanceof NumericRangeFilter<?>);
NumericRangeFilter<Long> numRangeFilter = (NumericRangeFilter) filter;
assertEquals(Long.valueOf(-2321L), numRangeFilter.getMin());
assertEquals(Long.valueOf(60000000L), numRangeFilter.getMax());
assertEquals("AGE", numRangeFilter.getField());
assertTrue(numRangeFilter.includesMin());
assertTrue(numRangeFilter.includesMax());
String xml2 = "<NumericRangeFilter fieldName='AGE' type='LoNg' lowerTerm='-2321' upperTerm='60000000' includeUpper='false'/>";
Document doc2 = getDocumentFromString(xml2);
Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
assertTrue(filter2 instanceof NumericRangeFilter<?>);
NumericRangeFilter<Long> numRangeFilter2 = (NumericRangeFilter) filter2;
assertEquals(Long.valueOf(-2321L), numRangeFilter2.getMin());
assertEquals(Long.valueOf(60000000L), numRangeFilter2.getMax());
assertEquals("AGE", numRangeFilter2.getField());
assertTrue(numRangeFilter2.includesMin());
assertFalse(numRangeFilter2.includesMax());
}
@SuppressWarnings({"unchecked","rawtypes"})
public void testGetFilterDouble() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(true);
String xml = "<NumericRangeFilter fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023'/>";
Document doc = getDocumentFromString(xml);
Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
assertTrue(filter instanceof NumericRangeFilter<?>);
NumericRangeFilter<Double> numRangeFilter = (NumericRangeFilter) filter;
assertEquals(Double.valueOf(-23.21d), numRangeFilter.getMin());
assertEquals(Double.valueOf(60000.00023d), numRangeFilter.getMax());
assertEquals("AGE", numRangeFilter.getField());
assertTrue(numRangeFilter.includesMin());
assertTrue(numRangeFilter.includesMax());
String xml2 = "<NumericRangeFilter fieldName='AGE' type='doubLe' lowerTerm='-23.21' upperTerm='60000.00023' includeUpper='false'/>";
Document doc2 = getDocumentFromString(xml2);
Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
assertTrue(filter2 instanceof NumericRangeFilter<?>);
NumericRangeFilter<Double> numRangeFilter2 = (NumericRangeFilter) filter2;
assertEquals(Double.valueOf(-23.21d), numRangeFilter2.getMin());
assertEquals(Double.valueOf(60000.00023d), numRangeFilter2.getMax());
assertEquals("AGE", numRangeFilter2.getField());
assertTrue(numRangeFilter2.includesMin());
assertFalse(numRangeFilter2.includesMax());
}
@SuppressWarnings({"unchecked","rawtypes"})
public void testGetFilterFloat() throws Exception {
NumericRangeFilterBuilder filterBuilder = new NumericRangeFilterBuilder();
filterBuilder.setStrictMode(true);
String xml = "<NumericRangeFilter fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23'/>";
Document doc = getDocumentFromString(xml);
Filter filter = filterBuilder.getFilter(doc.getDocumentElement());
assertTrue(filter instanceof NumericRangeFilter<?>);
NumericRangeFilter<Float> numRangeFilter = (NumericRangeFilter) filter;
assertEquals(Float.valueOf(-2.321432f), numRangeFilter.getMin());
assertEquals(Float.valueOf(32432.23f), numRangeFilter.getMax());
assertEquals("AGE", numRangeFilter.getField());
assertTrue(numRangeFilter.includesMin());
assertTrue(numRangeFilter.includesMax());
String xml2 = "<NumericRangeFilter fieldName='AGE' type='FLOAT' lowerTerm='-2.321432' upperTerm='32432.23' includeUpper='false' precisionStep='2' />";
Document doc2 = getDocumentFromString(xml2);
Filter filter2 = filterBuilder.getFilter(doc2.getDocumentElement());
assertTrue(filter2 instanceof NumericRangeFilter<?>);
NumericRangeFilter<Float> numRangeFilter2 = (NumericRangeFilter) filter2;
assertEquals(Float.valueOf(-2.321432f), numRangeFilter2.getMin());
assertEquals(Float.valueOf(32432.23f), numRangeFilter2.getMax());
assertEquals("AGE", numRangeFilter2.getField());
assertTrue(numRangeFilter2.includesMin());
assertFalse(numRangeFilter2.includesMax());
}
private static Document getDocumentFromString(String str)
throws SAXException, IOException, ParserConfigurationException {
InputStream is = new ByteArrayInputStream(str.getBytes(StandardCharsets.UTF_8));
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(true);
DocumentBuilder builder = factory.newDocumentBuilder();
Document doc = builder.parse(is);
is.close();
return doc;
}
}

View File

@ -34,13 +34,14 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@ -77,12 +78,16 @@ public abstract class CollationTestBase extends LuceneTestCase {
// index Term below should NOT be returned by a TermRangeFilter with a Farsi
// Collator (or an Arabic one for the case when Farsi searcher not
// supported).
ScoreDoc[] result = searcher.search
(new FilteredQuery(query, new TermRangeFilter("content", firstBeg, firstEnd, true, true)), 1).scoreDocs;
BooleanQuery bq = new BooleanQuery();
bq.add(query, Occur.MUST);
bq.add(new TermRangeQuery("content", firstBeg, firstEnd, true, true), Occur.FILTER);
ScoreDoc[] result = searcher.search(bq, 1).scoreDocs;
assertEquals("The index Term should not be included.", 0, result.length);
result = searcher.search
(new FilteredQuery(query, new TermRangeFilter("content", secondBeg, secondEnd, true, true)), 1).scoreDocs;
bq = new BooleanQuery();
bq.add(query, Occur.MUST);
bq.add(new TermRangeQuery("content", secondBeg, secondEnd, true, true), Occur.FILTER);
result = searcher.search(bq, 1).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);
reader.close();

View File

@ -78,7 +78,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
/** The default operator that parser uses to combine query terms */
Operator operator = OR_OPERATOR;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_REWRITE;
boolean allowLeadingWildcard = true;
String defaultField;
@ -290,7 +290,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
/**
* By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE}
* By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_REWRITE}
* when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it
* a) Runs faster b) Does not have the scarcity of terms unduly influence score
* c) avoids any "TooManyBooleanClauses" exception.

View File

@ -40,7 +40,6 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.DocTermOrdsRewriteMethod;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.DocValuesRewriteMethod;
import org.apache.lucene.search.MultiTermQuery;
@ -732,9 +731,9 @@ public abstract class FieldType extends FieldProperties {
*/
public MultiTermQuery.RewriteMethod getRewriteMethod(QParser parser, SchemaField field) {
if (!field.indexed() && field.hasDocValues()) {
return field.multiValued() ? new DocTermOrdsRewriteMethod() : new DocValuesRewriteMethod();
return new DocValuesRewriteMethod();
} else {
return MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE;
return MultiTermQuery.CONSTANT_SCORE_REWRITE;
}
}

View File

@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocValuesTermsQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MultiTermQueryWrapperFilter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
@ -85,12 +84,10 @@ public class TermsQParserPlugin extends QParserPlugin {
@Override
Filter makeFilter(String fname, BytesRef[] byteRefs) {
Automaton union = Automata.makeStringUnion(Arrays.asList(byteRefs));
return new MultiTermQueryWrapperFilter<AutomatonQuery>(new AutomatonQuery(new Term(fname), union)) {
};
return new QueryWrapperFilter(new AutomatonQuery(new Term(fname), union));
}
},
docValuesTermsFilter {//on 4x this is FieldCacheTermsFilter but we use the 5x name any way
//note: limited to one val per doc
@Override
Filter makeFilter(String fname, BytesRef[] byteRefs) {
return new QueryWrapperFilter(new DocValuesTermsQuery(fname, byteRefs));

View File

@ -22,8 +22,9 @@ import javax.xml.stream.XMLStreamReader;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
import org.apache.lucene.search.join.ScoreMode;
@ -560,8 +561,8 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
protected ToParentBlockJoinQuery join(final String childTerm) {
return new ToParentBlockJoinQuery(
new TermQuery(new Term(child, childTerm)), new BitDocIdSetCachingWrapperFilter(new TermRangeFilter(parent,
null, null, false, false)), ScoreMode.None);
new TermQuery(new Term(child, childTerm)), new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(
new TermRangeQuery(parent, null, null, false, false))), ScoreMode.None);
}
private Collection<? extends Callable<Void>> callables(List<Document> blocks) {