LUCENE-4784, LUCENE-4785, LUCENE-4786: Fixed references to deprecated classes SinkTokenizer, ValueSourceQuery and RangeQuery.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1448852 13f79535-47bb-0310-9956-ffa450edef68
Adrien Grand 2013-02-21 23:30:36 +00:00
parent 2234f62e1c
commit d2d444d2ed
6 changed files with 30 additions and 19 deletions

View File

@@ -247,6 +247,9 @@ Documentation
* LUCENE-4718: Fixed documentation of oal.queryparser.classic.
(Hayden Muhl via Adrien Grand)
* LUCENE-4784, LUCENE-4785, LUCENE-4786: Fixed references to deprecated classes
SinkTokenizer, ValueSourceQuery and RangeQuery. (Hao Zhong via Adrien Grand)
Build
* LUCENE-4636: Upgrade ivy to 2.3.0 (Shawn Heisey via Robert Muir)

View File

@@ -20,6 +20,8 @@
<TITLE>org.apache.lucene.analysis.sinks</TITLE>
</HEAD>
<BODY>
Implementations of the SinkTokenizer that might be useful.
{@link org.apache.lucene.analysis.sinks.TeeSinkTokenFilter} and implementations
of {@link org.apache.lucene.analysis.sinks.TeeSinkTokenFilter.SinkFilter} that
might be useful.
</BODY>
</HTML>
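For readers landing here from the old SinkTokenizer documentation, a minimal usage sketch of the replacement API (Lucene 4.x; the analyzer, reader and field name are placeholders, and DateRecognizerSinkFilter is just one of the bundled SinkFilter implementations):

import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.sinks.DateRecognizerSinkFilter;
import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;

static void teeIntoDateSink(Analyzer analyzer, Reader reader) throws IOException {
  TokenStream source = analyzer.tokenStream("body", reader);
  // 'tee' passes every token through; each sink replays only the tokens its SinkFilter accepted
  TeeSinkTokenFilter tee = new TeeSinkTokenFilter(source);
  TeeSinkTokenFilter.SinkTokenStream dates = tee.newSinkTokenStream(new DateRecognizerSinkFilter());
  // consume 'tee' as the primary token stream first, then consume 'dates' for a secondary field
}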

View File

@@ -18,6 +18,8 @@ package org.apache.lucene.document;
*/
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.NumericUtils; // for javadocs
import java.text.ParseException;
@@ -35,7 +37,7 @@ import java.util.TimeZone;
*
* <P>This class also helps you to limit the resolution of your dates. Do not
* save dates with a finer resolution than you really need, as then
* RangeQuery and PrefixQuery will require more memory and become slower.
* {@link TermRangeQuery} and {@link PrefixQuery} will require more memory and become slower.
*
* <P>
* Another approach is {@link NumericUtils}, which provides

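As a concrete illustration of the resolution advice above (a minimal sketch; day resolution is only an example, pick whatever granularity the application actually queries on):

import java.util.Date;
import org.apache.lucene.document.DateTools;

static String indexableDay(Date date) {
  // day resolution yields a short term like "20130221", so TermRangeQuery and PrefixQuery
  // over the field enumerate far fewer distinct terms than millisecond resolution would
  return DateTools.dateToString(date, DateTools.Resolution.DAY);
}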
View File

@@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader; // for javadocs
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache; // for javadocs
@@ -55,7 +56,7 @@ public class CustomScoreProvider {
* <p>
* If your custom scoring is different than the default herein you
* should override at least one of the two customScore() methods.
* If the number of ValueSourceQueries is always &lt; 2 it is
* If the number of {@link FunctionQuery function queries} is always &lt; 2 it is
* sufficient to override the other
* {@link #customScore(int, float, float) customScore()}
* method, which is simpler.
@@ -67,7 +68,7 @@ public class CustomScoreProvider {
*
* @param doc id of scored doc.
* @param subQueryScore score of that doc by the subQuery.
* @param valSrcScores scores of that doc by the ValueSourceQuery.
* @param valSrcScores scores of that doc by the {@link FunctionQuery}.
* @return custom score.
*/
public float customScore(int doc, float subQueryScore, float valSrcScores[]) throws IOException {
@@ -85,13 +86,13 @@ public class CustomScoreProvider {
}
/**
* Compute a custom score by the subQuery score and the ValueSourceQuery score.
* Compute a custom score by the subQuery score and the {@link FunctionQuery} score.
* <p>
* Subclasses can override this method to modify the custom score.
* <p>
* If your custom scoring is different than the default herein you
* should override at least one of the two customScore() methods.
* If the number of ValueSourceQueries is always &lt; 2 it is
* If the number of {@link FunctionQuery function queries} is always &lt; 2 it is
* sufficient to override this customScore() method, which is simpler.
* <p>
* The default computation herein is a multiplication of the two scores:
@@ -101,7 +102,7 @@ public class CustomScoreProvider {
*
* @param doc id of scored doc.
* @param subQueryScore score of that doc by the subQuery.
* @param valSrcScore score of that doc by the ValueSourceQuery.
* @param valSrcScore score of that doc by the {@link FunctionQuery}.
* @return custom score.
*/
public float customScore(int doc, float subQueryScore, float valSrcScore) throws IOException {
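To make the override contract above concrete, a minimal sketch of a provider that overrides only the simpler single-score method (the class name and the additive formula are invented for illustration; the constructor signature is the Lucene 4.x one):

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.queries.CustomScoreProvider;

class AdditiveScoreProvider extends CustomScoreProvider {
  AdditiveScoreProvider(AtomicReaderContext context) {
    super(context);
  }
  @Override
  public float customScore(int doc, float subQueryScore, float valSrcScore) throws IOException {
    // replace the default multiplication with a simple sum of the two scores
    return subQueryScore + valSrcScore;
  }
}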

View File

@@ -26,6 +26,8 @@ import java.util.Arrays;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
@@ -39,7 +41,7 @@ import org.apache.lucene.util.ToStringUtils;
* Query that sets document score as a programmatic function of several (sub) scores:
* <ol>
* <li>the score of its subQuery (any query)</li>
* <li>(optional) the score of its ValueSourceQuery (or queries).</li>
* <li>(optional) the score of its {@link FunctionQuery} (or queries).</li>
* </ol>
* Subclasses can modify the computation by overriding {@link #getCustomScoreProvider}.
*
@@ -355,12 +357,12 @@ public class CustomScoreQuery extends Query {
/**
* Checks if this is strict custom scoring.
* In strict custom scoring, the ValueSource part does not participate in weight normalization.
* In strict custom scoring, the {@link ValueSource} part does not participate in weight normalization.
* This may be useful when one wants full control over how scores are modified, and does
* not care about normalizing by the ValueSource part.
* not care about normalizing by the {@link ValueSource} part.
* One particular case where this is useful is for testing this query.
* <P>
* Note: only has effect when the ValueSource part is not null.
* Note: only has effect when the {@link ValueSource} part is not null.
*/
public boolean isStrict() {
return strict;
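A rough usage sketch of the query described above (the field names and the choice of ValueSource are hypothetical; the constructor and setStrict() are the Lucene 4.x API):

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.CustomScoreQuery;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

static Query popularityBoosted() {
  Query sub = new TermQuery(new Term("body", "lucene"));
  FunctionQuery boost = new FunctionQuery(new LongFieldSource("popularity"));
  CustomScoreQuery q = new CustomScoreQuery(sub, boost);
  q.setStrict(true);  // keep the ValueSource part out of weight normalization
  return q;
}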

View File

@@ -32,6 +32,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.QueryParser.Operator;
import org.apache.lucene.queryparser.flexible.standard.CommonQueryParserConfiguration;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanQuery.TooManyClauses;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
@@ -310,11 +311,11 @@ public abstract class QueryParserBase implements CommonQueryParserConfiguration
/**
* By default QueryParser uses {@link org.apache.lucene.search.MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
* when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it
* when creating a {@link PrefixQuery}, {@link WildcardQuery} or {@link TermRangeQuery}. This implementation is generally preferable because it
* a) Runs faster b) Does not have the scarcity of terms unduly influence score
* c) avoids any "TooManyBooleanClauses" exception.
* c) avoids any {@link TooManyClauses} exception.
* However, if your application really needs to use the
* old-fashioned BooleanQuery expansion rewriting and the above
* old-fashioned {@link BooleanQuery} expansion rewriting and the above
* points are not relevant then use this to change
* the rewrite method.
*/
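For illustration, switching a parser back to the BooleanQuery expansion mentioned above might look like this (a sketch; the version constant, field name and analyzer are placeholders):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.util.Version;

static QueryParser booleanRewritingParser() {
  QueryParser parser = new QueryParser(Version.LUCENE_41, "body", new StandardAnalyzer(Version.LUCENE_41));
  // opt back into scoring BooleanQuery expansion; broad wildcard/prefix terms can now hit TooManyClauses
  parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
  return parser;
}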
@@ -416,9 +417,9 @@ public abstract class QueryParserBase implements CommonQueryParserConfiguration
}
/**
* Set whether or not to analyze range terms when constructing RangeQuerys.
* Set whether or not to analyze range terms when constructing {@link TermRangeQuery}s.
* For example, setting this to true can enable analyzing terms into
* collation keys for locale-sensitive RangeQuery.
* collation keys for locale-sensitive {@link TermRangeQuery}.
*
* @param analyzeRangeTerms whether or not terms should be analyzed for RangeQuerys
*/
@@ -427,7 +428,7 @@ public abstract class QueryParserBase implements CommonQueryParserConfiguration
}
/**
* @return whether or not to analyze range terms when constructing RangeQuerys.
* @return whether or not to analyze range terms when constructing {@link TermRangeQuery}s.
*/
public boolean getAnalyzeRangeTerms() {
return analyzeRangeTerms;
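A short sketch of the setter in use (the field name and query string are made up; a collating analyzer on the field is what would make the analyzed endpoints meaningful):

import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

static Query collatedRange(QueryParser parser) throws ParseException {
  // run the range endpoints through the field analyzer, e.g. to turn them into collation keys
  parser.setAnalyzeRangeTerms(true);
  return parser.parse("title:[aardvark TO zebra]");
}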
@@ -844,13 +845,13 @@ public abstract class QueryParserBase implements CommonQueryParserConfiguration
}
/**
* Builds a new TermRangeQuery instance
* Builds a new {@link TermRangeQuery} instance
* @param field Field
* @param part1 min
* @param part2 max
* @param startInclusive true if the start of the range is inclusive
* @param endInclusive true if the end of the range is inclusive
* @return new TermRangeQuery instance
* @return new {@link TermRangeQuery} instance
*/
protected Query newRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) {
final BytesRef start;
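Since newRangeQuery() is the extension point subclasses override, a minimal sketch of redirecting one hypothetical numeric field to NumericRangeQuery (the class and field names are invented; the overridden signature is the one shown above):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

class NumericAwareQueryParser extends QueryParser {
  NumericAwareQueryParser(Version matchVersion, String field, Analyzer analyzer) {
    super(matchVersion, field, analyzer);
  }
  @Override
  protected Query newRangeQuery(String field, String part1, String part2,
                                boolean startInclusive, boolean endInclusive) {
    if ("price".equals(field)) {
      // hypothetical long-valued field, e.g. indexed as a LongField
      return NumericRangeQuery.newLongRange(field, Long.valueOf(part1), Long.valueOf(part2),
                                            startInclusive, endInclusive);
    }
    return super.newRangeQuery(field, part1, part2, startInclusive, endInclusive);
  }
}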