revert 2458

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@948417 13f79535-47bb-0310-9956-ffa450edef68
Mark Robert Miller 2010-05-26 12:51:50 +00:00
parent 2e8a9b98de
commit 6149b63976
19 changed files with 81 additions and 474 deletions


@ -406,10 +406,6 @@ Bug fixes
lock (previously we only released on IOException). (Tamas Cservenak
via Mike McCandless)
* LUCENE-2458: QueryParser no longer automatically forms phrase queries,
assuming whitespace tokenization. Previously all CJK queries, for example,
would be turned into phrase queries. (Robert Muir)
New features
* LUCENE-2128: Parallelized fetching document frequencies during weight
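To make the reverted behavior concrete, here is a rough sketch (not part of this commit) of how an unquoted CJK run parses, assuming an analyzer that emits one token per character, such as the SimpleCJKAnalyzer defined in the test changes further down:

    Analyzer analyzer = new SimpleCJKAnalyzer();   // one token per CJK character
    QueryParser qp = new QueryParser(Version.LUCENE_31, "field", analyzer);
    Query q = qp.parse("中国");
    // with LUCENE-2458 (being reverted here): BooleanQuery  (field:中 field:国)
    // after this revert (pre-2458 behavior):  PhraseQuery   field:"中 国"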


@ -490,6 +490,10 @@ The source distribution does not contain sources of the previous Lucene Java ver
<!-- ================================================================== -->
<target name="clean-javacc">
<delete>
<fileset dir="src/java/org/apache/lucene/analysis/standard" includes="*.java">
<containsregexp expression="Generated.*By.*JavaCC"/>
<exclude name="ParseException.java"/>
</fileset>
<fileset dir="src/java/org/apache/lucene/queryParser" includes="*.java">
<containsregexp expression="Generated.*By.*JavaCC"/>
</fileset>


@ -55,7 +55,7 @@ public class FieldQueryTest extends AbstractTestCase {
}
public void testFlattenTermAndPhrase2gram() throws Exception {
Query query = paB.parse( "AA AND \"BCD\" OR \"EFGH\"" );
Query query = paB.parse( "AA AND BCD OR EFGH" );
FieldQuery fq = new FieldQuery( query, true, true );
Set<Query> flatQueries = new HashSet<Query>();
fq.flatten( query, flatQueries );
@ -679,7 +679,7 @@ public class FieldQueryTest extends AbstractTestCase {
}
public void testQueryPhraseMapOverlap2gram() throws Exception {
Query query = paB.parse( "\"abc\" AND \"bcd\"" );
Query query = paB.parse( "abc AND bcd" );
// phraseHighlight = true, fieldMatch = true
FieldQuery fq = new FieldQuery( query, true, true );


@ -126,7 +126,7 @@ public class ExtendableQueryParser extends QueryParser {
}
@Override
protected Query getFieldQuery(final String field, final String queryText, boolean quoted)
protected Query getFieldQuery(final String field, final String queryText)
throws ParseException {
final Pair<String,String> splitExtensionField = this.extensions
.splitExtensionField(defaultField, field);
@ -136,7 +136,7 @@ public class ExtendableQueryParser extends QueryParser {
return extension.parse(new ExtensionQuery(this, splitExtensionField.cur,
queryText));
}
return super.getFieldQuery(field, queryText, quoted);
return super.getFieldQuery(field, queryText);
}
}


@ -299,7 +299,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
/**
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
@ -330,19 +330,15 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
source.restoreState(list.get(0));
return new TermQuery(new Term(field, termAtt.term()));
} else {
if (severalTokensAtSamePosition || !quoted) {
if (positionCount == 1 || !quoted) {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
BooleanQuery q = new BooleanQuery();
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
TermQuery currentQuery = new TermQuery(
new Term(field, termAtt.term()));
q.add(currentQuery, occur);
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
}
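The branch restored above fires when the analyzer stacks several tokens at the same position (synonym-style output). A small sketch of the disjunction it builds, assuming a term expanded into multi/multi2 at one position as in TestMultiAnalyzer further down:

    // Sketch only: the restored code emits plain SHOULD clauses for stacked tokens,
    // whereas the reverted LUCENE-2458 code picked MUST when the default operator
    // was AND and the tokens spanned more than one position.
    BooleanQuery q = new BooleanQuery();
    q.add(new TermQuery(new Term("field", "multi")),  BooleanClause.Occur.SHOULD);
    q.add(new TermQuery(new Term("field", "multi2")), BooleanClause.Occur.SHOULD);
    // prints as: (field:multi field:multi2)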
@ -375,7 +371,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
}
/**
* Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}.
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
* a SpanNearQuery instead of a PhraseQuery.
*
@ -383,7 +379,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText, true);
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
((PhraseQuery) query).setSlop(slop);
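The javadoc above notes that this slop variant may be overridden to return a SpanNearQuery instead of a PhraseQuery. A minimal, hypothetical override (imports from org.apache.lucene.search.spans assumed; whitespace tokenization assumed for brevity):

    @Override
    protected Query getFieldQuery(String field, String queryText, int slop)
        throws ParseException {
      // Hypothetical: build an ordered SpanNearQuery from the words instead
      // of delegating to the PhraseQuery-producing base implementation.
      String[] words = queryText.split("\\s+");
      SpanQuery[] clauses = new SpanQuery[words.length];
      for (int i = 0; i < words.length; i++) {
        clauses[i] = new SpanTermQuery(new Term(field, words[i]));
      }
      return new SpanNearQuery(clauses, slop, true); // true = in-order matching
    }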
@ -851,7 +847,7 @@ public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
}
q = getFuzzyQuery(field, termImage, fms);
} else {
q = getFieldQuery(field, termImage, false);
q = getFieldQuery(field, termImage);
}
break;
case RANGEIN_START:


@ -127,7 +127,7 @@ public class PrecedenceQueryParser {
Locale locale = Locale.getDefault();
static enum Operator { OR, AND }
/** Constructs a query parser.
* @param f the default field for query terms.
* @param a used to find terms in the query text.
@ -323,7 +323,7 @@ public class PrecedenceQueryParser {
/**
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
@ -354,19 +354,15 @@ public class PrecedenceQueryParser {
source.restoreState(list.get(0));
return new TermQuery(new Term(field, termAtt.term()));
} else {
if (severalTokensAtSamePosition || !quoted) {
if (positionCount == 1 || !quoted) {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
BooleanQuery q = new BooleanQuery();
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
TermQuery currentQuery = new TermQuery(
new Term(field, termAtt.term()));
q.add(currentQuery, occur);
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
}
@ -399,7 +395,7 @@ public class PrecedenceQueryParser {
}
/**
* Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}.
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
* a SpanNearQuery instead of a PhraseQuery.
*
@ -407,7 +403,7 @@ public class PrecedenceQueryParser {
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText, true);
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
((PhraseQuery) query).setSlop(slop);
@ -834,7 +830,7 @@ Query Term(String field) : {
}
q = getFuzzyQuery(field, termImage, fms);
} else {
q = getFieldQuery(field, termImage, false);
q = getFieldQuery(field, termImage);
}
}
| ( <RANGEIN_START> ( goop1=<RANGEIN_GOOP>|goop1=<RANGEIN_QUOTED> )


@ -451,7 +451,7 @@ public class QueryParserWrapper {
/**
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted)
protected Query getFieldQuery(String field, String queryText)
throws ParseException {
throw new UnsupportedOperationException();
}


@ -36,7 +36,6 @@ import org.apache.lucene.queryParser.core.nodes.GroupQueryNode;
import org.apache.lucene.queryParser.core.nodes.NoTokenFoundQueryNode;
import org.apache.lucene.queryParser.core.nodes.ParametricQueryNode;
import org.apache.lucene.queryParser.core.nodes.QueryNode;
import org.apache.lucene.queryParser.core.nodes.QuotedFieldQueryNode;
import org.apache.lucene.queryParser.core.nodes.TextableQueryNode;
import org.apache.lucene.queryParser.core.nodes.TokenizedPhraseQueryNode;
import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorImpl;
@ -188,8 +187,8 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl {
return fieldNode;
} else if (severalTokensAtSamePosition || !(node instanceof QuotedFieldQueryNode)) {
if (positionCount == 1 || !(node instanceof QuotedFieldQueryNode)) {
} else if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
LinkedList<QueryNode> children = new LinkedList<QueryNode>();
@ -207,11 +206,9 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl {
children.add(new FieldQueryNode(field, term, -1, -1));
}
if (positionCount == 1)
return new GroupQueryNode(
return new GroupQueryNode(
new StandardBooleanQueryNode(children, true));
else
return new StandardBooleanQueryNode(children, false);
} else {
// phrase query:


@ -23,13 +23,9 @@ import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.PhraseQuery;
@ -41,7 +37,6 @@ import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.LocalizedTestCase;
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.RegExp;
import java.io.IOException;
import java.io.Reader;
@ -265,90 +260,6 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
assertQueryEquals(".NET", a, ".NET");
}
//individual CJK chars as terms, like StandardAnalyzer
private class SimpleCJKTokenizer extends Tokenizer {
private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public SimpleCJKTokenizer(Reader input) {
super(input);
}
@Override
public boolean incrementToken() throws IOException {
int ch = input.read();
if (ch < 0)
return false;
clearAttributes();
termAtt.setEmpty().append((char) ch);
return true;
}
}
private class SimpleCJKAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new SimpleCJKTokenizer(reader);
}
}
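A quick usage sketch for this test analyzer (nothing here is in the commit; it only shows what the tokenizer emits):

    TokenStream ts = new SimpleCJKAnalyzer().tokenStream("field", new StringReader("中国"));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(new String(term.buffer(), 0, term.length())); // 中, then 国
    }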
public void testCJKTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国", analyzer));
}
public void testCJKBoostedTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.setBoost(0.5f);
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国^0.5", analyzer));
}
public void testCJKPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"", analyzer));
}
public void testCJKBoostedPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setBoost(0.5f);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
}
public void testCJKSloppyPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setSlop(3);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"~3", analyzer));
}
public void testSlop() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("\"term germ\"~2 flork", null, "\"term germ\"~2 flork");
@ -442,11 +353,11 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
assertQueryEquals("term -stop term", qpAnalyzer, "term term");
assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
assertQueryEquals("term phrase term", qpAnalyzer,
"term (phrase1 phrase2) term");
"term \"phrase1 phrase2\" term");
// note the parens in this next assertion differ from the original
// QueryParser behavior
assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
"(+term -(phrase1 phrase2)) term");
"(+term -\"phrase1 phrase2\") term");
assertQueryEquals("stop", qpAnalyzer, "");
assertQueryEquals("stop OR stop AND stop", qpAnalyzer, "");
assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery);


@ -37,8 +37,6 @@ import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@ -59,7 +57,6 @@ import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorImpl;
import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorPipeline;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
import org.apache.lucene.queryParser.standard.nodes.WildcardQueryNode;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
@ -336,90 +333,6 @@ public class TestQPHelper extends LocalizedTestCase {
assertQueryEqualsAllowLeadingWildcard("??\u3000??\u3000??", null, "??\u0020??\u0020??");
}
//individual CJK chars as terms, like StandardAnalyzer
private class SimpleCJKTokenizer extends Tokenizer {
private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public SimpleCJKTokenizer(Reader input) {
super(input);
}
@Override
public boolean incrementToken() throws IOException {
int ch = input.read();
if (ch < 0)
return false;
clearAttributes();
termAtt.setEmpty().append((char) ch);
return true;
}
}
private class SimpleCJKAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new SimpleCJKTokenizer(reader);
}
}
public void testCJKTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国", analyzer));
}
public void testCJKBoostedTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.setBoost(0.5f);
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国^0.5", analyzer));
}
public void testCJKPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"", analyzer));
}
public void testCJKBoostedPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setBoost(0.5f);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
}
public void testCJKSloppyPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setSlop(3);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"~3", analyzer));
}
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
@ -618,10 +531,10 @@ public class TestQPHelper extends LocalizedTestCase {
assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
assertQueryEquals("term phrase term", qpAnalyzer,
"term phrase1 phrase2 term");
"term \"phrase1 phrase2\" term");
assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
"+term -(phrase1 phrase2) term");
"+term -\"phrase1 phrase2\" term");
assertQueryEquals("stop^3", qpAnalyzer, "");
assertQueryEquals("stop", qpAnalyzer, "");


@ -35,8 +35,6 @@ import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.DateField;
@ -55,7 +53,6 @@ import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorImpl;
import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorPipeline;
import org.apache.lucene.queryParser.standard.nodes.WildcardQueryNode;
import org.apache.lucene.queryParser.standard.processors.WildcardQueryNodeProcessor;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
@ -328,90 +325,6 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertQueryEqualsAllowLeadingWildcard("??\u3000??\u3000??", null, "??\u0020??\u0020??");
}
//individual CJK chars as terms, like StandardAnalyzer
private class SimpleCJKTokenizer extends Tokenizer {
private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public SimpleCJKTokenizer(Reader input) {
super(input);
}
@Override
public boolean incrementToken() throws IOException {
int ch = input.read();
if (ch < 0)
return false;
clearAttributes();
termAtt.setEmpty().append((char) ch);
return true;
}
}
private class SimpleCJKAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new SimpleCJKTokenizer(reader);
}
}
public void testCJKTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国", analyzer));
}
public void testCJKBoostedTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.setBoost(0.5f);
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国^0.5", analyzer));
}
public void testCJKPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"", analyzer));
}
public void testCJKBoostedPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setBoost(0.5f);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
}
public void testCJKSloppyPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setSlop(3);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"~3", analyzer));
}
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
@ -617,10 +530,10 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
assertQueryEquals("term phrase term", qpAnalyzer,
"term phrase1 phrase2 term");
"term \"phrase1 phrase2\" term");
assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
"+term -(phrase1 phrase2) term");
"+term -\"phrase1 phrase2\" term");
assertQueryEquals("stop^3", qpAnalyzer, "");
assertQueryEquals("stop", qpAnalyzer, "");


@ -101,7 +101,7 @@ public class MultiFieldQueryParser extends QueryParser
if (field == null) {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (int i = 0; i < fields.length; i++) {
Query q = super.getFieldQuery(fields[i], queryText, true);
Query q = super.getFieldQuery(fields[i], queryText);
if (q != null) {
//If the user passes a map of boosts
if (boosts != null) {
@ -119,7 +119,7 @@ public class MultiFieldQueryParser extends QueryParser
return null;
return getBooleanQuery(clauses, true);
}
Query q = super.getFieldQuery(field, queryText, true);
Query q = super.getFieldQuery(field, queryText);
applySlop(q,slop);
return q;
}
@ -134,29 +134,8 @@ public class MultiFieldQueryParser extends QueryParser
@Override
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
if (field == null) {
List<BooleanClause> clauses = new ArrayList<BooleanClause>();
for (int i = 0; i < fields.length; i++) {
Query q = super.getFieldQuery(fields[i], queryText, quoted);
if (q != null) {
//If the user passes a map of boosts
if (boosts != null) {
//Get the boost from the map and apply them
Float boost = boosts.get(fields[i]);
if (boost != null) {
q.setBoost(boost.floatValue());
}
}
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
}
Query q = super.getFieldQuery(field, queryText, quoted);
return q;
protected Query getFieldQuery(String field, String queryText) throws ParseException {
return getFieldQuery(field, queryText, 0);
}
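For reference, a small usage sketch of the per-field boosts map consulted in the loops above (field names and boost values are invented for illustration):

    Map<String, Float> boosts = new HashMap<String, Float>();
    boosts.put("title", 5.0f);
    boosts.put("body", 1.0f);
    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_31,
        new String[] { "title", "body" }, new MockAnalyzer(), boosts);
    Query q = mfqp.parse("lucene");
    // roughly: (title:lucene^5.0 body:lucene)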


@ -150,8 +150,6 @@ public class QueryParser implements QueryParserConstants {
// for use when constructing RangeQuerys.
Collator rangeCollator = null;
private Version matchVersion;
/** The default operator for parsing queries.
* Use {@link QueryParser#setDefaultOperator} to change it.
*/
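As the javadoc above says, the default operator is changed through setDefaultOperator; a one-line usage sketch (qp is assumed to be a constructed QueryParser):

    qp.setDefaultOperator(QueryParser.AND_OPERATOR); // make unqualified clauses required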
@ -164,7 +162,6 @@ public class QueryParser implements QueryParserConstants {
*/
public QueryParser(Version matchVersion, String f, Analyzer a) {
this(new FastCharStream(new StringReader("")));
this.matchVersion = matchVersion;
analyzer = a;
field = f;
if (matchVersion.onOrAfter(Version.LUCENE_29)) {
@ -509,10 +506,11 @@ public class QueryParser implements QueryParserConstants {
throw new RuntimeException("Clause cannot be both required and prohibited");
}
/**
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
@ -589,14 +587,10 @@ public class QueryParser implements QueryParserConstants {
}
return newTermQuery(new Term(field, term));
} else {
if (severalTokensAtSamePosition || !quoted) {
if (positionCount == 1 || !quoted) {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = newBooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < numTokens; i++) {
String term = null;
try {
@ -609,7 +603,7 @@ public class QueryParser implements QueryParserConstants {
Query currentQuery = newTermQuery(
new Term(field, term));
q.add(currentQuery, occur);
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
}
@ -688,7 +682,7 @@ public class QueryParser implements QueryParserConstants {
/**
* Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}.
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
* a SpanNearQuery instead of a PhraseQuery.
*
@ -696,7 +690,7 @@ public class QueryParser implements QueryParserConstants {
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText, true);
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
((PhraseQuery) query).setSlop(slop);
@ -1349,7 +1343,7 @@ public class QueryParser implements QueryParserConstants {
}
q = getFuzzyQuery(field, termImage,fms);
} else {
q = getFieldQuery(field, termImage, !matchVersion.onOrAfter(Version.LUCENE_31));
q = getFieldQuery(field, termImage);
}
break;
case RANGEIN_START:
@ -1518,12 +1512,6 @@ public class QueryParser implements QueryParserConstants {
finally { jj_save(0, xla); }
}
private boolean jj_3R_3() {
if (jj_scan_token(STAR)) return true;
if (jj_scan_token(COLON)) return true;
return false;
}
private boolean jj_3R_2() {
if (jj_scan_token(TERM)) return true;
if (jj_scan_token(COLON)) return true;
@ -1540,6 +1528,12 @@ public class QueryParser implements QueryParserConstants {
return false;
}
private boolean jj_3R_3() {
if (jj_scan_token(STAR)) return true;
if (jj_scan_token(COLON)) return true;
return false;
}
/** Generated Token Manager. */
public QueryParserTokenManager token_source;
/** Current token. */


@ -174,8 +174,6 @@ public class QueryParser {
// for use when constructing RangeQuerys.
Collator rangeCollator = null;
private Version matchVersion;
/** The default operator for parsing queries.
* Use {@link QueryParser#setDefaultOperator} to change it.
*/
@ -188,7 +186,6 @@ public class QueryParser {
*/
public QueryParser(Version matchVersion, String f, Analyzer a) {
this(new FastCharStream(new StringReader("")));
this.matchVersion = matchVersion;
analyzer = a;
field = f;
if (matchVersion.onOrAfter(Version.LUCENE_29)) {
@ -533,10 +530,11 @@ public class QueryParser {
throw new RuntimeException("Clause cannot be both required and prohibited");
}
/**
* @exception ParseException throw in overridden method to disallow
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
// Use the analyzer to get all the tokens, and then build a TermQuery,
// PhraseQuery, or nothing based on the term count
@ -613,14 +611,10 @@ public class QueryParser {
}
return newTermQuery(new Term(field, term));
} else {
if (severalTokensAtSamePosition || !quoted) {
if (positionCount == 1 || !quoted) {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = newBooleanQuery(positionCount == 1);
BooleanClause.Occur occur = positionCount > 1 && operator == AND_OPERATOR ?
BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD;
BooleanQuery q = newBooleanQuery(true);
for (int i = 0; i < numTokens; i++) {
String term = null;
try {
@ -633,7 +627,7 @@ public class QueryParser {
Query currentQuery = newTermQuery(
new Term(field, term));
q.add(currentQuery, occur);
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
}
@ -712,7 +706,7 @@ public class QueryParser {
/**
* Base implementation delegates to {@link #getFieldQuery(String,String,boolean)}.
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
* a SpanNearQuery instead of a PhraseQuery.
*
@ -720,7 +714,7 @@ public class QueryParser {
*/
protected Query getFieldQuery(String field, String queryText, int slop)
throws ParseException {
Query query = getFieldQuery(field, queryText, true);
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
((PhraseQuery) query).setSlop(slop);
@ -1320,7 +1314,7 @@ Query Term(String field) : {
}
q = getFuzzyQuery(field, termImage,fms);
} else {
q = getFieldQuery(field, termImage, !matchVersion.onOrAfter(Version.LUCENE_31));
q = getFieldQuery(field, termImage);
}
}
| ( <RANGEIN_START> ( goop1=<RANGEIN_GOOP>|goop1=<RANGEIN_QUOTED> )


@ -104,9 +104,9 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
// direct call to (super's) getFieldQuery to demonstrate difference
// between phrase and multiphrase with modified default slop
assertEquals("\"foo bar\"~99",
qp.getSuperFieldQuery("","foo bar", true).toString());
qp.getSuperFieldQuery("","foo bar").toString());
assertEquals("\"(multi multi2) bar\"~99",
qp.getSuperFieldQuery("","multi bar", true).toString());
qp.getSuperFieldQuery("","multi bar").toString());
// ask subclass to parse phrase with modified default slop
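A sketch of the setup the comment refers to (qp is assumed to be the test's parser subclass; the 99 matches the assertions above):

    qp.setPhraseSlop(99);  // modified default slop
    // an unquoted two-token field query now prints as "foo bar"~99,
    // and an analyzer that stacks tokens yields "(multi multi2) bar"~99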
@ -243,15 +243,15 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
}
/** expose super's version */
public Query getSuperFieldQuery(String f, String t, boolean quoted)
public Query getSuperFieldQuery(String f, String t)
throws ParseException {
return super.getFieldQuery(f,t,quoted);
return super.getFieldQuery(f,t);
}
/** wrap super's version */
@Override
protected Query getFieldQuery(String f, String t, boolean quoted)
protected Query getFieldQuery(String f, String t)
throws ParseException {
return new DumbQueryWrapper(getSuperFieldQuery(f,t,quoted));
return new DumbQueryWrapper(getSuperFieldQuery(f,t));
}
}


@ -34,7 +34,6 @@ import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.DateField;
@ -45,7 +44,6 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.FuzzyQuery;
@ -251,90 +249,6 @@ public class TestQueryParser extends LocalizedTestCase {
assertQueryEquals("用語\u3000用語\u3000用語", null, "用語\u0020用語\u0020用語");
}
//individual CJK chars as terms, like StandardAnalyzer
private class SimpleCJKTokenizer extends Tokenizer {
private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public SimpleCJKTokenizer(Reader input) {
super(input);
}
@Override
public boolean incrementToken() throws IOException {
int ch = input.read();
if (ch < 0)
return false;
clearAttributes();
termAtt.setEmpty().append((char) ch);
return true;
}
}
private class SimpleCJKAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new SimpleCJKTokenizer(reader);
}
}
public void testCJKTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国", analyzer));
}
public void testCJKBoostedTerm() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
BooleanQuery expected = new BooleanQuery();
expected.setBoost(0.5f);
expected.add(new TermQuery(new Term("field", "中")), BooleanClause.Occur.SHOULD);
expected.add(new TermQuery(new Term("field", "国")), BooleanClause.Occur.SHOULD);
assertEquals(expected, getQuery("中国^0.5", analyzer));
}
public void testCJKPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"", analyzer));
}
public void testCJKBoostedPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setBoost(0.5f);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"^0.5", analyzer));
}
public void testCJKSloppyPhrase() throws Exception {
// individual CJK chars as terms
Analyzer analyzer = new SimpleCJKAnalyzer();
PhraseQuery expected = new PhraseQuery();
expected.setSlop(3);
expected.add(new Term("field", "中"));
expected.add(new Term("field", "国"));
assertEquals(expected, getQuery("\"中国\"~3", analyzer));
}
public void testSimple() throws Exception {
assertQueryEquals("term term term", null, "term term term");
assertQueryEquals("türm term term", new MockAnalyzer(), "türm term term");
@ -523,9 +437,9 @@ public class TestQueryParser extends LocalizedTestCase {
assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
assertQueryEquals("term phrase term", qpAnalyzer,
"term (phrase1 phrase2) term");
"term \"phrase1 phrase2\" term");
assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
"+term -(phrase1 phrase2) term");
"+term -\"phrase1 phrase2\" term");
assertQueryEquals("stop^3", qpAnalyzer, "");
assertQueryEquals("stop", qpAnalyzer, "");
assertQueryEquals("(stop)^3", qpAnalyzer, "");
@ -998,9 +912,9 @@ public class TestQueryParser extends LocalizedTestCase {
}
@Override
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
type[0]=3;
return super.getFieldQuery(field, queryText, quoted);
return super.getFieldQuery(field, queryText);
}
};


@ -870,7 +870,7 @@ class ExtendedDismaxQParser extends QParser {
int slop;
@Override
protected Query getFieldQuery(String field, String val, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String val) throws ParseException {
//System.out.println("getFieldQuery: val="+val);
this.type = QType.FIELD;
@ -1005,7 +1005,7 @@ class ExtendedDismaxQParser extends QParser {
switch (type) {
case FIELD: // fallthrough
case PHRASE:
Query query = super.getFieldQuery(field, val, type == QType.PHRASE);
Query query = super.getFieldQuery(field, val);
if (query instanceof PhraseQuery) {
PhraseQuery pq = (PhraseQuery)query;
if (minClauseSize > 1 && pq.getTerms().length < minClauseSize) return null;


@ -128,7 +128,7 @@ public class SolrQueryParser extends QueryParser {
}
}
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
protected Query getFieldQuery(String field, String queryText) throws ParseException {
checkNullField(field);
// intercept magic field name of "_" to use as a hook for our
// own functions.
@ -152,7 +152,7 @@ public class SolrQueryParser extends QueryParser {
}
// default to a normal field query
return super.getFieldQuery(field, queryText, quoted);
return super.getFieldQuery(field, queryText);
}
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
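For context, a sketch of the magic-field hook the comment above refers to (pseudo-field names are from memory and may differ by Solr version):

    // Queries against the "_val_" and "_query_" pseudo-fields are intercepted
    // here rather than treated as normal index fields, e.g.:
    //   _val_:"ord(popularity)"            -> function query on popularity
    //   _query_:"{!dismax qf=title}lucene" -> nested query using another parser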


@ -776,7 +776,7 @@ public class SolrPluginUtils {
* DisjunctionMaxQuery. (so yes: aliases which point at other
* aliases should work)
*/
protected Query getFieldQuery(String field, String queryText, boolean quoted)
protected Query getFieldQuery(String field, String queryText)
throws ParseException {
if (aliases.containsKey(field)) {
@ -791,7 +791,7 @@ public class SolrPluginUtils {
for (String f : a.fields.keySet()) {
Query sub = getFieldQuery(f,queryText,quoted);
Query sub = getFieldQuery(f,queryText);
if (null != sub) {
if (null != a.fields.get(f)) {
sub.setBoost(a.fields.get(f));
@ -804,7 +804,7 @@ public class SolrPluginUtils {
} else {
try {
return super.getFieldQuery(field, queryText, quoted);
return super.getFieldQuery(field, queryText);
} catch (Exception e) {
return null;
}
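A rough sketch of the alias wiring this javadoc describes (the addAlias signature is quoted from memory and should be treated as an assumption):

    // Hypothetical: alias the pseudo-field "who" onto two real fields, so that
    // who:smith expands into a DisjunctionMaxQuery over name and email.
    Map<String, Float> aliased = new HashMap<String, Float>();
    aliased.put("name", 3.0f);
    aliased.put("email", null);            // null boost = use the field unboosted
    parser.addAlias("who", 0.0f, aliased); // 0.0f = tiebreaker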