use object parser value for queries that support it

This commit is contained in:
Shay Banon 2012-12-29 00:14:46 -08:00
parent fd5719b232
commit 9a8d558e51
18 changed files with 166 additions and 87 deletions

View File

@ -0,0 +1,40 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene;
import org.apache.lucene.util.BytesRef;
/**
*/
/**
 * Utility methods for converting arbitrary values into Lucene {@link BytesRef}s.
 */
public class BytesRefs {

    /**
     * Converts an object value to a {@link BytesRef}.
     * <p>
     * Returns the value unchanged when it is already a {@link BytesRef},
     * {@code null} for a {@code null} input, and otherwise the UTF-8 bytes
     * of the value's {@link Object#toString()}.
     *
     * @param value the value to convert, may be {@code null}
     * @return the equivalent {@link BytesRef}, or {@code null}
     */
    public static BytesRef toBytesRef(Object value) {
        if (value == null) {
            return null;
        }
        return value instanceof BytesRef
                ? (BytesRef) value
                : new BytesRef(value.toString());
    }
}

View File

@ -53,8 +53,6 @@ public class Lucene {
public static ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
public static final int BATCH_ENUM_DOCS = 32;
public static Version parseVersion(@Nullable String version, Version defaultVersion, ESLogger logger) {
if (version == null) {
return defaultVersion;

View File

@ -20,7 +20,6 @@
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

View File

@ -20,7 +20,6 @@
package org.elasticsearch.common.lucene.search;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.util.Bits;

View File

@ -21,11 +21,13 @@ package org.elasticsearch.common.lucene.search;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.*;
import org.apache.lucene.queries.mlt.MoreLikeThis;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.queries.mlt.MoreLikeThis;
import org.elasticsearch.common.io.FastStringReader;
import java.io.IOException;

View File

@ -169,7 +169,7 @@ public class MultiPhrasePrefixQuery extends Query {
continue;
}
for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
for (BytesRef term = termsEnum.term(); term != null; term = termsEnum.next()) {
if (!StringHelper.startsWith(term, prefix.bytes())) {
break;
}

View File

@ -135,6 +135,10 @@ public interface XContentParser extends Closeable {
BytesRef bytes() throws IOException;
Object objectText() throws IOException;
Object objectBytes() throws IOException;
boolean hasTextCharacters();
char[] textCharacters() throws IOException;

View File

@ -21,6 +21,8 @@ package org.elasticsearch.common.xcontent.json;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.AbstractXContentParser;
@ -83,6 +85,49 @@ public class JsonXContentParser extends AbstractXContentParser {
return parser.getText();
}
@Override
public BytesRef bytes() throws IOException {
    // Encode the parser's current UTF-16 text buffer directly into UTF-8,
    // avoiding an intermediate String allocation.
    BytesRef result = new BytesRef();
    UnicodeUtil.UTF16toUTF8(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength(), result);
    return result;
}
@Override
public Object objectText() throws IOException {
    // Return the natively-typed value when the current token carries one
    // (number, boolean, null); fall back to the text form otherwise.
    switch (parser.getCurrentToken()) {
        case VALUE_NUMBER_INT:
        case VALUE_NUMBER_FLOAT:
            return parser.getNumberValue();
        case VALUE_TRUE:
            return Boolean.TRUE;
        case VALUE_FALSE:
            return Boolean.FALSE;
        case VALUE_NULL:
            return null;
        case VALUE_STRING:
        default:
            return text();
    }
}
@Override
public Object objectBytes() throws IOException {
    // Same dispatch as objectText(), but string-valued tokens are returned
    // as raw UTF-8 bytes instead of a String.
    switch (parser.getCurrentToken()) {
        case VALUE_NUMBER_INT:
        case VALUE_NUMBER_FLOAT:
            return parser.getNumberValue();
        case VALUE_TRUE:
            return Boolean.TRUE;
        case VALUE_FALSE:
            return Boolean.FALSE;
        case VALUE_NULL:
            return null;
        case VALUE_STRING:
        default:
            return bytes();
    }
}
@Override
public boolean hasTextCharacters() {
return parser.hasTextCharacters();

View File

@ -20,7 +20,6 @@
package org.elasticsearch.common.xcontent.support;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.xcontent.XContentParser;
@ -117,13 +116,6 @@ public abstract class AbstractXContentParser implements XContentParser {
return bytes();
}
@Override
public BytesRef bytes() throws IOException {
BytesRef bytes = new BytesRef();
UnicodeUtil.UTF16toUTF8(textCharacters(), textOffset(), textLength(), bytes);
return bytes;
}
@Override
public Map<String, Object> map() throws IOException {
return XContentMapConverter.readMap(this);

View File

@ -28,6 +28,7 @@ import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.RegexpFilter;
import org.elasticsearch.common.lucene.search.TermFilter;
@ -384,10 +385,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T>, Mapper {
@Override
public BytesRef indexedValueForSearch(Object value) {
if (value instanceof BytesRef) {
return (BytesRef) value;
}
return new BytesRef(value.toString());
return BytesRefs.toBytesRef(value);
}
@Override

View File

@ -68,7 +68,7 @@ public class MatchQueryParser implements QueryParser {
}
String fieldName = parser.currentName();
String text = null;
Object value = null;
float boost = 1.0f;
MatchQuery matchQuery = new MatchQuery(parseContext);
String minimumShouldMatch = null;
@ -81,7 +81,7 @@ public class MatchQueryParser implements QueryParser {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("query".equals(currentFieldName)) {
text = parser.text();
value = parser.objectText();
} else if ("type".equals(currentFieldName)) {
String tStr = parser.text();
if ("boolean".equals(tStr)) {
@ -142,7 +142,7 @@ public class MatchQueryParser implements QueryParser {
}
parser.nextToken();
} else {
text = parser.text();
value = parser.objectText();
// move to the next token
token = parser.nextToken();
if (token != XContentParser.Token.END_OBJECT) {
@ -150,11 +150,11 @@ public class MatchQueryParser implements QueryParser {
}
}
if (text == null) {
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No text specified for text query");
}
Query query = matchQuery.parse(type, fieldName, text);
Query query = matchQuery.parse(type, fieldName, value);
if (query == null) {
return null;
}

View File

@ -56,7 +56,7 @@ public class MultiMatchQueryParser implements QueryParser {
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
String text = null;
Object value = null;
float boost = 1.0f;
MatchQuery.Type type = MatchQuery.Type.BOOLEAN;
MultiMatchQuery multiMatchQuery = new MultiMatchQuery(parseContext);
@ -100,7 +100,7 @@ public class MultiMatchQueryParser implements QueryParser {
}
} else if (token.isValue()) {
if ("query".equals(currentFieldName)) {
text = parser.text();
value = parser.objectText();
} else if ("type".equals(currentFieldName)) {
String tStr = parser.text();
if ("boolean".equals(tStr)) {
@ -153,7 +153,7 @@ public class MultiMatchQueryParser implements QueryParser {
}
}
if (text == null) {
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No text specified for match_all query");
}
@ -161,7 +161,7 @@ public class MultiMatchQueryParser implements QueryParser {
throw new QueryParsingException(parseContext.index(), "No fields specified for match_all query");
}
Query query = multiMatchQuery.parse(type, fieldNameWithBoosts, text);
Query query = multiMatchQuery.parse(type, fieldNameWithBoosts, value);
if (query == null) {
return null;
}

View File

@ -21,8 +21,8 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.TermRangeFilter;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
import org.elasticsearch.index.mapper.MapperService;
@ -54,8 +54,8 @@ public class RangeFilterParser implements FilterParser {
boolean cache = true;
CacheKeyFilter.Key cacheKey = null;
String fieldName = null;
BytesRef from = null;
BytesRef to = null;
Object from = null;
Object to = null;
boolean includeLower = true;
boolean includeUpper = true;
@ -72,24 +72,24 @@ public class RangeFilterParser implements FilterParser {
currentFieldName = parser.currentName();
} else {
if ("from".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
} else if ("to".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
} else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
includeLower = parser.booleanValue();
} else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
includeUpper = parser.booleanValue();
} else if ("gt".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
includeLower = false;
} else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
includeLower = true;
} else if ("lt".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
includeUpper = false;
} else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
includeUpper = true;
} else {
throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]");
@ -117,12 +117,12 @@ public class RangeFilterParser implements FilterParser {
MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
if (smartNameFieldMappers != null) {
if (smartNameFieldMappers.hasMapper()) {
//LUCENE 4 UPGRADE range filter should use bytesref too?
filter = smartNameFieldMappers.mapper().rangeFilter(from != null ? from.utf8ToString() : null, to != null ? to.utf8ToString() : null, includeLower, includeUpper, parseContext);
filter = smartNameFieldMappers.mapper().rangeFilter(from, to, includeLower, includeUpper, parseContext);
}
}
if (filter == null) {
filter = new TermRangeFilter(fieldName, from, to, includeLower, includeUpper);
filter = new TermRangeFilter(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper);
}
if (cache) {

View File

@ -21,8 +21,8 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MapperService;
@ -60,8 +60,8 @@ public class RangeQueryParser implements QueryParser {
throw new QueryParsingException(parseContext.index(), "[range] query malformed, after field missing start object");
}
BytesRef from = null;
BytesRef to = null;
Object from = null;
Object to = null;
boolean includeLower = true;
boolean includeUpper = true;
float boost = 1.0f;
@ -72,9 +72,9 @@ public class RangeQueryParser implements QueryParser {
currentFieldName = parser.currentName();
} else {
if ("from".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
} else if ("to".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
} else if ("include_lower".equals(currentFieldName) || "includeLower".equals(currentFieldName)) {
includeLower = parser.booleanValue();
} else if ("include_upper".equals(currentFieldName) || "includeUpper".equals(currentFieldName)) {
@ -82,16 +82,16 @@ public class RangeQueryParser implements QueryParser {
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("gt".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
includeLower = false;
} else if ("gte".equals(currentFieldName) || "ge".equals(currentFieldName)) {
from = parser.bytesOrNull();
from = parser.objectBytes();
includeLower = true;
} else if ("lt".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
includeUpper = false;
} else if ("lte".equals(currentFieldName) || "le".equals(currentFieldName)) {
to = parser.bytesOrNull();
to = parser.objectBytes();
includeUpper = true;
} else {
throw new QueryParsingException(parseContext.index(), "[range] query does not support [" + currentFieldName + "]");
@ -110,11 +110,11 @@ public class RangeQueryParser implements QueryParser {
if (smartNameFieldMappers != null) {
if (smartNameFieldMappers.hasMapper()) {
//LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well?
query = smartNameFieldMappers.mapper().rangeQuery(from != null ? from.utf8ToString() : null, to != null ? to.utf8ToString() : null, includeLower, includeUpper, parseContext);
query = smartNameFieldMappers.mapper().rangeQuery(from, to, includeLower, includeUpper, parseContext);
}
}
if (query == null) {
query = new TermRangeQuery(fieldName, from, to, includeLower, includeUpper);
query = new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper);
}
query.setBoost(boost);
return wrapSmartNameQuery(query, smartNameFieldMappers, parseContext);

View File

@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Filter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.lucene.search.TermFilter;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
@ -54,7 +55,7 @@ public class TermFilterParser implements FilterParser {
boolean cache = true; // since usually term filter is on repeating terms, cache it by default
CacheKeyFilter.Key cacheKey = null;
String fieldName = null;
String value = null;
Object value = null;
String filterName = null;
String currentFieldName = null;
@ -70,9 +71,9 @@ public class TermFilterParser implements FilterParser {
currentFieldName = parser.currentName();
} else {
if ("term".equals(currentFieldName)) {
value = parser.text();
value = parser.objectBytes();
} else if ("value".equals(currentFieldName)) {
value = parser.text();
value = parser.objectBytes();
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
@ -93,7 +94,7 @@ public class TermFilterParser implements FilterParser {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
fieldName = currentFieldName;
value = parser.text();
value = parser.objectBytes();
}
}
}
@ -121,7 +122,7 @@ public class TermFilterParser implements FilterParser {
}
}
if (filter == null) {
filter = new TermFilter(new Term(fieldName, value));
filter = new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(value)));
}
if (cache) {

View File

@ -24,6 +24,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Filter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.lucene.search.*;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
@ -55,14 +56,14 @@ public class TermsFilterParser implements FilterParser {
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
MapperService.SmartNameFieldMappers smartNameFieldMappers = null;
MapperService.SmartNameFieldMappers smartNameFieldMappers;
Boolean cache = null;
String filterName = null;
String currentFieldName = null;
CacheKeyFilter.Key cacheKey = null;
XContentParser.Token token;
String execution = "plain";
List<String> terms = Lists.newArrayList();
List<Object> terms = Lists.newArrayList();
String fieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@ -71,7 +72,7 @@ public class TermsFilterParser implements FilterParser {
fieldName = currentFieldName;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String value = parser.text();
Object value = parser.objectBytes();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
}
@ -120,7 +121,7 @@ public class TermsFilterParser implements FilterParser {
}
} else {
for (int i = 0; i < filterTerms.length; i++) {
filterTerms[i] = new Term(fieldName, terms.get(i));
filterTerms[i] = new Term(fieldName, BytesRefs.toBytesRef(terms.get(i)));
}
}
filter = new XTermsFilter(filterTerms);
@ -131,12 +132,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("bool".equals(execution)) {
XBooleanFilter boolFiler = new XBooleanFilter();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
boolFiler.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null), BooleanClause.Occur.SHOULD);
}
} else {
for (String term : terms) {
boolFiler.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, term)), null), BooleanClause.Occur.SHOULD);
for (Object term : terms) {
boolFiler.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null), BooleanClause.Occur.SHOULD);
}
}
filter = boolFiler;
@ -147,12 +148,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("bool_nocache".equals(execution)) {
XBooleanFilter boolFiler = new XBooleanFilter();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
boolFiler.add(fieldMapper.termFilter(term, parseContext), BooleanClause.Occur.SHOULD);
}
} else {
for (String term : terms) {
boolFiler.add(new TermFilter(new Term(fieldName, term)), BooleanClause.Occur.SHOULD);
for (Object term : terms) {
boolFiler.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), BooleanClause.Occur.SHOULD);
}
}
filter = boolFiler;
@ -163,12 +164,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("and".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
}
} else {
for (String term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, term)), null));
for (Object term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
}
}
filter = new AndFilter(filters);
@ -179,12 +180,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("and_nocache".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
filters.add(fieldMapper.termFilter(term, parseContext));
}
} else {
for (String term : terms) {
filters.add(new TermFilter(new Term(fieldName, term)));
for (Object term : terms) {
filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
}
}
filter = new AndFilter(filters);
@ -195,12 +196,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("or".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
filters.add(parseContext.cacheFilter(fieldMapper.termFilter(term, parseContext), null));
}
} else {
for (String term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, term)), null));
for (Object term : terms) {
filters.add(parseContext.cacheFilter(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))), null));
}
}
filter = new OrFilter(filters);
@ -211,12 +212,12 @@ public class TermsFilterParser implements FilterParser {
} else if ("or_nocache".equals(execution)) {
List<Filter> filters = Lists.newArrayList();
if (fieldMapper != null) {
for (String term : terms) {
for (Object term : terms) {
filters.add(fieldMapper.termFilter(term, parseContext));
}
} else {
for (String term : terms) {
filters.add(new TermFilter(new Term(fieldName, term)));
for (Object term : terms) {
filters.add(new TermFilter(new Term(fieldName, BytesRefs.toBytesRef(term))));
}
}
filter = new OrFilter(filters);

View File

@ -135,7 +135,7 @@ public class MatchQuery {
this.zeroTermsQuery = zeroTermsQuery;
}
public Query parse(Type type, String fieldName, String text) {
public Query parse(Type type, String fieldName, Object value) {
FieldMapper mapper = null;
final String field;
MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
@ -150,7 +150,7 @@ public class MatchQuery {
if (smartNameFieldMappers.explicitTypeInNameWithDocMapper()) {
String[] previousTypes = QueryParseContext.setTypesWithPrevious(new String[]{smartNameFieldMappers.docMapper().type()});
try {
return wrapSmartNameQuery(mapper.termQuery(text, parseContext), smartNameFieldMappers, parseContext);
return wrapSmartNameQuery(mapper.termQuery(value, parseContext), smartNameFieldMappers, parseContext);
} catch (RuntimeException e) {
if (lenient) {
return null;
@ -161,7 +161,7 @@ public class MatchQuery {
}
} else {
try {
return wrapSmartNameQuery(mapper.termQuery(text, parseContext), smartNameFieldMappers, parseContext);
return wrapSmartNameQuery(mapper.termQuery(value, parseContext), smartNameFieldMappers, parseContext);
} catch (RuntimeException e) {
if (lenient) {
return null;
@ -196,7 +196,7 @@ public class MatchQuery {
PositionIncrementAttribute posIncrAtt = null;
boolean success = false;
try {
source = analyzer.tokenStream(field, new FastStringReader(text));
source = analyzer.tokenStream(field, new FastStringReader(value.toString()));
source.reset();
success = true;
} catch (IOException ex) {

View File

@ -44,14 +44,14 @@ public class MultiMatchQuery extends MatchQuery {
super(parseContext);
}
public Query parse(Type type, Map<String, Float> fieldNames, String text) {
public Query parse(Type type, Map<String, Float> fieldNames, Object value) {
if (fieldNames.size() == 1) {
Map.Entry<String, Float> fieldBoost = fieldNames.entrySet().iterator().next();
Float boostValue = fieldBoost.getValue();
if (boostValue == null) {
return parse(type, fieldBoost.getKey(), text);
return parse(type, fieldBoost.getKey(), value);
} else {
Query query = parse(type, fieldBoost.getKey(), text);
Query query = parse(type, fieldBoost.getKey(), value);
query.setBoost(boostValue);
return query;
}
@ -61,7 +61,7 @@ public class MultiMatchQuery extends MatchQuery {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreaker);
boolean clauseAdded = false;
for (String fieldName : fieldNames.keySet()) {
Query query = parse(type, fieldName, text);
Query query = parse(type, fieldName, value);
Float boostValue = fieldNames.get(fieldName);
if (boostValue != null) {
query.setBoost(boostValue);
@ -75,7 +75,7 @@ public class MultiMatchQuery extends MatchQuery {
} else {
BooleanQuery booleanQuery = new BooleanQuery();
for (String fieldName : fieldNames.keySet()) {
Query query = parse(type, fieldName, text);
Query query = parse(type, fieldName, value);
Float boostValue = fieldNames.get(fieldName);
if (boostValue != null) {
query.setBoost(boostValue);