Merge branch 'master' into feature/client_aggs_parsing

commit 4a69b658cd
@@ -57,7 +57,7 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Collections;
 import static java.util.Collections.unmodifiableMap;
 import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
 
@@ -91,7 +91,8 @@ public class MapperQueryParser extends QueryParser {
 
     public void reset(QueryParserSettings settings) {
         this.settings = settings;
-        if (settings.fieldsAndWeights().isEmpty()) {
+        if (settings.fieldsAndWeights() == null) {
+            // this query has no explicit fields to query so we fallback to the default field
             this.field = settings.defaultField();
         } else if (settings.fieldsAndWeights().size() == 1) {
             this.field = settings.fieldsAndWeights().keySet().iterator().next();
@@ -148,6 +149,11 @@ public class MapperQueryParser extends QueryParser {
         if (fields != null) {
             if (fields.size() == 1) {
                 return getFieldQuerySingle(fields.iterator().next(), queryText, quoted);
+            } else if (fields.isEmpty()) {
+                // the requested fields do not match any field in the mapping
+                // happens for wildcard fields only since we cannot expand to a valid field name
+                // if there is no match in the mappings.
+                return new MatchNoDocsQuery("empty fields");
             }
             if (settings.useDisMax()) {
                 List<Query> queries = new ArrayList<>();
@@ -721,7 +727,7 @@ public class MapperQueryParser extends QueryParser {
     }
 
     private Query applyBoost(String field, Query q) {
-        Float fieldBoost = settings.fieldsAndWeights().get(field);
+        Float fieldBoost = settings.fieldsAndWeights() == null ? null : settings.fieldsAndWeights().get(field);
         if (fieldBoost != null && fieldBoost != 1f) {
             return new BoostQuery(q, fieldBoost);
         }
@@ -780,7 +786,8 @@ public class MapperQueryParser extends QueryParser {
         if (field != null) {
            fields = context.simpleMatchToIndexNames(field);
         } else {
-            fields = settings.fieldsAndWeights().keySet();
+            Map<String, Float> fieldsAndWeights = settings.fieldsAndWeights();
+            fields = fieldsAndWeights == null ? Collections.emptyList() : fieldsAndWeights.keySet();
         }
         return fields;
     }
@@ -22,6 +22,7 @@ package org.elasticsearch.common.xcontent;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser;
 import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 
@@ -30,6 +31,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
+import java.util.function.Consumer;
 
 /**
  * Superclass for {@link ObjectParser} and {@link ConstructingObjectParser}. Defines most of the "declare" methods so they can be shared.
@@ -44,6 +46,94 @@ public abstract class AbstractObjectParser<Value, Context>
     public abstract <T> void declareField(BiConsumer<Value, T> consumer, ContextParser<Context, T> parser, ParseField parseField,
             ValueType type);
 
+    /**
+     * Declares named objects in the style of aggregations. These are named
+     * inside and object like this:
+     *
+     * <pre>
+     * <code>
+     * {
+     *   "aggregations": {
+     *     "name_1": { "aggregation_type": {} },
+     *     "name_2": { "aggregation_type": {} },
+     *     "name_3": { "aggregation_type": {} }
+     *     }
+     *   }
+     * }
+     * </code>
+     * </pre>
+     *
+     * Unlike the other version of this method, "ordered" mode (arrays of
+     * objects) is not supported.
+     *
+     * See NamedObjectHolder in ObjectParserTests for examples of how to invoke
+     * this.
+     *
+     * @param consumer
+     *            sets the values once they have been parsed
+     * @param namedObjectParser
+     *            parses each named object
+     * @param parseField
+     *            the field to parse
+     */
+    public abstract <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
+            ParseField parseField);
+
+    /**
+     * Declares named objects in the style of highlighting's field element.
+     * These are usually named inside and object like this:
+     *
+     * <pre>
+     * <code>
+     * {
+     *   "highlight": {
+     *     "fields": {        <------ this one
+     *       "title": {},
+     *       "body": {},
+     *       "category": {}
+     *     }
+     *   }
+     * }
+     * </code>
+     * </pre>
+     *
+     * but, when order is important, some may be written this way:
+     *
+     * <pre>
+     * <code>
+     * {
+     *   "highlight": {
+     *     "fields": [        <------ this one
+     *       {"title": {}},
+     *       {"body": {}},
+     *       {"category": {}}
+     *     ]
+     *   }
+     * }
+     * </code>
+     * </pre>
+     *
+     * This is because json doesn't enforce ordering. Elasticsearch reads it in
+     * the order sent but tools that generate json are free to put object
+     * members in an unordered Map, jumbling them. Thus, if you care about order
+     * you can send the object in the second way.
+     *
+     * See NamedObjectHolder in ObjectParserTests for examples of how to invoke
+     * this.
+     *
+     * @param consumer
+     *            sets the values once they have been parsed
+     * @param namedObjectParser
+     *            parses each named object
+     * @param orderedModeCallback
+     *            called when the named object is parsed using the "ordered"
+     *            mode (the array of objects)
+     * @param parseField
+     *            the field to parse
+     */
+    public abstract <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
+            Consumer<Value> orderedModeCallback, ParseField parseField);
+
     public <T> void declareField(BiConsumer<Value, T> consumer, CheckedFunction<XContentParser, T, IOException> parser,
             ParseField parseField, ValueType type) {
         if (parser == null) {
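For reference, a minimal sketch of how these two new declare methods get wired up, adapted from the NamedObjectHolder test fixture added later in this commit (NamedObjectHolder, NamedObject, setNamed and keepNamedInOrder all come from that fixture; the cast mirrors the test and would need @SuppressWarnings in real code):

    ConstructingObjectParser<NamedObjectHolder, Void> parser = new ConstructingObjectParser<>("named_object_holder",
            a -> new NamedObjectHolder((List<NamedObject>) a[0]));
    // "named_in_constructor" is parsed straight into the constructor argument ...
    parser.declareNamedObjects(ConstructingObjectParser.constructorArg(), NamedObject.PARSER,
            NamedObjectHolder::keepNamedInOrder, new ParseField("named_in_constructor"));
    // ... while "named" is pushed through a setter once the object is built; the callback
    // only fires when the sender used the "ordered" (array) form.
    parser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER,
            NamedObjectHolder::keepNamedInOrder, new ParseField("named"));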
@@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent;
 
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser;
 import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
 
 import java.io.IOException;
@@ -77,14 +78,14 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
     /**
      * Consumer that marks a field as a required constructor argument instead of a real object field.
      */
-    private static final BiConsumer<Object, Object> REQUIRED_CONSTRUCTOR_ARG_MARKER = (a, b) -> {
+    private static final BiConsumer<?, ?> REQUIRED_CONSTRUCTOR_ARG_MARKER = (a, b) -> {
         throw new UnsupportedOperationException("I am just a marker I should never be called.");
     };
 
     /**
      * Consumer that marks a field as an optional constructor argument instead of a real object field.
      */
-    private static final BiConsumer<Object, Object> OPTIONAL_CONSTRUCTOR_ARG_MARKER = (a, b) -> {
+    private static final BiConsumer<?, ?> OPTIONAL_CONSTRUCTOR_ARG_MARKER = (a, b) -> {
         throw new UnsupportedOperationException("I am just a marker I should never be called.");
     };
 
@@ -189,7 +190,7 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
 
         if (consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER) {
             /*
-             * Constructor arguments are detected by this "marker" consumer. It keeps the API looking clean even if it is a bit sleezy. We
+             * Constructor arguments are detected by these "marker" consumers. It keeps the API looking clean even if it is a bit sleezy. We
              * then build a new consumer directly against the object parser that triggers the "constructor arg just arrived behavior" of the
              * parser. Conveniently, we can close over the position of the constructor in the argument list so we don't need to do any fancy
              * or expensive lookups whenever the constructor args come in.
@@ -204,6 +205,91 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
         }
     }
 
+    @Override
+    public <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
+            ParseField parseField) {
+        if (consumer == null) {
+            throw new IllegalArgumentException("[consumer] is required");
+        }
+        if (namedObjectParser == null) {
+            throw new IllegalArgumentException("[parser] is required");
+        }
+        if (parseField == null) {
+            throw new IllegalArgumentException("[parseField] is required");
+        }
+
+        if (consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER) {
+            /*
+             * Constructor arguments are detected by this "marker" consumer. It
+             * keeps the API looking clean even if it is a bit sleezy. We then
+             * build a new consumer directly against the object parser that
+             * triggers the "constructor arg just arrived behavior" of the
+             * parser. Conveniently, we can close over the position of the
+             * constructor in the argument list so we don't need to do any fancy
+             * or expensive lookups whenever the constructor args come in.
+             */
+            int position = constructorArgInfos.size();
+            boolean required = consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER;
+            constructorArgInfos.add(new ConstructorArgInfo(parseField, required));
+            objectParser.declareNamedObjects((target, v) -> target.constructorArg(position, parseField, v), namedObjectParser, parseField);
+        } else {
+            numberOfFields += 1;
+            objectParser.declareNamedObjects(queueingConsumer(consumer, parseField), namedObjectParser, parseField);
+        }
+    }
+
+    @Override
+    public <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
+            Consumer<Value> orderedModeCallback, ParseField parseField) {
+        if (consumer == null) {
+            throw new IllegalArgumentException("[consumer] is required");
+        }
+        if (namedObjectParser == null) {
+            throw new IllegalArgumentException("[parser] is required");
+        }
+        if (orderedModeCallback == null) {
+            throw new IllegalArgumentException("[orderedModeCallback] is required");
+        }
+        if (parseField == null) {
+            throw new IllegalArgumentException("[parseField] is required");
+        }
+
+        if (consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER) {
+            /*
+             * Constructor arguments are detected by this "marker" consumer. It
+             * keeps the API looking clean even if it is a bit sleezy. We then
+             * build a new consumer directly against the object parser that
+             * triggers the "constructor arg just arrived behavior" of the
+             * parser. Conveniently, we can close over the position of the
+             * constructor in the argument list so we don't need to do any fancy
+             * or expensive lookups whenever the constructor args come in.
+             */
+            int position = constructorArgInfos.size();
+            boolean required = consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER;
+            constructorArgInfos.add(new ConstructorArgInfo(parseField, required));
+            objectParser.declareNamedObjects((target, v) -> target.constructorArg(position, parseField, v), namedObjectParser,
+                    wrapOrderedModeCallBack(orderedModeCallback), parseField);
+        } else {
+            numberOfFields += 1;
+            objectParser.declareNamedObjects(queueingConsumer(consumer, parseField), namedObjectParser,
+                    wrapOrderedModeCallBack(orderedModeCallback), parseField);
+        }
+    }
+
+    private Consumer<Target> wrapOrderedModeCallBack(Consumer<Value> callback) {
+        return (target) -> {
+            if (target.targetObject != null) {
+                // The target has already been built. Call the callback now.
+                callback.accept(target.targetObject);
+                return;
+            }
+            /*
+             * The target hasn't been built. Queue the callback.
+             */
+            target.queuedOrderedModeCallback = callback;
+        };
+    }
+
     /**
      * Creates the consumer that does the "field just arrived" behavior. If the targetObject hasn't been built then it queues the value.
      * Otherwise it just applies the value just like {@linkplain ObjectParser} does.
@@ -258,6 +344,11 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
         * Fields to be saved to the target object when we can build it. This is only allocated if a field has to be queued.
         */
        private Consumer<Value>[] queuedFields;
+       /**
+        * OrderedModeCallback to be called with the target object when we can
+        * build it. This is only allocated if the callback has to be queued.
+        */
+       private Consumer<Value> queuedOrderedModeCallback;
        /**
         * The count of fields already queued.
         */
@@ -343,6 +434,9 @@ public final class ConstructingObjectParser<Value, Context> extends AbstractObje
        private void buildTarget() {
            try {
                targetObject = builder.apply(constructorArgs);
+               if (queuedOrderedModeCallback != null) {
+                   queuedOrderedModeCallback.accept(targetObject);
+               }
                while (queuedFieldsCount > 0) {
                    queuedFieldsCount -= 1;
                    queuedFields[queuedFieldsCount].accept(targetObject);
@@ -227,41 +227,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }, field, ValueType.OBJECT_OR_BOOLEAN);
     }
 
-    /**
-     * Declares named objects in the style of highlighting's field element. These are usually named inside and object like this:
-     * <pre><code>
-     * {
-     *   "highlight": {
-     *     "fields": {        <------ this one
-     *       "title": {},
-     *       "body": {},
-     *       "category": {}
-     *     }
-     *   }
-     * }
-     * </code></pre>
-     * but, when order is important, some may be written this way:
-     * <pre><code>
-     * {
-     *   "highlight": {
-     *     "fields": [        <------ this one
-     *       {"title": {}},
-     *       {"body": {}},
-     *       {"category": {}}
-     *     ]
-     *   }
-     * }
-     * </code></pre>
-     * This is because json doesn't enforce ordering. Elasticsearch reads it in the order sent but tools that generate json are free to put
-     * object members in an unordered Map, jumbling them. Thus, if you care about order you can send the object in the second way.
-     *
-     * See NamedObjectHolder in ObjectParserTests for examples of how to invoke this.
-     *
-     * @param consumer sets the values once they have been parsed
-     * @param namedObjectParser parses each named object
-     * @param orderedModeCallback called when the named object is parsed using the "ordered" mode (the array of objects)
-     * @param field the field to parse
-     */
+    @Override
     public <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
             Consumer<Value> orderedModeCallback, ParseField field) {
         // This creates and parses the named object
@@ -311,26 +277,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         }, field, ValueType.OBJECT_ARRAY);
     }
 
-    /**
-     * Declares named objects in the style of aggregations. These are named inside and object like this:
-     * <pre><code>
-     * {
-     *   "aggregations": {
-     *     "name_1": { "aggregation_type": {} },
-     *     "name_2": { "aggregation_type": {} },
-     *     "name_3": { "aggregation_type": {} }
-     *   }
-     * }
-     * }
-     * </code></pre>
-     * Unlike the other version of this method, "ordered" mode (arrays of objects) is not supported.
-     *
-     * See NamedObjectHolder in ObjectParserTests for examples of how to invoke this.
-     *
-     * @param consumer sets the values once they have been parsed
-     * @param namedObjectParser parses each named object
-     * @param field the field to parse
-     */
+    @Override
     public <T> void declareNamedObjects(BiConsumer<Value, List<T>> consumer, NamedObjectParser<T, Context> namedObjectParser,
             ParseField field) {
         Consumer<Value> orderedModeCallback = (v) -> {
@@ -236,6 +236,15 @@ public final class KeywordFieldMapper extends FieldMapper {
 
         @Override
         protected BytesRef indexedValueForSearch(Object value) {
+            if (searchAnalyzer() == Lucene.KEYWORD_ANALYZER) {
+                // keyword analyzer with the default attribute source which encodes terms using UTF8
+                // in that case we skip normalization, which may be slow if there many terms need to
+                // parse (eg. large terms query) since Analyzer.normalize involves things like creating
+                // attributes through reflection
+                // This if statement will be used whenever a normalizer is NOT configured
+                return super.indexedValueForSearch(value);
+            }
+
             if (value == null) {
                 return null;
             }
@@ -22,10 +22,8 @@ package org.elasticsearch.index.mapper;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -36,6 +34,7 @@ import java.util.List;
 import java.util.Map;
 
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 import static org.elasticsearch.index.mapper.TypeParsers.parseField;
 
 /**
@@ -47,10 +46,12 @@ public class TokenCountFieldMapper extends FieldMapper {
 
     public static class Defaults {
         public static final MappedFieldType FIELD_TYPE = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER);
+        public static final boolean DEFAULT_POSITION_INCREMENTS = true;
     }
 
     public static class Builder extends FieldMapper.Builder<Builder, TokenCountFieldMapper> {
         private NamedAnalyzer analyzer;
+        private boolean enablePositionIncrements = Defaults.DEFAULT_POSITION_INCREMENTS;
 
         public Builder(String name) {
             super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
@@ -66,18 +67,26 @@ public class TokenCountFieldMapper extends FieldMapper {
             return analyzer;
         }
 
+        public Builder enablePositionIncrements(boolean enablePositionIncrements) {
+            this.enablePositionIncrements = enablePositionIncrements;
+            return this;
+        }
+
+        public boolean enablePositionIncrements() {
+            return enablePositionIncrements;
+        }
+
         @Override
         public TokenCountFieldMapper build(BuilderContext context) {
             setupFieldType(context);
             return new TokenCountFieldMapper(name, fieldType, defaultFieldType,
-                    context.indexSettings(), analyzer, multiFieldsBuilder.build(this, context), copyTo);
+                    context.indexSettings(), analyzer, enablePositionIncrements, multiFieldsBuilder.build(this, context), copyTo);
         }
     }
 
     public static class TypeParser implements Mapper.TypeParser {
         @Override
-        @SuppressWarnings("unchecked")
-        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+        public Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             TokenCountFieldMapper.Builder builder = new TokenCountFieldMapper.Builder(name);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
@@ -93,6 +102,9 @@ public class TokenCountFieldMapper extends FieldMapper {
                     }
                     builder.analyzer(analyzer);
                     iterator.remove();
+                } else if (propName.equals("enable_position_increments")) {
+                    builder.enablePositionIncrements(nodeBooleanValue(propNode));
+                    iterator.remove();
                 }
             }
             parseField(builder, name, node, parserContext);
@@ -104,11 +116,13 @@ public class TokenCountFieldMapper extends FieldMapper {
     }
 
     private NamedAnalyzer analyzer;
+    private boolean enablePositionIncrements;
 
     protected TokenCountFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
-            Settings indexSettings, NamedAnalyzer analyzer, MultiFields multiFields, CopyTo copyTo) {
+            Settings indexSettings, NamedAnalyzer analyzer, boolean enablePositionIncrements, MultiFields multiFields, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         this.analyzer = analyzer;
+        this.enablePositionIncrements = enablePositionIncrements;
     }
 
     @Override
@@ -124,7 +138,7 @@ public class TokenCountFieldMapper extends FieldMapper {
         if (value == null) {
             tokenCount = (Integer) fieldType().nullValue();
         } else {
-            tokenCount = countPositions(analyzer, name(), value);
+            tokenCount = countPositions(analyzer, name(), value, enablePositionIncrements);
         }
 
         boolean indexed = fieldType().indexOptions() != IndexOptions.NONE;
@@ -138,19 +152,26 @@ public class TokenCountFieldMapper extends FieldMapper {
     * @param analyzer analyzer to create token stream
     * @param fieldName field name to pass to analyzer
     * @param fieldValue field value to pass to analyzer
+    * @param enablePositionIncrements should we count position increments ?
     * @return number of position increments in a token stream
     * @throws IOException if tokenStream throws it
     */
-    static int countPositions(Analyzer analyzer, String fieldName, String fieldValue) throws IOException {
+    static int countPositions(Analyzer analyzer, String fieldName, String fieldValue, boolean enablePositionIncrements) throws IOException {
        try (TokenStream tokenStream = analyzer.tokenStream(fieldName, fieldValue)) {
            int count = 0;
            PositionIncrementAttribute position = tokenStream.addAttribute(PositionIncrementAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
+               if (enablePositionIncrements) {
                    count += position.getPositionIncrement();
+               } else {
+                   count += Math.min(1, position.getPositionIncrement());
+               }
            }
            tokenStream.end();
+           if (enablePositionIncrements) {
                count += position.getPositionIncrement();
+           }
            return count;
        }
    }
@@ -163,6 +184,14 @@ public class TokenCountFieldMapper extends FieldMapper {
        return analyzer.name();
    }
 
+    /**
+     * Indicates if position increments are counted.
+     * @return <code>true</code> if position increments are counted
+     */
+    public boolean enablePositionIncrements() {
+        return enablePositionIncrements;
+    }
+
    @Override
    protected String contentType() {
        return CONTENT_TYPE;
@@ -172,12 +201,16 @@ public class TokenCountFieldMapper extends FieldMapper {
    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
        super.doMerge(mergeWith, updateAllTypes);
        this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
+        this.enablePositionIncrements = ((TokenCountFieldMapper) mergeWith).enablePositionIncrements;
    }
 
    @Override
    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
        super.doXContentBody(builder, includeDefaults, params);
        builder.field("analyzer", analyzer());
+        if (includeDefaults || enablePositionIncrements() != Defaults.DEFAULT_POSITION_INCREMENTS) {
+            builder.field("enable_position_increments", enablePositionIncrements());
+        }
    }
 
 }
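As a rough sketch of what the new flag changes (hypothetical call, not part of the commit; it assumes an english Analyzer instance in scope and same-package access to the package-private countPositions): stopwords dropped by the analyzer still carry a position increment, so they are counted when the flag is true and skipped when it is false. The integration test below exercises this — its "bulk2" document counts 5 on the standard token_count field but 4 on the english field mapped with enable_position_increments set to false.

    // hypothetical illustration; englishAnalyzer is assumed to be a stopword-removing analyzer
    int withIncrements = TokenCountFieldMapper.countPositions(englishAnalyzer, "foo", "a quick brown fox", true);
    int tokensOnly = TokenCountFieldMapper.countPositions(englishAnalyzer, "foo", "a quick brown fox", false);
    // withIncrements also counts the gaps left by removed stopwords; tokensOnly does not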
@@ -981,7 +981,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
             }
             qpSettings.lenient(lenient == null ? context.queryStringLenient() : lenient);
         }
+        if (fieldsAndWeights.isEmpty() == false || resolvedFields.isEmpty() == false) {
+            // We set the fields and weight only if we have explicit fields to query
+            // Otherwise we set it to null and fallback to the default field.
             qpSettings.fieldsAndWeights(resolvedFields);
+        }
         qpSettings.defaultOperator(defaultOperator.toQueryParserOperator());
 
         if (analyzer == null) {
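Together with the MapperQueryParser change above, the practical effect is that a query_string query built without any explicit fields now falls back to the default field rather than carrying an empty field map. A minimal client-side sketch (the builder call is the standard QueryBuilders factory; the behaviour description follows the comments in this hunk):

    // no .field(...) calls: fieldsAndWeights stays empty, qpSettings.fieldsAndWeights(...)
    // is never set (stays null), and MapperQueryParser falls back to settings.defaultField()
    QueryStringQueryBuilder query = QueryBuilders.queryStringQuery("first foo-bar-foobar* last");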
@@ -28,6 +28,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryCache;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.ScorerSupplier;
 import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.lucene.ShardCoreKeyMap;
@@ -145,6 +146,12 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
             return in.scorer(context);
         }
 
+        @Override
+        public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+            shardKeyMap.add(context.reader());
+            return in.scorerSupplier(context);
+        }
+
         @Override
         public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
             shardKeyMap.add(context.reader());
@@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ObjectParserTests.NamedObject;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.Matcher;
@@ -37,6 +38,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru
 import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.nullValue;
 
@@ -397,4 +399,188 @@ public class ConstructingObjectParserTests extends ESTestCase {
             return parser;
         }
     }
+
+    public void testParseNamedObject() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": {\n"
+                + "  \"a\": {}"
+                + "},\"named_in_constructor\": {\n"
+                + "  \"b\": {}"
+                + "}}");
+        NamedObjectHolder h = NamedObjectHolder.PARSER.apply(parser, null);
+        assertThat(h.named, hasSize(1));
+        assertEquals("a", h.named.get(0).name);
+        assertThat(h.namedInConstructor, hasSize(1));
+        assertEquals("b", h.namedInConstructor.get(0).name);
+        assertFalse(h.namedSuppliedInOrder);
+    }
+
+    public void testParseNamedObjectInOrder() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}}"
+                + "],\"named_in_constructor\": [\n"
+                + "  {\"b\": {}}"
+                + "]}");
+        NamedObjectHolder h = NamedObjectHolder.PARSER.apply(parser, null);
+        assertThat(h.named, hasSize(1));
+        assertEquals("a", h.named.get(0).name);
+        assertThat(h.namedInConstructor, hasSize(1));
+        assertEquals("b", h.namedInConstructor.get(0).name);
+        assertTrue(h.namedSuppliedInOrder);
+    }
+
+    public void testParseNamedObjectTwoFieldsInArray() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}, \"b\": {}}"
+                + "],\"named_in_constructor\": [\n"
+                + "  {\"c\": {}}"
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage());
+        assertEquals(
+                "[named] can be a single object with any number of fields or an array where each entry is an object with a single field",
+                e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectTwoFieldsInArrayConstructorArg() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}}"
+                + "],\"named_in_constructor\": [\n"
+                + "  {\"c\": {}, \"d\": {}}"
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage());
+        assertEquals(
+                "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a "
+                + "single field", e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectNoFieldsInArray() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {}"
+                + "],\"named_in_constructor\": [\n"
+                + "  {\"a\": {}}"
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage());
+        assertEquals(
+                "[named] can be a single object with any number of fields or an array where each entry is an object with a single field",
+                e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectNoFieldsInArrayConstructorArg() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}}"
+                + "],\"named_in_constructor\": [\n"
+                + "  {}"
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage());
+        assertEquals(
+                "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a "
+                + "single field", e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectJunkInArray() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  \"junk\""
+                + "],\"named_in_constructor\": [\n"
+                + "  {\"a\": {}}"
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage());
+        assertEquals(
+                "[named] can be a single object with any number of fields or an array where each entry is an object with a single field",
+                e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectJunkInArrayConstructorArg() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}}"
+                + "],\"named_in_constructor\": [\n"
+                + "  \"junk\""
+                + "]}");
+        ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage());
+        assertEquals(
+                "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a "
+                + "single field", e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectInOrderNotSupported() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": [\n"
+                + "  {\"a\": {}}"
+                + "],\"named_in_constructor\": {\"b\": {}}"
+                + "}");
+
+        // Create our own parser for this test so we can disable support for the "ordered" mode specified by the array above
+        @SuppressWarnings("unchecked")
+        ConstructingObjectParser<NamedObjectHolder, Void> objectParser = new ConstructingObjectParser<>("named_object_holder",
+                a -> new NamedObjectHolder(((List<NamedObject>) a[0])));
+        objectParser.declareNamedObjects(ConstructingObjectParser.constructorArg(), NamedObject.PARSER,
+                new ParseField("named_in_constructor"));
+        objectParser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, new ParseField("named"));
+
+        // Now firing the xml through it fails
+        ParsingException e = expectThrows(ParsingException.class, () -> objectParser.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage());
+        assertEquals("[named] doesn't support arrays. Use a single object with multiple fields.", e.getCause().getMessage());
+    }
+
+    public void testParseNamedObjectInOrderNotSupportedConstructorArg() throws IOException {
+        XContentParser parser = createParser(JsonXContent.jsonXContent,
+                "{\"named\": {\"a\": {}}"
+                + ",\"named_in_constructor\": [\n"
+                + "  {\"b\": {}}"
+                + "]}");
+
+        // Create our own parser for this test so we can disable support for the "ordered" mode specified by the array above
+        @SuppressWarnings("unchecked")
+        ConstructingObjectParser<NamedObjectHolder, Void> objectParser = new ConstructingObjectParser<>("named_object_holder",
+                a -> new NamedObjectHolder(((List<NamedObject>) a[0])));
+        objectParser.declareNamedObjects(ConstructingObjectParser.constructorArg(), NamedObject.PARSER,
+                new ParseField("named_in_constructor"));
+        objectParser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, new ParseField("named"));
+
+        // Now firing the xml through it fails
+        ParsingException e = expectThrows(ParsingException.class, () -> objectParser.apply(parser, null));
+        assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage());
+        assertEquals("[named_in_constructor] doesn't support arrays. Use a single object with multiple fields.", e.getCause().getMessage());
+    }
+
+    static class NamedObjectHolder {
+        @SuppressWarnings("unchecked")
+        public static final ConstructingObjectParser<NamedObjectHolder, Void> PARSER = new ConstructingObjectParser<>("named_object_holder",
+                a -> new NamedObjectHolder(((List<NamedObject>) a[0])));
+        static {
+            PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), NamedObject.PARSER, NamedObjectHolder::keepNamedInOrder,
+                    new ParseField("named_in_constructor"));
+            PARSER.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, NamedObjectHolder::keepNamedInOrder,
+                    new ParseField("named"));
+        }
+
+        private List<NamedObject> named;
+        private List<NamedObject> namedInConstructor;
+        private boolean namedSuppliedInOrder = false;
+
+        NamedObjectHolder(List<NamedObject> namedInConstructor) {
+            this.namedInConstructor = namedInConstructor;
+        }
+
+        public void setNamed(List<NamedObject> named) {
+            this.named = named;
+        }
+
+        public void keepNamedInOrder() {
+            namedSuppliedInOrder = true;
+        }
+    }
 }
@@ -150,4 +150,13 @@ public class KeywordFieldTypeTests extends FieldTypeTestCase {
                 () -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true));
         assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
     }
+
+    public void testNormalizeQueries() {
+        MappedFieldType ft = createDefaultFieldType();
+        ft.setName("field");
+        ft.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);
+        assertEquals(new TermQuery(new Term("field", new BytesRef("FOO"))), ft.termQuery("FOO", null));
+        ft.setSearchAnalyzer(Lucene.STANDARD_ANALYZER);
+        assertEquals(new TermQuery(new Term("field", new BytesRef("foo"))), ft.termQuery("FOO", null));
+    }
 }
@@ -131,6 +131,12 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
                     .field("analyzer", "standard")
                     .field("doc_values", true)
                 .endObject()
+                .startObject("token_count_without_position_increments")
+                    .field("type", "token_count")
+                    .field("analyzer", "english")
+                    .field("enable_position_increments", false)
+                    .field("store", true)
+                .endObject()
             .endObject()
         .endObject()
         .endObject()
@@ -169,6 +175,7 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
     private SearchRequestBuilder prepareSearch() {
         SearchRequestBuilder request = client().prepareSearch("test").setTypes("test");
         request.addStoredField("foo.token_count");
+        request.addStoredField("foo.token_count_without_position_increments");
         if (loadCountedFields) {
             request.addStoredField("foo");
         }
@@ -186,32 +193,38 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase {
         for (SearchHit hit : result.getHits()) {
             String id = hit.getId();
             if (id.equals("single")) {
-                assertSearchHit(hit, 4);
+                assertSearchHit(hit, new int[]{4}, new int[]{4});
             } else if (id.equals("bulk1")) {
-                assertSearchHit(hit, 3);
+                assertSearchHit(hit, new int[]{3}, new int[]{3});
             } else if (id.equals("bulk2")) {
-                assertSearchHit(hit, 5);
+                assertSearchHit(hit, new int[]{5}, new int[]{4});
             } else if (id.equals("multi")) {
-                assertSearchHit(hit, 2, 7);
+                assertSearchHit(hit, new int[]{2, 7}, new int[]{2, 7});
             } else if (id.equals("multibulk1")) {
-                assertSearchHit(hit, 1, 8);
+                assertSearchHit(hit, new int[]{1, 8}, new int[]{1, 8});
             } else if (id.equals("multibulk2")) {
-                assertSearchHit(hit, 6, 10);
+                assertSearchHit(hit, new int[]{6, 10}, new int[]{3, 9});
             } else {
                 throw new ElasticsearchException("Unexpected response!");
             }
         }
     }
 
-    private void assertSearchHit(SearchHit hit, int... termCounts) {
+    private void assertSearchHit(SearchHit hit, int[] standardTermCounts, int[] englishTermCounts) {
         assertThat(hit.field("foo.token_count"), not(nullValue()));
-        assertThat(hit.field("foo.token_count").getValues().size(), equalTo(termCounts.length));
-        for (int i = 0; i < termCounts.length; i++) {
-            assertThat((Integer) hit.field("foo.token_count").getValues().get(i), equalTo(termCounts[i]));
+        assertThat(hit.field("foo.token_count").getValues().size(), equalTo(standardTermCounts.length));
+        for (int i = 0; i < standardTermCounts.length; i++) {
+            assertThat((Integer) hit.field("foo.token_count").getValues().get(i), equalTo(standardTermCounts[i]));
+        }
+
+        assertThat(hit.field("foo.token_count_without_position_increments"), not(nullValue()));
+        assertThat(hit.field("foo.token_count_without_position_increments").getValues().size(), equalTo(englishTermCounts.length));
+        for (int i = 0; i < englishTermCounts.length; i++) {
+            assertThat((Integer) hit.field("foo.token_count_without_position_increments").getValues().get(i), equalTo(englishTermCounts[i]));
         }
 
         if (loadCountedFields && storeCountedFields) {
-            assertThat(hit.field("foo").getValues().size(), equalTo(termCounts.length));
+            assertThat(hit.field("foo").getValues().size(), equalTo(standardTermCounts.length));
         }
     }
 }
@ -24,23 +24,18 @@ import org.apache.lucene.analysis.CannedTokenStream;
|
||||||
import org.apache.lucene.analysis.MockTokenizer;
|
import org.apache.lucene.analysis.MockTokenizer;
|
||||||
import org.apache.lucene.analysis.Token;
|
import org.apache.lucene.analysis.Token;
|
||||||
import org.apache.lucene.analysis.TokenStream;
|
import org.apache.lucene.analysis.TokenStream;
|
||||||
import org.elasticsearch.Version;
|
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
|
||||||
import org.elasticsearch.common.compress.CompressedXContent;
|
import org.elasticsearch.common.compress.CompressedXContent;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
|
||||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||||
import org.elasticsearch.index.IndexService;
|
import org.elasticsearch.index.IndexService;
|
||||||
import org.elasticsearch.plugins.Plugin;
|
import org.elasticsearch.plugins.Plugin;
|
||||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||||
import org.elasticsearch.test.InternalSettingsPlugin;
|
import org.elasticsearch.test.InternalSettingsPlugin;
|
||||||
import org.elasticsearch.test.VersionUtils;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
|
||||||
import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
|
|
||||||
import static org.hamcrest.Matchers.containsString;
|
import static org.hamcrest.Matchers.containsString;
|
||||||
import static org.hamcrest.Matchers.equalTo;
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
|
|
||||||
|
@@ -80,15 +75,38 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
         assertThat(((TokenCountFieldMapper) stage2.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard"));
     }
 
-    public void testCountPositions() throws IOException {
-        // We're looking to make sure that we:
-        Token t1 = new Token(); // Don't count tokens without an increment
+    /**
+     * When position increments are counted, we're looking to make sure that we:
+        - don't count tokens without an increment
+        - count normal tokens with one increment
+        - count funny tokens with more than one increment
+        - count the final token increments on the rare token streams that have them
+     */
+    public void testCountPositionsWithIncrements() throws IOException {
+        Analyzer analyzer = createMockAnalyzer();
+        assertThat(TokenCountFieldMapper.countPositions(analyzer, "", "", true), equalTo(7));
+    }
+
+    /**
+     * When position increments are not counted (only positions are counted), we're looking to make sure that we:
+        - don't count tokens without an increment
+        - count normal tokens with one increment
+        - count funny tokens with more than one increment as only one
+        - don't count the final token increments on the rare token streams that have them
+     */
+    public void testCountPositionsWithoutIncrements() throws IOException {
+        Analyzer analyzer = createMockAnalyzer();
+        assertThat(TokenCountFieldMapper.countPositions(analyzer, "", "", false), equalTo(2));
+    }
+
+    private Analyzer createMockAnalyzer() {
+        Token t1 = new Token(); // Token without an increment
         t1.setPositionIncrement(0);
         Token t2 = new Token();
-        t2.setPositionIncrement(1); // Count normal tokens with one increment
+        t2.setPositionIncrement(1); // Normal token with one increment
         Token t3 = new Token();
-        t2.setPositionIncrement(2); // Count funny tokens with more than one increment
-        int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
+        t2.setPositionIncrement(2); // Funny token with more than one increment
+        int finalTokenIncrement = 4; // Final token increment
         Token[] tokens = new Token[] {t1, t2, t3};
         Collections.shuffle(Arrays.asList(tokens), random());
         final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
@@ -99,7 +117,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
                 return new TokenStreamComponents(new MockTokenizer(), tokenStream);
             }
         };
-        assertThat(TokenCountFieldMapper.countPositions(analyzer, "", ""), equalTo(7));
+        return analyzer;
     }
 
     @Override
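For readers unfamiliar with the counting behaviour the two new tests pin down, the following is a minimal sketch (not part of the commit) of deriving a count from a TokenStream by summing PositionIncrementAttribute values; the helper name and structure are illustrative, not the mapper's actual implementation. With the mock stream above (token increments 0, 2 and 1, plus a final increment of 4) it yields 7 when increments are counted and 2 when only positions are counted.

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

// Hypothetical helper, shown only to illustrate the arithmetic behind the assertions above.
static int countPositionsSketch(TokenStream stream, boolean enablePositionIncrements) throws IOException {
    int count = 0;
    PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
        if (enablePositionIncrements) {
            count += posIncr.getPositionIncrement();              // sum every increment, including "holes"
        } else {
            count += Math.min(1, posIncr.getPositionIncrement()); // count only tokens that advance the position
        }
    }
    stream.end();
    if (enablePositionIncrements) {
        count += posIncr.getPositionIncrement();                  // the final increment some streams report on end()
    }
    stream.close();
    return count;
}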
@@ -85,8 +85,7 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         if (randomBoolean()) {
             queryStringQueryBuilder.defaultField(randomBoolean() ?
                 STRING_FIELD_NAME : randomAlphaOfLengthBetween(1, 10));
-        }
-        if (randomBoolean()) {
+        } else {
             int numFields = randomIntBetween(1, 5);
             for (int i = 0; i < numFields; i++) {
                 String fieldName = randomBoolean() ? STRING_FIELD_NAME : randomAlphaOfLengthBetween(1, 10);
@@ -324,7 +323,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         MapperQueryParser queryParser = new MapperQueryParser(createShardContext());
         QueryParserSettings settings = new QueryParserSettings("first foo-bar-foobar* last");
         settings.defaultField(STRING_FIELD_NAME);
-        settings.fieldsAndWeights(Collections.emptyMap());
         settings.analyzeWildcard(true);
         settings.fuzziness(Fuzziness.AUTO);
         settings.rewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
@@ -352,7 +350,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         MapperQueryParser queryParser = new MapperQueryParser(createShardContext());
         QueryParserSettings settings = new QueryParserSettings("first foo-bar-foobar* last");
         settings.defaultField(STRING_FIELD_NAME);
-        settings.fieldsAndWeights(Collections.emptyMap());
         settings.analyzeWildcard(true);
         settings.fuzziness(Fuzziness.AUTO);
         settings.rewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
@@ -390,7 +387,6 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         MapperQueryParser queryParser = new MapperQueryParser(createShardContext());
         QueryParserSettings settings = new QueryParserSettings("");
         settings.defaultField(STRING_FIELD_NAME);
-        settings.fieldsAndWeights(Collections.emptyMap());
         settings.fuzziness(Fuzziness.AUTO);
         settings.analyzeWildcard(true);
         settings.rewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
@@ -689,6 +685,29 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         assertThat(phraseQuery.getTerms().length, equalTo(2));
     }
 
+    public void testToQueryWildcardNonExistingFields() throws IOException {
+        assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
+        QueryStringQueryBuilder queryStringQueryBuilder =
+            new QueryStringQueryBuilder("foo bar").field("invalid*");
+        Query query = queryStringQueryBuilder.toQuery(createShardContext());
+        Query expectedQuery = new BooleanQuery.Builder()
+            .add(new MatchNoDocsQuery("empty fields"), Occur.SHOULD)
+            .add(new MatchNoDocsQuery("empty fields"), Occur.SHOULD)
+            .build();
+        assertThat(expectedQuery, equalTo(query));
+
+        queryStringQueryBuilder =
+            new QueryStringQueryBuilder("field:foo bar").field("invalid*");
+        query = queryStringQueryBuilder.toQuery(createShardContext());
+        expectedQuery = new BooleanQuery.Builder()
+            .add(new TermQuery(new Term("field", "foo")), Occur.SHOULD)
+            .add(new MatchNoDocsQuery("empty fields"), Occur.SHOULD)
+            .build();
+        assertThat(expectedQuery, equalTo(query));
+
+
+    }
+
     public void testToQuerySplitOnWhitespace() throws IOException {
         assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
         // splitOnWhitespace=false
@@ -23,25 +23,28 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.ConstantScoreScorer;
 import org.apache.lucene.search.ConstantScoreWeight;
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.ScorerSupplier;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.IndicesQueryCache;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
+import java.util.Set;
 
 public class IndicesQueryCacheTests extends ESTestCase {
 
@@ -328,4 +331,76 @@ public class IndicesQueryCacheTests extends ESTestCase {
         cache.close(); // this triggers some assertions
     }
 
+    private static class DummyWeight extends Weight {
+
+        private final Weight weight;
+        private boolean scorerCalled;
+        private boolean scorerSupplierCalled;
+
+        DummyWeight(Weight weight) {
+            super(weight.getQuery());
+            this.weight = weight;
+        }
+
+        @Override
+        public void extractTerms(Set<Term> terms) {
+            weight.extractTerms(terms);
+        }
+
+        @Override
+        public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+            return weight.explain(context, doc);
+        }
+
+        @Override
+        public Scorer scorer(LeafReaderContext context) throws IOException {
+            scorerCalled = true;
+            return weight.scorer(context);
+        }
+
+        @Override
+        public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+            scorerSupplierCalled = true;
+            return weight.scorerSupplier(context);
+        }
+
+    }
+
+    public void testDelegatesScorerSupplier() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+        w.addDocument(new Document());
+        DirectoryReader r = DirectoryReader.open(w);
+        w.close();
+        ShardId shard = new ShardId("index", "_na_", 0);
+        r = ElasticsearchDirectoryReader.wrap(r, shard);
+        IndexSearcher s = new IndexSearcher(r);
+        s.setQueryCachingPolicy(new QueryCachingPolicy() {
+            @Override
+            public boolean shouldCache(Query query) throws IOException {
+                return false; // never cache
+            }
+            @Override
+            public void onUse(Query query) {}
+        });
+
+        Settings settings = Settings.builder()
+            .put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+            .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
+            .build();
+        IndicesQueryCache cache = new IndicesQueryCache(settings);
+        s.setQueryCache(cache);
+        Query query = new MatchAllDocsQuery();
+        final DummyWeight weight = new DummyWeight(s.createNormalizedWeight(query, false));
+        final Weight cached = cache.doCache(weight, s.getQueryCachingPolicy());
+        assertNotSame(weight, cached);
+        assertFalse(weight.scorerCalled);
+        assertFalse(weight.scorerSupplierCalled);
+        cached.scorerSupplier(s.getIndexReader().leaves().get(0));
+        assertFalse(weight.scorerCalled);
+        assertTrue(weight.scorerSupplierCalled);
+        IOUtils.close(r, dir);
+        cache.onClose(shard);
+        cache.close();
+    }
 }
@@ -32,13 +32,12 @@ public class InternalStatsTests extends InternalAggregationTestCase<InternalStat
     @Override
     protected InternalStats createTestInstance(String name, List<PipelineAggregator> pipelineAggregators,
             Map<String, Object> metaData) {
-        long count = randomIntBetween(1, 50);
-        double[] minMax = new double[2];
-        minMax[0] = randomDouble();
-        minMax[0] = randomDouble();
-        double sum = randomDoubleBetween(0, 100, true);
-        return new InternalStats(name, count, sum, minMax[0], minMax[1], DocValueFormat.RAW,
-            pipelineAggregators, Collections.emptyMap());
+        long count = frequently() ? randomIntBetween(1, Integer.MAX_VALUE) : 0;
+        double min = randomDoubleBetween(-1000000, 1000000, true);
+        double max = randomDoubleBetween(-1000000, 1000000, true);
+        double sum = randomDoubleBetween(-1000000, 1000000, true);
+        DocValueFormat format = randomNumericDocValueFormat();
+        return new InternalStats(name, count, sum, min, max, format, pipelineAggregators, Collections.emptyMap());
     }
 
     @Override
@@ -58,7 +57,7 @@ public class InternalStatsTests extends InternalAggregationTestCase<InternalStat
             expectedSum += stats.getSum();
         }
         assertEquals(expectedCount, reduced.getCount());
-        assertEquals(expectedSum, reduced.getSum(), 1e-10);
+        assertEquals(expectedSum, reduced.getSum(), 1e-7);
         assertEquals(expectedMin, reduced.getMin(), 0d);
         assertEquals(expectedMax, reduced.getMax(), 0d);
     }
@@ -48,12 +48,6 @@ GET my_index/_search
 <2> The `name.length` field is a `token_count` <<multi-fields,multi-field>> which will index the number of tokens in the `name` field.
 <3> This query matches only the document containing `Rachel Alice Williams`, as it contains three tokens.
 
-[NOTE]
-===================================================================
-Technically the `token_count` type sums position increments rather than
-counting tokens. This means that even if the analyzer filters out stop
-words they are included in the count.
-===================================================================
 
 [[token-count-params]]
 ==== Parameters for `token_count` fields
@@ -68,6 +62,12 @@ The following parameters are accepted by `token_count` fields:
     value. Required. For best performance, use an analyzer without token
     filters.
 
+`enable_position_increments`::
+
+    Indicates if position increments should be counted.
+    Set to `false` if you don't want to count tokens removed by analyzer filters (like <<analysis-stop-tokenfilter,`stop`>>).
+    Defaults to `true`.
+
 <<mapping-boost,`boost`>>::
 
     Mapping field-level query time boosting. Accepts a floating point number, defaults
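As an aside (not part of the commit), the documented parameter can be exercised from Java test code with an XContentBuilder-built mapping; the index and field names below are made up for illustration, and `enable_position_increments` is the only detail taken from the documentation hunk above.

// Sketch only: a token_count sub-field that ignores position increments.
XContentBuilder mapping = XContentFactory.jsonBuilder()
    .startObject()
        .startObject("properties")
            .startObject("name")
                .field("type", "text")
                .startObject("fields")
                    .startObject("length")
                        .field("type", "token_count")
                        .field("analyzer", "standard")
                        // do not count the position "holes" left by filters such as stop
                        .field("enable_position_increments", false)
                    .endObject()
                .endObject()
            .endObject()
        .endObject()
    .endObject();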
@@ -1338,9 +1338,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
         indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders));
     }
 
-
-    private static final String RANDOM_BOGUS_TYPE = "RANDOM_BOGUS_TYPE______";
-
     /**
      * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
      * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
@@ -1388,31 +1385,33 @@ public abstract class ESIntegTestCase extends ESTestCase {
     * @param builders the documents to index.
     */
    public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
        Random random = random();
-        Set<String> indicesSet = new HashSet<>();
+        Map<String, Set<String>> indicesAndTypes = new HashMap<>();
        for (IndexRequestBuilder builder : builders) {
-            indicesSet.add(builder.request().index());
+            final Set<String> types = indicesAndTypes.computeIfAbsent(builder.request().index(), index -> new HashSet<>());
+            types.add(builder.request().type());
        }
-        Set<Tuple<String, String>> bogusIds = new HashSet<>();
+        Set<List<String>> bogusIds = new HashSet<>(); // (index, type, id)
        if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
            builders = new ArrayList<>(builders);
-            final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
            // inject some bogus docs
            final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
            final int unicodeLen = between(1, 10);
            for (int i = 0; i < numBogusDocs; i++) {
-                String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
-                String index = RandomPicks.randomFrom(random, indices);
-                bogusIds.add(new Tuple<>(index, id));
-                builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}", XContentType.JSON));
+                String id = "bogus_doc_" + randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
+                Map.Entry<String, Set<String>> indexAndTypes = RandomPicks.randomFrom(random, indicesAndTypes.entrySet());
+                String index = indexAndTypes.getKey();
+                String type = RandomPicks.randomFrom(random, indexAndTypes.getValue());
+                bogusIds.add(Arrays.asList(index, type, id));
+                // We configure a routing key in case the mapping requires it
+                builders.add(client().prepareIndex(index, type, id).setSource("{}", XContentType.JSON).setRouting(id));
            }
        }
-        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        Collections.shuffle(builders, random());
        final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Exception>> errors = new CopyOnWriteArrayList<>();
        List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
        // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
+        final String[] indices = indicesAndTypes.keySet().toArray(new String[0]);
        if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
            if (frequently()) {
                logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
@@ -1454,10 +1453,10 @@ public abstract class ESIntegTestCase extends ESTestCase {
        assertThat(actualErrors, emptyIterable());
        if (!bogusIds.isEmpty()) {
            // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
-            for (Tuple<String, String> doc : bogusIds) {
-                assertEquals("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]",
+            for (List<String> doc : bogusIds) {
+                assertEquals("failed to delete a dummy doc [" + doc.get(0) + "][" + doc.get(2) + "]",
                    DocWriteResponse.Result.DELETED,
-                    client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().getResult());
+                    client().prepareDelete(doc.get(0), doc.get(1), doc.get(2)).setRouting(doc.get(2)).get().getResult());
            }
        }
        if (forceRefresh) {