Mappings: Remove `index_analyzer` setting to simplify analyzer logic
The `analyzer` setting is now the base setting, and `search_analyzer` is simply an override of the search-time analyzer. When `search_analyzer` is set, `analyzer` must also be set. Closes #9371
This commit is contained in:
parent cc461a837f
commit afcedb94ed
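In short: `analyzer` now configures both index-time and search-time analysis, and `search_analyzer` only overrides the search-time side. A minimal sketch of a mapping under the new scheme (type and field names here are illustrative, not from the commit):

--------------------------------------------------
{
    "tweet" : {
        "properties" : {
            "message" : {
                "type" : "string",
                "analyzer" : "standard",
                "search_analyzer" : "whitespace"
            }
        }
    }
}
--------------------------------------------------

Omitting `search_analyzer` makes `standard` apply at both index and search time; specifying `search_analyzer` without `analyzer` is now a mapping error.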
@@ -144,6 +144,16 @@ def generate_index(client, version):
     # backcompat test for legacy type level analyzer settings, see #8874
     mappings['analyzer_type1'] = {
         'analyzer': 'standard',
+        'properties': {
+            'string_with_index_analyzer': {
+                'type': 'string',
+                'index_analyzer': 'standard'
+            },
+            'completion_with_index_analyzer': {
+                'type': 'completion',
+                'index_analyzer': 'standard'
+            }
+        }
     }
     mappings['analyzer_type2'] = {
         'index_analyzer': 'standard',
@@ -111,12 +111,12 @@ curl -s -XPUT 'http://localhost:9200/twitter/' -d '{
         "type": "string",
         "term_vector": "with_positions_offsets_payloads",
         "store" : true,
-        "index_analyzer" : "fulltext_analyzer"
+        "analyzer" : "fulltext_analyzer"
       },
       "fullname": {
         "type": "string",
        "term_vector": "with_positions_offsets_payloads",
-        "index_analyzer" : "fulltext_analyzer"
+        "analyzer" : "fulltext_analyzer"
       }
     }
   }
@@ -61,7 +61,7 @@ Here is a sample mapping:
 --------------------------------------------------
 
 The `_all` fields allows for `store`, `term_vector` and `analyzer` (with
-specific `index_analyzer` and `search_analyzer`) to be set.
+specific `analyzer` and `search_analyzer`) to be set.
 
 [float]
 [[highlighting]]
@@ -130,14 +130,11 @@ is also possible to set it to `offsets` (doc numbers, term
 frequencies, positions and offsets).
 
 |`analyzer` |The analyzer used to analyze the text contents when
-`analyzed` during indexing and when searching using a query string.
+`analyzed` during indexing and searching.
 Defaults to the globally configured analyzer.
 
-|`index_analyzer` |The analyzer used to analyze the text contents when
-`analyzed` during indexing.
-
-|`search_analyzer` |The analyzer used to analyze the field when part of
-a query string. Can be updated on an existing field.
+|`search_analyzer` |The analyzer used to analyze the field when searching, which
+overrides the value of `analyzer`. Can be updated on an existing field.
 
 |`include_in_all` |Should the field be included in the `_all` field (if
 enabled). If `index` is set to `no` this defaults to `false`, otherwise,
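Read against the table above, migrating an old mapping is a one-for-one rename: what was `index_analyzer` becomes `analyzer`, and `search_analyzer` keeps its meaning as the search-time override. A sketch (field and analyzer names illustrative):

--------------------------------------------------
{
    "body" : {
        "type" : "string",
        "analyzer" : "my_index_analyzer",
        "search_analyzer" : "my_search_analyzer"
    }
}
--------------------------------------------------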
@@ -627,10 +624,10 @@ the tokens aren't copied.
 [float]
 ==== Updating a field
 
-In the essence a field can't be updated. However multi fields can be
+In essence a field cannot be updated. However multi fields can be
 added to existing fields. This allows for example to have a different
-`index_analyzer` configuration in addition to the already configured
-`index_analyzer` configuration specified in the main and other multi fields.
+`analyzer` configuration in addition to the already configured
+`analyzer` configuration specified in the main and other multi fields.
 
 Also the new multi field will only be applied on document that have been
 added after the multi field has been added and in fact the new multi field
@@ -27,16 +27,15 @@ explicitly set analyzers on their own. Here is an example:
 --------------------------------------------------
 {
     "tweet" : {
-        "index_analyzer" : "standard",
-        "search_analyzer" : "standard"
+        "analyzer" : "standard",
+        "search_analyzer" : "standard_with_synonyms"
     }
 }
 --------------------------------------------------
 
-The above simply explicitly defines both the `index_analyzer` and
-`search_analyzer` that will be used. There is also an option to use the
-`analyzer` attribute to set both the `search_analyzer` and
-`index_analyzer`.
+The above simply explicitly defines both the `analyzer` and
+`search_analyzer` that will be used. If `search_analyzer` is not specified,
+it defaults to the value of `analyzer`.
 
 [float]
 ==== dynamic_date_formats
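If only the base setting is given at the type level, it covers both sides; a sketch of the shortest equivalent of setting both analyzers to `standard`:

--------------------------------------------------
{
    "tweet" : {
        "analyzer" : "standard"
    }
}
--------------------------------------------------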
@@ -36,7 +36,7 @@ curl -X PUT localhost:9200/music/song/_mapping -d '{
     "properties" : {
         "name" : { "type" : "string" },
         "suggest" : { "type" : "completion",
-                      "index_analyzer" : "simple",
+                      "analyzer" : "simple",
                       "search_analyzer" : "simple",
                       "payloads" : true
         }
@@ -47,16 +47,15 @@ curl -X PUT localhost:9200/music/song/_mapping -d '{
 
 Mapping supports the following parameters:
 
-`index_analyzer`::
+`analyzer`::
     The index analyzer to use, defaults to `simple`.
-
-`search_analyzer`::
-    The search analyzer to use, defaults to `simple`.
     In case you are wondering why we did not opt for the `standard`
     analyzer: We try to have easy to understand behaviour here, and if you
     index the field content `At the Drive-in`, you will not get any
     suggestions for `a`, nor for `d` (the first non stopword).
 
+`search_analyzer`::
+    The search analyzer to use, defaults to value of `analyzer`.
+
 `payloads`::
     Enables the storing of payloads, defaults to `false`
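Under the new parameter names, a completion field that analyzes differently at index and search time would look roughly like this (the analyzer choices are illustrative):

--------------------------------------------------
"suggest" : {
    "type" : "completion",
    "analyzer" : "simple",
    "search_analyzer" : "standard"
}
--------------------------------------------------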
@@ -305,18 +305,13 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
         this.fieldType = fieldType;
         this.fieldType.freeze();
 
         // automatically set to keyword analyzer if its indexed and not analyzed
-        if (indexAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexOptions() != IndexOptions.NONE) {
-            this.indexAnalyzer = Lucene.KEYWORD_ANALYZER;
+        if (indexAnalyzer == null && this.fieldType.tokenized() == false && this.fieldType.indexOptions() != IndexOptions.NONE) {
+            this.indexAnalyzer = this.searchAnalyzer = Lucene.KEYWORD_ANALYZER;
         } else {
             this.indexAnalyzer = indexAnalyzer;
-        }
-        // automatically set to keyword analyzer if its indexed and not analyzed
-        if (searchAnalyzer == null && !this.fieldType.tokenized() && this.fieldType.indexOptions() != IndexOptions.NONE) {
-            this.searchAnalyzer = Lucene.KEYWORD_ANALYZER;
-        } else {
             this.searchAnalyzer = searchAnalyzer;
         }
 
         if (postingsFormat == null) {
             if (defaultPostingFormat() != null) {
                 postingsFormat = PostingFormats.getAsProvider(defaultPostingFormat());
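The merged branch above means a field that is indexed but not analyzed now gets `Lucene.KEYWORD_ANALYZER` for both sides in one assignment instead of two separate defaulting passes. A sketch of a mapping that would take this path (field name illustrative):

--------------------------------------------------
"id" : {
    "type" : "string",
    "index" : "not_analyzed"
}
--------------------------------------------------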
@@ -623,12 +618,12 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
         // null and "default"-named index analyzers both mean the default is used
         if (this.indexAnalyzer == null || "default".equals(this.indexAnalyzer.name())) {
             if (fieldMergeWith.indexAnalyzer != null && !"default".equals(fieldMergeWith.indexAnalyzer.name())) {
-                mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+                mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer");
             }
         } else if (fieldMergeWith.indexAnalyzer == null || "default".equals(fieldMergeWith.indexAnalyzer.name())) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+            mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer");
         } else if (!this.indexAnalyzer.name().equals(fieldMergeWith.indexAnalyzer.name())) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_analyzer");
+            mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer");
         }
 
         if (!this.names().equals(fieldMergeWith.names())) {
@@ -733,34 +728,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
             builder.field("index_options", indexOptionToString(fieldType.indexOptions()));
         }
 
-        if (indexAnalyzer == null && searchAnalyzer == null) {
-            if (includeDefaults) {
-                builder.field("analyzer", "default");
-            }
-        } else if (indexAnalyzer == null) {
-            // searchAnalyzer != null
-            if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
-                builder.field("search_analyzer", searchAnalyzer.name());
-            }
-        } else if (searchAnalyzer == null) {
-            // indexAnalyzer != null
-            if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
-                builder.field("index_analyzer", indexAnalyzer.name());
-            }
-        } else if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
-            // indexAnalyzer == searchAnalyzer
-            if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
-                builder.field("analyzer", indexAnalyzer.name());
-            }
-        } else {
-            // both are there but different
-            if (includeDefaults || (!indexAnalyzer.name().startsWith("_") && !indexAnalyzer.name().equals("default"))) {
-                builder.field("index_analyzer", indexAnalyzer.name());
-            }
-            if (includeDefaults || (!searchAnalyzer.name().startsWith("_") && !searchAnalyzer.name().equals("default"))) {
-                builder.field("search_analyzer", searchAnalyzer.name());
-            }
-        }
+        doXContentAnalyzers(builder, includeDefaults);
 
         if (postingsFormat != null) {
             if (includeDefaults || !postingsFormat.name().equals(defaultPostingFormat())) {
@@ -804,6 +772,19 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {
         }
     }
 
+    protected void doXContentAnalyzers(XContentBuilder builder, boolean includeDefaults) throws IOException {
+        if (indexAnalyzer == null) {
+            if (includeDefaults) {
+                builder.field("analyzer", "default");
+            }
+        } else if (includeDefaults || indexAnalyzer.name().startsWith("_") == false && indexAnalyzer.name().equals("default") == false) {
+            builder.field("analyzer", indexAnalyzer.name());
+            if (searchAnalyzer.name().equals(indexAnalyzer.name()) == false) {
+                builder.field("search_analyzer", searchAnalyzer.name());
+            }
+        }
+    }
+
     protected static String indexOptionToString(IndexOptions indexOption) {
         switch (indexOption) {
             case DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS:
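As `doXContentAnalyzers` is written, `search_analyzer` is only serialized when it differs from `analyzer`, so round-tripped mappings stay minimal. A sketch of the two output shapes (field and analyzer names illustrative):

--------------------------------------------------
"field_with_same_analyzers" : {
    "type" : "string",
    "analyzer" : "simple"
},
"field_with_override" : {
    "type" : "string",
    "analyzer" : "simple",
    "search_analyzer" : "standard"
}
--------------------------------------------------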
@@ -29,6 +29,7 @@ import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -87,7 +88,6 @@ public class CompletionFieldMapper extends AbstractFieldMapper<String> {
     public static class Fields {
         // Mapping field names
         public static final String ANALYZER = "analyzer";
-        public static final ParseField INDEX_ANALYZER = new ParseField("index_analyzer");
         public static final ParseField SEARCH_ANALYZER = new ParseField("search_analyzer");
         public static final ParseField PRESERVE_SEPARATORS = new ParseField("preserve_separators");
         public static final ParseField PRESERVE_POSITION_INCREMENTS = new ParseField("preserve_position_increments");
@@ -159,6 +159,8 @@ public class CompletionFieldMapper extends AbstractFieldMapper<String> {
         @Override
         public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             CompletionFieldMapper.Builder builder = completionField(name);
+            NamedAnalyzer indexAnalyzer = null;
+            NamedAnalyzer searchAnalyzer = null;
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
                 String fieldName = entry.getKey();
@@ -166,16 +168,13 @@ public class CompletionFieldMapper extends AbstractFieldMapper<String> {
             if (fieldName.equals("type")) {
                 continue;
             }
-            if (fieldName.equals("analyzer")) {
-                NamedAnalyzer analyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
-                builder.indexAnalyzer(analyzer);
-                builder.searchAnalyzer(analyzer);
-                iterator.remove();
-            } else if (Fields.INDEX_ANALYZER.match(fieldName)) {
-                builder.indexAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
+            if (Fields.ANALYZER.equals(fieldName) || // index_analyzer is for backcompat, remove for v3.0
+                    fieldName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) {
+
+                indexAnalyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
                 iterator.remove();
             } else if (Fields.SEARCH_ANALYZER.match(fieldName)) {
-                builder.searchAnalyzer(getNamedAnalyzer(parserContext, fieldNode.toString()));
+                searchAnalyzer = getNamedAnalyzer(parserContext, fieldNode.toString());
                 iterator.remove();
             } else if (fieldName.equals(Fields.PAYLOADS)) {
                 builder.payloads(Boolean.parseBoolean(fieldNode.toString()));
@@ -199,13 +198,17 @@ public class CompletionFieldMapper extends AbstractFieldMapper<String> {
                 }
             }
 
-            if (builder.searchAnalyzer == null) {
-                builder.searchAnalyzer(parserContext.analysisService().analyzer("simple"));
+            if (indexAnalyzer == null) {
+                if (searchAnalyzer != null) {
+                    throw new MapperParsingException("analyzer on completion field [" + name + "] must be set when search_analyzer is set");
+                }
+                indexAnalyzer = searchAnalyzer = parserContext.analysisService().analyzer("simple");
+            } else if (searchAnalyzer == null) {
+                searchAnalyzer = indexAnalyzer;
             }
+            builder.indexAnalyzer(indexAnalyzer);
+            builder.searchAnalyzer(searchAnalyzer);
 
-            if (builder.indexAnalyzer == null) {
-                builder.indexAnalyzer(parserContext.analysisService().analyzer("simple"));
-            }
             // we are just using this as the default to be wrapped by the CompletionPostingsFormatProvider in the SuggesteFieldMapper ctor
             builder.postingsFormat(parserContext.postingFormatService().get("default"));
             return builder;
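A consequence of the parse logic above: a completion field that names only `search_analyzer` is rejected with the new `MapperParsingException`. A sketch of a mapping that now fails to parse (field name illustrative):

--------------------------------------------------
"suggest" : {
    "type" : "completion",
    "search_analyzer" : "simple"
}
--------------------------------------------------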
@@ -444,11 +447,10 @@ public class CompletionFieldMapper extends AbstractFieldMapper<String> {
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject(name())
                     .field(Fields.TYPE, CONTENT_TYPE);
-            if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
+
             builder.field(Fields.ANALYZER, indexAnalyzer.name());
-            } else {
-                builder.field(Fields.INDEX_ANALYZER.getPreferredName(), indexAnalyzer.name())
-                        .field(Fields.SEARCH_ANALYZER.getPreferredName(), searchAnalyzer.name());
+            if (indexAnalyzer.name().equals(searchAnalyzer.name()) == false) {
+                builder.field(Fields.SEARCH_ANALYZER.getPreferredName(), searchAnalyzer.name());
             }
             builder.field(Fields.PAYLOADS, this.payloads);
             builder.field(Fields.PRESERVE_SEPARATORS.getPreferredName(), this.preserveSeparators);
@@ -182,6 +182,8 @@ public class TypeParsers {
     }
 
     public static void parseField(AbstractFieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
+        NamedAnalyzer indexAnalyzer = null;
+        NamedAnalyzer searchAnalyzer = null;
         for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
             Map.Entry<String, Object> entry = iterator.next();
             final String propName = Strings.toUnderscoreCase(entry.getKey());
@@ -249,27 +251,21 @@ public class TypeParsers {
             } else if (propName.equals("index_options")) {
                 builder.indexOptions(nodeIndexOptionValue(propNode));
                 iterator.remove();
-            } else if (propName.equals("analyzer")) {
+            } else if (propName.equals("analyzer") || // for backcompat, reading old indexes, remove for v3.0
+                    propName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) {
+
                 NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                 if (analyzer == null) {
                     throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
                 }
-                builder.indexAnalyzer(analyzer);
-                builder.searchAnalyzer(analyzer);
-                iterator.remove();
-            } else if (propName.equals("index_analyzer")) {
-                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
-                if (analyzer == null) {
-                    throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
-                }
-                builder.indexAnalyzer(analyzer);
+                indexAnalyzer = analyzer;
                 iterator.remove();
             } else if (propName.equals("search_analyzer")) {
                 NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                 if (analyzer == null) {
                     throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
                 }
-                builder.searchAnalyzer(analyzer);
+                searchAnalyzer = analyzer;
                 iterator.remove();
             } else if (propName.equals("include_in_all")) {
                 builder.includeInAll(nodeBooleanValue(propNode));
@@ -294,6 +290,16 @@ public class TypeParsers {
                 iterator.remove();
             }
         }
+
+        if (indexAnalyzer == null) {
+            if (searchAnalyzer != null) {
+                throw new MapperParsingException("analyzer on field [" + name + "] must be set when search_analyzer is set");
+            }
+        } else if (searchAnalyzer == null) {
+            searchAnalyzer = indexAnalyzer;
+        }
+        builder.indexAnalyzer(indexAnalyzer);
+        builder.searchAnalyzer(searchAnalyzer);
     }
 
     public static boolean parseMultiField(AbstractFieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
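The same rule now holds for every field type handled by `parseField`: `search_analyzer` without `analyzer` fails with `analyzer on field [...] must be set when search_analyzer is set`. A sketch of a string field that trips it (field name illustrative):

--------------------------------------------------
"title" : {
    "type" : "string",
    "search_analyzer" : "whitespace"
}
--------------------------------------------------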
@@ -305,35 +305,7 @@ public class AllFieldMapper extends AbstractFieldMapper<String> implements Inter
             builder.field("omit_norms", fieldType.omitNorms());
         }
 
-
-        if (indexAnalyzer == null && searchAnalyzer == null) {
-            if (includeDefaults) {
-                builder.field("analyzer", "default");
-            }
-        } else if (indexAnalyzer == null) {
-            // searchAnalyzer != null
-            if (includeDefaults || !searchAnalyzer.name().startsWith("_")) {
-                builder.field("search_analyzer", searchAnalyzer.name());
-            }
-        } else if (searchAnalyzer == null) {
-            // indexAnalyzer != null
-            if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
-                builder.field("index_analyzer", indexAnalyzer.name());
-            }
-        } else if (indexAnalyzer.name().equals(searchAnalyzer.name())) {
-            // indexAnalyzer == searchAnalyzer
-            if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
-                builder.field("analyzer", indexAnalyzer.name());
-            }
-        } else {
-            // both are there but different
-            if (includeDefaults || !indexAnalyzer.name().startsWith("_")) {
-                builder.field("index_analyzer", indexAnalyzer.name());
-            }
-            if (includeDefaults || !searchAnalyzer.name().startsWith("_")) {
-                builder.field("search_analyzer", searchAnalyzer.name());
-            }
-        }
+        doXContentAnalyzers(builder, includeDefaults);
 
         if (similarity() != null) {
             builder.field("similarity", similarity().name());
@@ -57,7 +57,7 @@ public class CompletionFieldMapperTests extends ElasticsearchSingleNodeTest {
         String mapping = jsonBuilder().startObject().startObject("type1")
                 .startObject("properties").startObject("completion")
                 .field("type", "completion")
-                .field("index_analyzer", "simple")
+                .field("analyzer", "simple")
                 .field("search_analyzer", "standard")
                 .field("payloads", true)
                 .field("preserve_separators", false)
@@ -78,7 +78,7 @@ public class CompletionFieldMapperTests extends ElasticsearchSingleNodeTest {
         builder.close();
         Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
         Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
-        assertThat(configMap.get("index_analyzer").toString(), is("simple"));
+        assertThat(configMap.get("analyzer").toString(), is("simple"));
         assertThat(configMap.get("search_analyzer").toString(), is("standard"));
         assertThat(Boolean.valueOf(configMap.get("payloads").toString()), is(true));
         assertThat(Boolean.valueOf(configMap.get("preserve_separators").toString()), is(false));
@@ -91,7 +91,7 @@ public class CompletionFieldMapperTests extends ElasticsearchSingleNodeTest {
         String mapping = jsonBuilder().startObject().startObject("type1")
                 .startObject("properties").startObject("completion")
                 .field("type", "completion")
-                .field("index_analyzer", "simple")
+                .field("analyzer", "simple")
                 .field("search_analyzer", "simple")
                 .endObject().endObject()
                 .endObject().endObject().string();
@@ -106,10 +106,10 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
     public void testMergeSearchAnalyzer() throws Exception {
         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
         String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+                .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "whitespace").endObject().endObject()
                 .endObject().endObject().string();
         String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "keyword").endObject().endObject()
+                .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "keyword").endObject().endObject()
                 .endObject().endObject().string();
 
         DocumentMapper existing = parser.parse(mapping1);
@@ -123,13 +123,13 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
     }
 
     @Test
-    public void testNotChangeSearchAnalyzer() throws Exception {
+    public void testChangeSearchAnalyzerToDefault() throws Exception {
         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
         String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", "string").field("search_analyzer", "whitespace").endObject().endObject()
+                .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "whitespace").endObject().endObject()
                 .endObject().endObject().string();
         String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("field").field("type", "string").field("postings_format", "Lucene41").endObject().endObject()
+                .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("postings_format", "Lucene41").endObject().endObject()
                 .endObject().endObject().string();
 
         DocumentMapper existing = parser.parse(mapping1);
@@ -139,7 +139,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
         DocumentMapper.MergeResult mergeResult = existing.merge(changed, mergeFlags().simulate(false));
 
         assertThat(mergeResult.hasConflicts(), equalTo(false));
-        assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("whitespace"));
+        assertThat(((NamedAnalyzer) existing.mappers().name("field").mapper().searchAnalyzer()).name(), equalTo("standard"));
         assertThat((existing.mappers().name("field").mapper().postingsFormatProvider()).name(), equalTo("Lucene41"));
     }
 
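The renamed test documents a behavior shift worth noting: merging in a mapping that keeps `analyzer` but omits `search_analyzer` now resets the search analyzer to the value of `analyzer` ("standard" here) instead of preserving the old "whitespace" override. Roughly, the field mapping goes from the first sketch to the second:

--------------------------------------------------
"field" : { "type" : "string", "analyzer" : "standard", "search_analyzer" : "whitespace" }

"field" : { "type" : "string", "analyzer" : "standard" }
--------------------------------------------------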
@@ -65,7 +65,7 @@ public class UpdateMappingOnClusterTests extends ElasticsearchIntegrationTest {
                 "[_all] has different store_term_vector_offsets values",
                 "[_all] has different store_term_vector_positions values",
                 "[_all] has different store_term_vector_payloads values",
-                "[_all] has different index_analyzer",
+                "[_all] has different analyzer",
                 "[_all] has different similarity"};
         // auto_boost and fielddata and search_analyzer should not report conflict
         testConflict(mapping, mappingUpdate, errorMessage);
@@ -9,7 +9,7 @@
         "store_term_vector_positions": true,
         "store_term_vector_payloads": true,
         "omit_norms": true,
-        "index_analyzer": "standard",
+        "analyzer": "standard",
         "search_analyzer": "whitespace",
         "similarity": "my_similarity",
         "fielddata": {
@@ -9,7 +9,7 @@
         "store_term_vector_positions": false,
         "store_term_vector_payloads": false,
         "omit_norms": false,
-        "index_analyzer": "whitespace",
+        "analyzer": "whitespace",
         "search_analyzer": "standard",
         "similarity": "bm25",
         "fielddata": {
@@ -91,7 +91,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
                 .startObject("fields")
                 .startObject("autocomplete")
                 .field("type", "string")
-                .field("index_analyzer", "autocomplete")
+                .field("analyzer", "autocomplete")
                 .field("search_analyzer", "search_autocomplete")
                 .field("term_vector", "with_positions_offsets")
                 .endObject()
@@ -143,7 +143,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
      * query. We cut off and extract terms if there are more than 16 terms in the query
      */
        assertAcked(prepareCreate("test")
-                .addMapping("test", "body", "type=string,index_analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
+                .addMapping("test", "body", "type=string,analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
                .setSettings(
                        settingsBuilder().put(indexSettings())
                                .put("analysis.filter.wordDelimiter.type", "word_delimiter")
@@ -173,8 +173,8 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
 
         assertAcked(prepareCreate("test")
                 .addMapping("test",
-                        "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
-                        "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
+                        "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
+                        "name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
                 .setSettings(settingsBuilder()
                         .put(indexSettings())
                         .put("analysis.filter.my_ngram.max_gram", 20)
@@ -240,8 +240,8 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     public void testNgramHighlighting() throws ElasticsearchException, IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("test",
-                        "name", "type=string,index_analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
-                        "name2", "type=string,index_analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
+                        "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
+                        "name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
                 .setSettings(settingsBuilder()
                         .put(indexSettings())
                         .put("analysis.filter.my_ngram.max_gram", 20)
@@ -1665,7 +1665,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
                 .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
-        assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+        assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
         ensureGreen();
         client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
         refresh();
@@ -1696,7 +1696,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
                 .putArray("index.analysis.analyzer.search.filter", "lowercase", "keyword_repeat", "porterStem", "unique_stem")
                 .put("index.analysis.filter.unique_stem.type", "unique")
                 .put("index.analysis.filter.unique_stem.only_on_same_position", true));
-        assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+        assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
         ensureGreen();
         client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get();
         refresh();
@@ -1721,7 +1721,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
                 .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
-        assertAcked(builder.addMapping("test", "text", "type=string,index_analyzer=index,search_analyzer=search"));
+        assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
         ensureGreen();
 
         client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
@@ -2346,7 +2346,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
                 .put("index.analysis.tokenizer.my_ngram_tokenizer.min_gram", "1")
                 .put("index.analysis.tokenizer.my_ngram_tokenizer.max_gram", "10")
                 .putArray("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0]));
-        assertAcked(builder.addMapping("test", "origin", "type=string,copy_to=meta", "meta", "type=string,index_analyzer=my_ngram_analyzer"));
+        assertAcked(builder.addMapping("test", "origin", "type=string,copy_to=meta", "meta", "type=string,analyzer=my_ngram_analyzer"));
         // we only have ngrams as the index analyzer so searches will get standard analyzer
         ensureGreen();
 
@@ -142,7 +142,7 @@ public class QueryRescorerTests extends ElasticsearchIntegrationTest {
         builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
 
         XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
-                .startObject("field1").field("type", "string").field("index_analyzer", "whitespace").field("search_analyzer", "synonym")
+                .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
                 .endObject().endObject().endObject().endObject();
 
         assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
@@ -221,7 +221,7 @@ public class QueryRescorerTests extends ElasticsearchIntegrationTest {
         builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
 
         XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
-                .startObject("field1").field("type", "string").field("index_analyzer", "whitespace").field("search_analyzer", "synonym")
+                .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
                 .endObject().endObject().endObject().endObject();
 
         assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
@@ -292,7 +292,7 @@ public class QueryRescorerTests extends ElasticsearchIntegrationTest {
         builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
 
         XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
-                .startObject("field1").field("type", "string").field("index_analyzer", "whitespace").field("search_analyzer", "synonym")
+                .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
                 .endObject().endObject().endObject().endObject();
 
         assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
@@ -503,7 +503,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
                 .field("path", "just_name")
                 .startObject("fields")
                 .startObject(FIELD).field("type", "string").endObject()
-                .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+                .startObject("suggest").field("type", "completion").field("analyzer", "simple").endObject()
                 .endObject()
                 .endObject()
                 .endObject().endObject()
@@ -548,7 +548,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
                 .field("type", "string")
                 .field("path", "just_name") // Need to specify path again, to make sure that the `path` is known when this mapping is parsed and turned into DocumentMapper that we merge with.
                 .startObject("fields")
-                .startObject("suggest").field("type", "completion").field("index_analyzer", "simple").field("search_analyzer", "simple").endObject()
+                .startObject("suggest").field("type", "completion").field("analyzer", "simple").endObject()
                 .endObject()
                 .endObject()
                 .endObject().endObject()
@@ -899,7 +899,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
                 .startObject(TYPE).startObject("properties")
                 .startObject(FIELD)
                 .field("type", "completion")
-                .field("index_analyzer", completionMappingBuilder.indexAnalyzer)
+                .field("analyzer", completionMappingBuilder.indexAnalyzer)
                 .field("search_analyzer", completionMappingBuilder.searchAnalyzer)
                 .field("payloads", completionMappingBuilder.payloads)
                 .field("preserve_separators", completionMappingBuilder.preserveSeparators)
@@ -167,8 +167,7 @@ public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest {
         mapping.endObject();
         mapping.startObject(FIELD);
         mapping.field("type", "completion");
-        mapping.field("index_analyzer", "simple");
-        mapping.field("search_analyzer", "simple");
+        mapping.field("analyzer", "simple");
 
         mapping.startObject("context");
         mapping.value(ContextBuilder.location("st", 5, true).field("pin").build());
@@ -972,7 +971,7 @@ public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest {
         mapping.startObject("properties");
         mapping.startObject(FIELD);
         mapping.field("type", "completion");
-        mapping.field("index_analyzer", indexAnalyzer);
+        mapping.field("analyzer", indexAnalyzer);
         mapping.field("search_analyzer", searchAnalyzer);
         mapping.field("payloads", payloads);
         mapping.field("preserve_separators", preserveSeparators);
@@ -175,7 +175,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest {
                 .endObject()
                 .startObject("name_shingled")
                 .field("type", "string")
-                .field("index_analyzer", "biword")
+                .field("analyzer", "biword")
                 .field("search_analyzer", "standard")
                 .endObject()
                 .endObject()
@@ -251,7 +251,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest {
                 .endObject()
                 .startObject("name_shingled")
                 .field("type", "string")
-                .field("index_analyzer", "biword")
+                .field("analyzer", "biword")
                 .field("search_analyzer", "standard")
                 .endObject()
                 .endObject()
@@ -1480,7 +1480,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
         assertAcked(prepareCreate("test-idx", 2, indexSettings));
 
         int numberOfShards = getNumShards("test-idx").numPrimaries;
-        assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("type1").setSource("field1", "type=string,search_analyzer=my_analyzer"));
+        assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("type1").setSource("field1", "type=string,analyzer=standard,search_analyzer=my_analyzer"));
         final int numdocs = randomIntBetween(10, 100);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
         for (int i = 0; i < builders.length; i++) {