Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-16 18:04:52 +00:00
As a result of this, we can remove a chunk of code from TypeParsers as well. Tests for search/index mode analyzers have moved into their own file. This commit also rationalises the serialization checks for parameters into a single SerializerCheck interface that takes the values includeDefaults, isConfigured and the value itself. Relates to #62988
This commit is contained in: parent 5534a60fa0, commit 88b45dfa61
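The diff below does not include the SerializerCheck interface itself, so here is a rough sketch of the shape the commit message describes (the name comes from the message; the generics and method name are assumptions, not a quote of the source):

[source,java]
--------------------------------------------------
// Hypothetical sketch of the rationalised serialization check described in
// the commit message: a single functional interface deciding whether a
// parameter should be written out, given whether defaults were requested,
// whether the user explicitly configured the parameter, and its value.
@FunctionalInterface
interface SerializerCheck<T> {
    boolean check(boolean includeDefaults, boolean isConfigured, T value);
}
--------------------------------------------------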
@@ -38,14 +38,14 @@ Dynamic templates are specified as an array of named objects:

<3> The mapping that the matched field should use.

If a provided mapping contains an invalid mapping snippet, a validation error
is returned. Validation occurs when applying the dynamic template at index time,
and, in most cases, when the dynamic template is updated. Providing an invalid mapping
snippet may cause the update or validation of a dynamic template to fail under certain conditions:

* If no `match_mapping_type` has been specified but the template is valid for at least one predefined mapping type,
the mapping snippet is considered valid. However, a validation error is returned at index time if a field matching
the template is indexed as a different type. For example, configuring a dynamic template with no `match_mapping_type`
is considered valid as string type, but if a field matching the dynamic template is indexed as a long, a validation
error is returned at index time. An illustrative template is sketched below.

* If the `{name}` placeholder is used in the mapping snippet, validation is skipped when updating the dynamic
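For illustration, a hypothetical template of the following shape (index, template, and field names are invented here, not taken from the change) passes validation because the snippet is valid for string fields, yet returns a validation error once a matching field arrives as a long, since `analyzer` cannot apply to numeric types:

[source,console]
--------------------------------------------------
PUT my-index-000001
{
  "mappings": {
    "dynamic_templates": [
      {
        "example_no_match_type": {
          "match": "data_*",
          "mapping": {
            "analyzer": "english"
          }
        }
      }
    ]
  }
}
--------------------------------------------------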
@@ -284,6 +284,7 @@ PUT my-index-000001/_doc/1
  "count": 5 <2>
}
--------------------------------------------------
// TEST[warning:Parameter [doc_values] has no effect on type [text] and will be removed in future]

<1> The `english` field is mapped as a `string` field with the `english` analyzer.
<2> The `count` field is mapped as a `long` field with `doc_values` disabled.
@@ -19,6 +19,12 @@

package org.elasticsearch.index.mapper.annotatedtext;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
@@ -28,113 +34,163 @@ import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.StandardTokenizerFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperTestCase;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugin.mapper.AnnotatedTextPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {

    IndexService indexService;
    DocumentMapperParser parser;

    @Before
    public void setup() {
        Settings settings = Settings.builder()
            .put("index.analysis.analyzer.my_stop_analyzer.tokenizer", "standard")
            .put("index.analysis.analyzer.my_stop_analyzer.filter", "stop")
            .build();
        indexService = createIndex("test", settings);
        parser = indexService.mapperService().documentMapperParser();
    }

public class AnnotatedTextFieldMapperTests extends MapperTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> getPlugins() {
        List<Class<? extends Plugin>> classpathPlugins = new ArrayList<>();
        classpathPlugins.add(AnnotatedTextPlugin.class);
        return classpathPlugins;
    protected Collection<Plugin> getPlugins() {
        return Collections.singletonList(new AnnotatedTextPlugin());
    }

    @Override
    protected void minimalMapping(XContentBuilder b) throws IOException {
        b.field("type", "annotated_text");
    }

    @Override
    protected void writeFieldValue(XContentBuilder builder) throws IOException {
        builder.value("some text");
    }

    protected String getFieldType() {
        return "annotated_text";
    @Override
    protected void assertParseMaximalWarnings() {
        assertWarnings("Parameter [boost] on field [field] is deprecated and will be removed in 8.0");
    }

    @Override
    protected void registerParameters(ParameterChecker checker) throws IOException {

        checker.registerUpdateCheck(b -> {
            b.field("analyzer", "default");
            b.field("search_analyzer", "keyword");
        },
        m -> assertEquals("keyword", m.fieldType().getTextSearchInfo().getSearchAnalyzer().name()));
        checker.registerUpdateCheck(b -> {
            b.field("analyzer", "default");
            b.field("search_analyzer", "keyword");
            b.field("search_quote_analyzer", "keyword");
        },
        m -> assertEquals("keyword", m.fieldType().getTextSearchInfo().getSearchQuoteAnalyzer().name()));

        checker.registerConflictCheck("store", b -> b.field("store", true));
        checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs"));
        checker.registerConflictCheck("similarity", b -> b.field("similarity", "boolean"));
        checker.registerConflictCheck("analyzer", b -> b.field("analyzer", "keyword"));
        checker.registerConflictCheck("term_vector", b -> b.field("term_vector", "yes"));

        checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10));

        // norms can be set from true to false, but not vice versa
        checker.registerConflictCheck("norms",
            fieldMapping(b -> {
                b.field("type", "annotated_text");
                b.field("norms", false);
            }),
            fieldMapping(b -> {
                b.field("type", "annotated_text");
                b.field("norms", true);
            }));
        checker.registerUpdateCheck(
            b -> {
                b.field("type", "annotated_text");
                b.field("norms", true);
            },
            b -> {
                b.field("type", "annotated_text");
                b.field("norms", false);
            },
            m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())
        );
        checker.registerUpdateCheck(b -> b.field("boost", 2.0), m -> assertEquals(m.fieldType().boost(), 2.0, 0));
    }
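For context, `registerConflictCheck` asserts that merging a mapping which changes the named parameter is rejected, while `registerUpdateCheck` asserts the change is accepted and then verifies the updated mapper. A simplified sketch of the conflict case (an illustration of the harness's behaviour, not the actual MapperTestCase code):

[source,java]
--------------------------------------------------
// Rough sketch only (assumed helper, not the real framework code):
// build a mapper from the original mapping, then assert that merging an
// updated mapping which changes the parameter is rejected.
private void checkConflict(String param, XContentBuilder original, XContentBuilder updated) throws IOException {
    MapperService mapperService = createMapperService(original);
    Exception e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, updated));
    // non-updatable parameters are reported with a message of roughly this form
    assertThat(e.getMessage(), containsString("Cannot update parameter [" + param + "]"));
}
--------------------------------------------------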

    @Override
    protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) {
        NamedAnalyzer dflt = new NamedAnalyzer(
            "default",
            AnalyzerScope.INDEX,
            new StandardAnalyzer(),
            TextFieldMapper.Defaults.POSITION_INCREMENT_GAP
        );
        NamedAnalyzer standard = new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer());
        NamedAnalyzer keyword = new NamedAnalyzer("keyword", AnalyzerScope.INDEX, new KeywordAnalyzer());
        NamedAnalyzer whitespace = new NamedAnalyzer("whitespace", AnalyzerScope.INDEX, new WhitespaceAnalyzer());
        NamedAnalyzer stop = new NamedAnalyzer(
            "my_stop_analyzer",
            AnalyzerScope.INDEX,
            new CustomAnalyzer(
                new StandardTokenizerFactory(indexSettings, null, "standard", indexSettings.getSettings()),
                new CharFilterFactory[0],
                new TokenFilterFactory[] { new TokenFilterFactory() {
                    @Override
                    public String name() {
                        return "stop";
                    }

                    @Override
                    public TokenStream create(TokenStream tokenStream) {
                        return new StopFilter(tokenStream, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
                    }
                } }
            )
        );
        Map<String, NamedAnalyzer> analyzers = new HashMap<>();
        analyzers.put("default", dflt);
        analyzers.put("standard", standard);
        analyzers.put("keyword", keyword);
        analyzers.put("whitespace", whitespace);
        analyzers.put("my_stop_analyzer", stop);
        return new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
    }

    public void testAnnotationInjection() throws IOException {

        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
        MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping));

        // Use example of typed and untyped annotations
        String annotatedText = "He paid [Stormy Daniels](Stephanie+Clifford&Payee) hush money";
        SourceToParse sourceToParse = new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field", annotatedText)
                .endObject()),
            XContentType.JSON);
        ParsedDocument doc = mapper.parse(sourceToParse);
        ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.field("field", annotatedText)));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(1, fields.length);

        assertEquals(annotatedText, fields[0].stringValue());

        IndexShard shard = indexService.getShard(0);
        shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
            sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        shard.refresh("test");
        try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
            LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> {

            LeafReader leaf = reader.leaves().get(0).reader();
            TermsEnum terms = leaf.terms("field").iterator();

            assertTrue(terms.seekExact(new BytesRef("stormy")));
@@ -157,39 +213,23 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
            postings = terms.postings(null, PostingsEnum.POSITIONS);
            assertEquals(0, postings.nextDoc());
            assertEquals(4, postings.nextPosition());

        }
        });
    }

    public void testToleranceForBadAnnotationMarkup() throws IOException {

        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
        MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping));

        String annotatedText = "foo [bar](MissingEndBracket baz";
        SourceToParse sourceToParse = new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field", annotatedText)
                .endObject()),
            XContentType.JSON);
        ParsedDocument doc = mapper.parse(sourceToParse);
        ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.field("field", annotatedText)));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(1, fields.length);

        assertEquals(annotatedText, fields[0].stringValue());

        IndexShard shard = indexService.getShard(0);
        shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
            sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        shard.refresh("test");
        try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
            LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> {
            LeafReader leaf = reader.leaves().get(0).reader();
            TermsEnum terms = leaf.terms("field").iterator();

            assertTrue(terms.seekExact(new BytesRef("foo")));
@@ -206,66 +246,41 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
            // Bad markup means value is treated as plain text and fed through tokenisation
            assertTrue(terms.seekExact(new BytesRef("missingendbracket")));

        }
        });
    }

    public void testAgainstTermVectorsAPI() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("tvfield").field("type", getFieldType())
            .field("term_vector", "with_positions_offsets_payloads")
            .endObject().endObject()
            .endObject().endObject());
        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
    public void testIndexedTermVectors() throws IOException {

        MapperService mapperService = createMapperService(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.field("term_vector", "with_positions_offsets_payloads");
        }));

        int max = between(3, 10);
        BulkRequestBuilder bulk = client().prepareBulk();
        for (int i = 0; i < max; i++) {
            bulk.add(client().prepareIndex("test", "type", Integer.toString(i))
                .setSource("tvfield", "the quick [brown](Color) fox jumped over the lazy dog"));
        }
        bulk.get();

        TermVectorsRequest request = new TermVectorsRequest("test", "type", "0").termStatistics(true);

        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
        IndexService test = indicesService.indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);
        assertThat(shard, notNullValue());
        TermVectorsResponse response = TermVectorsService.getTermVectors(shard, request);
        assertEquals(1, response.getFields().size());

        Terms terms = response.getFields().terms("tvfield");
        TermsEnum iterator = terms.iterator();
        BytesRef term;
        Set<String> foundTerms = new HashSet<>();
        while ((term = iterator.next()) != null) {
            foundTerms.add(term.utf8ToString());
        }
        //Check we have both text and annotation tokens
        assertTrue(foundTerms.contains("brown"));
        assertTrue(foundTerms.contains("Color"));
        assertTrue(foundTerms.contains("fox"));
        String text = "the quick [brown](Color) fox jumped over the lazy dog";
        ParsedDocument doc
            = mapperService.documentMapper().parse(source(b -> b.field("field", text)));

        withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> {
            LeafReader leaf = reader.leaves().get(0).reader();
            Terms terms = leaf.getTermVector(0, "field");
            TermsEnum iterator = terms.iterator();
            BytesRef term;
            Set<String> foundTerms = new HashSet<>();
            while ((term = iterator.next()) != null) {
                foundTerms.add(term.utf8ToString());
            }
            //Check we have both text and annotation tokens
            assertTrue(foundTerms.contains("brown"));
            assertTrue(foundTerms.contains("Color"));
            assertTrue(foundTerms.contains("fox"));
        });
    }

    // ===== Code below copied from TextFieldMapperTests ========

    public void testDefaults() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping));

        assertEquals(mapping, mapper.mappingSource().toString());

        ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field", "1234")
                .endObject()),
            XContentType.JSON));
        ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234")));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(1, fields.length);
@@ -284,20 +299,13 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
    }

    public void testEnableStore() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", getFieldType()).field("store", true).endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.field("store", true);
        }));

        assertEquals(mapping, mapper.mappingSource().toString());

        ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field", "1234")
                .endObject()),
            XContentType.JSON));
        ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234")));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(1, fields.length);
@@ -305,23 +313,13 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
    }

    public void testDisableNorms() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field")
            .field("type", getFieldType())
            .field("norms", false)
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.field("norms", false);
        }));

        assertEquals(mapping, mapper.mappingSource().toString());

        ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field", "1234")
                .endObject()),
            XContentType.JSON));
        ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234")));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(1, fields.length);
@@ -335,47 +333,23 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
        supportedOptions.put("positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
        supportedOptions.put("offsets", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);

        XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties");
        for (String option : supportedOptions.keySet()) {
            mappingBuilder.startObject(option).field("type", getFieldType()).field("index_options", option).endObject();
        }
        String mapping = Strings.toString(mappingBuilder.endObject().endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));

        XContentBuilder jsonDoc = XContentFactory.jsonBuilder().startObject();
        for (String option : supportedOptions.keySet()) {
            jsonDoc.field(option, "1234");
        }
        ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference.bytes(jsonDoc.endObject()),
            XContentType.JSON));

        for (Map.Entry<String, IndexOptions> entry : supportedOptions.entrySet()) {
            String field = entry.getKey();
            IndexOptions options = entry.getValue();
            IndexableField[] fields = doc.rootDoc().getFields(field);
            DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> {
                b.field("type", "annotated_text");
                b.field("index_options", option);
            }));
            ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234")));
            IndexableField[] fields = doc.rootDoc().getFields("field");
            assertEquals(1, fields.length);
            assertEquals(options, fields[0].fieldType().indexOptions());
            assertEquals(supportedOptions.get(option), fields[0].fieldType().indexOptions());
        }
    }

    public void testDefaultPositionIncrementGap() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field").field("type", getFieldType()).endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
        MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping));

        assertEquals(mapping, mapper.mappingSource().toString());

        SourceToParse sourceToParse = new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .array("field", new String[] {"a", "b"})
                .endObject()),
            XContentType.JSON);
        ParsedDocument doc = mapper.parse(sourceToParse);
        ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", "a", "b")));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(2, fields.length);
@@ -383,120 +357,99 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
        assertEquals("a", fields[0].stringValue());
        assertEquals("b", fields[1].stringValue());

        IndexShard shard = indexService.getShard(0);
        shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
            sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        shard.refresh("test");
        try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
            LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> {
            LeafReader leaf = reader.leaves().get(0).reader();
            TermsEnum terms = leaf.terms("field").iterator();
            assertTrue(terms.seekExact(new BytesRef("b")));
            PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
            assertEquals(0, postings.nextDoc());
            assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 1, postings.nextPosition());
        }
        });
    }

    public void testPositionIncrementGap() throws IOException {
        final int positionIncrementGap = randomIntBetween(1, 1000);
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field")
            .field("type", getFieldType())
            .field("position_increment_gap", positionIncrementGap)
            .endObject().endObject()
            .endObject().endObject());

        DocumentMapper mapper = indexService.mapperService().merge("type",
            new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
        MapperService mapperService = createMapperService(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.field("position_increment_gap", positionIncrementGap);
        }));

        assertEquals(mapping, mapper.mappingSource().toString());

        SourceToParse sourceToParse = new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .array("field", new String[]{"a", "b"})
                .endObject()),
            XContentType.JSON);
        ParsedDocument doc = mapper.parse(sourceToParse);
        ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", "a", "b")));

        IndexableField[] fields = doc.rootDoc().getFields("field");
        assertEquals(2, fields.length);

        assertEquals("a", fields[0].stringValue());
        assertEquals("b", fields[1].stringValue());

        IndexShard shard = indexService.getShard(0);
        shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
            sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        shard.refresh("test");
        try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
            LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> {
            LeafReader leaf = reader.leaves().get(0).reader();
            TermsEnum terms = leaf.terms("field").iterator();
            assertTrue(terms.seekExact(new BytesRef("b")));
            PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
            assertEquals(0, postings.nextDoc());
            assertEquals(positionIncrementGap + 1, postings.nextPosition());
        }
        });
    }

    public void testSearchAnalyzerSerialization() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "standard")
            .field("search_analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());

        // special case: default index analyzer
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "default")
            .field("search_analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());

        mapper = parser.parse("type", new CompressedXContent(mapping));
        mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());

        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());

        mapper = parser.parse("type", new CompressedXContent(mapping));
        mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());

        // special case: default search analyzer
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "keyword")
            .field("search_analyzer", "default")
            .endObject()
            .endObject().endObject().endObject());

        mapper = parser.parse("type", new CompressedXContent(mapping));
        mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());

        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());
        mapper = parser.parse("type", new CompressedXContent(mapping));
        mapper = createDocumentMapper("_doc", mapping);

        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
@@ -510,77 +463,53 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
    }

    public void testSearchQuoteAnalyzerSerialization() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type","annotated_text")
            .field("analyzer", "standard")
            .field("search_analyzer", "standard")
            .field("search_quote_analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());

        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());

        // special case: default index/search analyzer
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_doc")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("type", "annotated_text")
            .field("analyzer", "default")
            .field("search_analyzer", "default")
            .field("search_quote_analyzer", "keyword")
            .endObject()
            .endObject().endObject().endObject());

        mapper = parser.parse("type", new CompressedXContent(mapping));
        mapper = createDocumentMapper("_doc", mapping);
        assertEquals(mapping, mapper.mappingSource().toString());
    }

    public void testTermVectors() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties")
            .startObject("field1")
            .field("type", getFieldType())
            .field("term_vector", "no")
            .endObject()
            .startObject("field2")
            .field("type", getFieldType())
            .field("term_vector", "yes")
            .endObject()
            .startObject("field3")
            .field("type", getFieldType())
            .field("term_vector", "with_offsets")
            .endObject()
            .startObject("field4")
            .field("type", getFieldType())
            .field("term_vector", "with_positions")
            .endObject()
            .startObject("field5")
            .field("type", getFieldType())
            .field("term_vector", "with_positions_offsets")
            .endObject()
            .startObject("field6")
            .field("type", getFieldType())
            .field("term_vector", "with_positions_offsets_payloads")
            .endObject()
            .endObject()
            .endObject().endObject());

        DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
        DocumentMapper defaultMapper = createDocumentMapper(mapping(b -> {
            b.startObject("field1").field("type", "annotated_text").field("term_vector", "no").endObject();
            b.startObject("field2").field("type", "annotated_text").field("term_vector", "yes").endObject();
            b.startObject("field3").field("type", "annotated_text").field("term_vector", "with_offsets").endObject();
            b.startObject("field4").field("type", "annotated_text").field("term_vector", "with_positions").endObject();
            b.startObject("field5").field("type", "annotated_text").field("term_vector", "with_positions_offsets").endObject();
            b.startObject("field6").field("type", "annotated_text").field("term_vector", "with_positions_offsets_payloads").endObject();
        }));

        ParsedDocument doc = defaultMapper.parse(new SourceToParse("test", "type", "1", BytesReference
            .bytes(XContentFactory.jsonBuilder()
                .startObject()
                .field("field1", "1234")
                .field("field2", "1234")
                .field("field3", "1234")
                .field("field4", "1234")
                .field("field5", "1234")
                .field("field6", "1234")
                .endObject()),
            XContentType.JSON));
        ParsedDocument doc = defaultMapper.parse(source(b -> {
            b.field("field1", "1234");
            b.field("field2", "1234");
            b.field("field3", "1234");
            b.field("field4", "1234");
            b.field("field5", "1234");
            b.field("field6", "1234");
        }));

        assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false));
        assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false));
@@ -613,62 +542,32 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
        assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true));
    }

    public void testNullConfigValuesFail() throws MapperParsingException, IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject()
            .startObject("type")
            .startObject("properties")
            .startObject("field")
            .field("type", getFieldType())
            .field("analyzer", (String) null)
            .endObject()
            .endObject()
            .endObject().endObject());

        Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping)));
        assertEquals("[analyzer] must not have a [null] value", e.getMessage());
    public void testNullConfigValuesFail() throws MapperParsingException {
        Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.nullField("analyzer");
        })));
        assertThat(e.getMessage(), containsString("must not have a [null] value"));
    }

    public void testNotIndexedField() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties").startObject("field")
            .field("type", getFieldType())
            .field("index", false)
            .endObject().endObject().endObject().endObject());

        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> parser.parse("type", new CompressedXContent(mapping)));
        assertEquals("[annotated_text] fields must be indexed", e.getMessage());
        createMapperService(fieldMapping(b -> {
            b.field("type", "annotated_text");
            b.field("index", false);
        }));
        assertWarnings("Parameter [index] has no effect on type [annotated_text] and will be removed in future");
    }

    public void testAnalyzedFieldPositionIncrementWithoutPositions() throws IOException {
    public void testAnalyzedFieldPositionIncrementWithoutPositions() {
        for (String indexOptions : Arrays.asList("docs", "freqs")) {
            String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties").startObject("field")
                .field("type", getFieldType())
                .field("index_options", indexOptions)
                .field("position_increment_gap", 10)
                .endObject().endObject().endObject().endObject());

            IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> parser.parse("type", new CompressedXContent(mapping)));
            assertEquals("Cannot set position_increment_gap on field [field] without positions enabled", e.getMessage());
            Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
                b.field("type", "annotated_text");
                b.field("index_options", indexOptions);
                b.field("position_increment_gap", 0);
            })));
            assertThat(e.getMessage(),
                containsString("Cannot set position_increment_gap on field [field] without positions enabled"));
        }
    }

    public void testEmptyName() throws IOException {
        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject()
            .startObject("type")
            .startObject("properties")
            .startObject("")
            .field("type", getFieldType())
            .endObject()
            .endObject()
            .endObject().endObject());

        // Empty name not allowed in index created after 5.0
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> parser.parse("type", new CompressedXContent(mapping))
        );
        assertThat(e.getMessage(), containsString("name cannot be empty string"));
    }
}
@@ -32,16 +32,15 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.ParametrizedFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken;
import org.elasticsearch.index.mapper.TextParams;
import org.elasticsearch.index.mapper.TextSearchInfo;
import org.elasticsearch.index.similarity.SimilarityProvider;

import java.io.IOException;
@@ -51,16 +50,15 @@ import java.io.UncheckedIOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;

/** A {@link FieldMapper} for full-text fields with annotation markup e.g.
 *
 * "New mayor is [John Smith](type=person&value=John%20Smith) "
@@ -72,34 +70,48 @@ import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
 * This code is largely a copy of TextFieldMapper which is less than ideal -
 * my attempts to subclass TextFieldMapper failed but we can revisit this.
 **/
public class AnnotatedTextFieldMapper extends FieldMapper {
public class AnnotatedTextFieldMapper extends ParametrizedFieldMapper {

    public static final String CONTENT_TYPE = "annotated_text";
    private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;

    public static class Builder extends TextFieldMapper.Builder {
    private static Builder builder(FieldMapper in) {
        return ((AnnotatedTextFieldMapper)in).builder;
    }

        private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER;
    public static class Builder extends ParametrizedFieldMapper.Builder {

        public Builder(String name) {
        private final Parameter<Boolean> store = Parameter.storeParam(m -> builder(m).store.getValue(), false);

        final TextParams.Analyzers analyzers;
        final Parameter<SimilarityProvider> similarity
            = TextParams.similarity(m -> builder(m).similarity.getValue());

        final Parameter<String> indexOptions = TextParams.indexOptions(m -> builder(m).indexOptions.getValue());
        final Parameter<Boolean> norms = TextParams.norms(true, m -> builder(m).norms.getValue());
        final Parameter<String> termVectors = TextParams.termVectors(m -> builder(m).termVectors.getValue());

        final Parameter<Integer> positionIncrementGap = Parameter.intParam("position_increment_gap", false,
            m -> builder(m).positionIncrementGap.getValue(), POSITION_INCREMENT_GAP_USE_ANALYZER)
            .setValidator(v -> {
                if (v != POSITION_INCREMENT_GAP_USE_ANALYZER && v < 0) {
                    throw new MapperParsingException("[positions_increment_gap] must be positive, got [" + v + "]");
                }
            });

        private final Parameter<Float> boost = Parameter.boostParam();
        private final Parameter<Map<String, String>> meta = Parameter.metaParam();

        public Builder(String name, Supplier<NamedAnalyzer> defaultAnalyzer) {
            super(name);
            builder = this;
        }

        public Builder positionIncrementGap(int positionIncrementGap) {
            if (positionIncrementGap < 0) {
                throw new MapperParsingException("[positions_increment_gap] must be positive, got " + positionIncrementGap);
            }
            this.positionIncrementGap = positionIncrementGap;
            return this;
            this.analyzers = new TextParams.Analyzers(defaultAnalyzer);
        }

        @Override
        public Builder docValues(boolean docValues) {
            if (docValues) {
                throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields do not support doc values");
            }
            return this;
        protected List<Parameter<?>> getParameters() {
            return Arrays.asList(store, indexOptions, norms, termVectors, similarity,
                analyzers.indexAnalyzer, analyzers.searchAnalyzer, analyzers.searchQuoteAnalyzer, positionIncrementGap,
                boost, meta);
        }

        private NamedAnalyzer wrapAnalyzer(NamedAnalyzer in, int positionIncrementGap) {
@@ -107,57 +119,45 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
                new AnnotationAnalyzerWrapper(in.analyzer()), positionIncrementGap);
        }

        private AnnotatedTextFieldType buildFieldType(BuilderContext context) {
        private AnnotatedTextFieldType buildFieldType(FieldType fieldType, BuilderContext context) {
            int posGap;
            if (positionIncrementGap == POSITION_INCREMENT_GAP_USE_ANALYZER) {
            if (positionIncrementGap.get() == POSITION_INCREMENT_GAP_USE_ANALYZER) {
                posGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
            } else {
                if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
                    throw new IllegalArgumentException("Cannot set position_increment_gap on field [" + name()
                        + "] without positions enabled");
                }
                posGap = positionIncrementGap;
                posGap = positionIncrementGap.get();
            }
            AnnotatedTextFieldType ft = new AnnotatedTextFieldType(buildFullName(context), fieldType, similarity,
                wrapAnalyzer(searchAnalyzer, posGap), wrapAnalyzer(searchQuoteAnalyzer, posGap), meta);
            ft.setIndexAnalyzer(indexAnalyzer, posGap);
            TextSearchInfo tsi = new TextSearchInfo(
                fieldType,
                similarity.get(),
                wrapAnalyzer(analyzers.getSearchAnalyzer(), posGap),
                wrapAnalyzer(analyzers.getSearchQuoteAnalyzer(), posGap));
            AnnotatedTextFieldType ft = new AnnotatedTextFieldType(
                buildFullName(context),
                store.getValue(),
                tsi,
                meta.getValue());
            ft.setIndexAnalyzer(wrapAnalyzer(analyzers.getIndexAnalyzer(), posGap));
            ft.setBoost(boost.getValue());
            return ft;
        }

        @Override
        public AnnotatedTextFieldMapper build(BuilderContext context) {
            FieldType fieldType = TextParams.buildFieldType(() -> true, store, indexOptions, norms, termVectors);
            if (fieldType.indexOptions() == IndexOptions.NONE ) {
                throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields must be indexed");
            }
            return new AnnotatedTextFieldMapper(
                name, fieldType, buildFieldType(context), positionIncrementGap,
                multiFieldsBuilder.build(this, context), copyTo);
        }
    }

    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder<?> parse(
            String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            AnnotatedTextFieldMapper.Builder builder = new AnnotatedTextFieldMapper.Builder(fieldName);
            builder.indexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
            builder.searchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
            builder.searchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
            parseTextField(builder, fieldName, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String propName = entry.getKey();
                Object propNode = entry.getValue();
                if (propName.equals("position_increment_gap")) {
                    int newPositionIncrementGap = XContentMapValues.nodeIntegerValue(propNode, -1);
                    builder.positionIncrementGap(newPositionIncrementGap);
                    iterator.remove();
                }
            }
            return builder;
                name, fieldType, buildFieldType(fieldType, context),
                multiFieldsBuilder.build(this, context), copyTo.build(), this);
        }
    }

    public static TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, () -> c.getIndexAnalyzers().getDefaultIndexAnalyzer()));
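With this single PARSER declaration, the removed hand-written TypeParser above becomes unnecessary: the parametrized framework drives parsing from the Parameter list returned by getParameters(). A simplified sketch of that idea (an assumption about the framework's internals, not a quote of them):

[source,java]
--------------------------------------------------
// Simplified sketch: each property in the mapping node is routed to the
// Parameter that declared it; unknown properties become parse errors.
for (Map.Entry<String, Object> entry : node.entrySet()) {
    Parameter<?> parameter = paramsByName.get(entry.getKey());   // hypothetical lookup
    if (parameter == null) {
        throw new MapperParsingException("unknown parameter [" + entry.getKey() + "]");
    }
    parameter.parse(fieldName, entry.getValue());                // hypothetical signature
}
--------------------------------------------------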

    /**
     * Parses markdown-like syntax into plain text and AnnotationTokens with offsets for
@@ -170,7 +170,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {

    // Format is markdown-like syntax for URLs eg:
    // "New mayor is [John Smith](type=person&value=John%20Smith) "
    static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)");
    static Pattern markdownPattern = Pattern.compile("\\[([^]\\[]*)]\\(([^)(]*)\\)");
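The rewritten pattern is behaviourally identical: inside a character class `(` and `)` need no escaping, and a `]` is literal both as the first member of a class and wherever it cannot close one. For illustration, using the example string from the Javadoc above:

[source,java]
--------------------------------------------------
// Illustration of what the two capture groups hold (example from the Javadoc):
Matcher m = markdownPattern.matcher("New mayor is [John Smith](type=person&value=John%20Smith)");
if (m.find()) {
    String visibleText = m.group(1);     // "John Smith" - kept in the plain text
    String annotationValue = m.group(2); // "type=person&value=John%20Smith" - decoded into AnnotationTokens
}
--------------------------------------------------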

    public static AnnotatedText parse (String textPlusMarkup) {
        List<AnnotationToken> annotations =new ArrayList<>();
@@ -179,7 +179,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
        StringBuilder sb = new StringBuilder();
        while(m.find()){
            if(m.start() > lastPos){
                sb.append(textPlusMarkup.substring(lastPos, m.start()));
                sb.append(textPlusMarkup, lastPos, m.start());
            }

            int startOffset = sb.length();
@@ -192,21 +192,21 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
            for (String pair : pairs) {
                String[] kv = pair.split("=");
                try {
                    if(kv.length == 2){
                    if (kv.length == 2) {
                        throw new ElasticsearchParseException("key=value pairs are not supported in annotations");
                    }
                    if(kv.length == 1) {
                    if (kv.length == 1) {
                        //Check "=" sign wasn't in the pair string
                        if(kv[0].length() == pair.length()) {
                        if (kv[0].length() == pair.length()) {
                            //untyped value
                            value = URLDecoder.decode(kv[0], "UTF-8");
                        }
                    }
                    if (value!=null && value.length() > 0) {
                    if (value != null && value.length() > 0) {
                        annotations.add(new AnnotationToken(startOffset, endOffset, value));
                    }
                } catch (UnsupportedEncodingException uee){
                    throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", uee);
                } catch (UnsupportedEncodingException e) {
                    throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", e);
                }
            }
        }
@@ -464,7 +464,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
        }
    }

    private void setType(AnnotationToken token) {
    private void setType() {
        //Default annotation type - in future AnnotationTokens may contain custom type info
        typeAtt.setType("annotation");
    }
@@ -473,7 +473,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
        // Set the annotation's attributes
        posLenAtt.setPositionLength(annotationPosLen);
        textOffsetAtt.setOffset(nextAnnotationForInjection.offset, nextAnnotationForInjection.endOffset);
        setType(nextAnnotationForInjection);
        setType();

        // We may have multiple annotations at this location - stack them up
        final int annotationOffset = nextAnnotationForInjection.offset;
@@ -481,7 +481,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
        while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) {

            setType(nextAnnotationForInjection);
            setType();
            termAtt.resizeBuffer(nextAnnotationForInjection.value.length());
            termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length());

@@ -512,38 +512,29 @@ public class AnnotatedTextFieldMapper extends FieldMapper {

    public static final class AnnotatedTextFieldType extends TextFieldMapper.TextFieldType {

        private AnnotatedTextFieldType(String name, FieldType fieldType, SimilarityProvider similarity,
            NamedAnalyzer searchAnalyzer, NamedAnalyzer searchQuoteAnalyzer, Map<String, String> meta) {
            super(name, fieldType, similarity, searchAnalyzer, searchQuoteAnalyzer, meta);
        private AnnotatedTextFieldType(String name, boolean store, TextSearchInfo tsi, Map<String, String> meta) {
            super(name, true, store, tsi, meta);
        }

        public AnnotatedTextFieldType(String name, Map<String, String> meta) {
            super(name, true, false, meta);
        }

        public void setIndexAnalyzer(NamedAnalyzer delegate, int positionIncrementGap) {
            if(delegate.analyzer() instanceof AnnotationAnalyzerWrapper){
                // Already wrapped the Analyzer with an AnnotationAnalyzer
                super.setIndexAnalyzer(delegate);
            } else {
                // Wrap the analyzer with an AnnotationAnalyzer that will inject required annotations
                super.setIndexAnalyzer(new NamedAnalyzer(delegate.name(), AnalyzerScope.INDEX,
                    new AnnotationAnalyzerWrapper(delegate.analyzer()), positionIncrementGap));
            }
        }

        @Override
        public String typeName() {
            return CONTENT_TYPE;
        }
    }

    private int positionIncrementGap;
    private final FieldType fieldType;
    private final Builder builder;

    protected AnnotatedTextFieldMapper(String simpleName, FieldType fieldType, AnnotatedTextFieldType mappedFieldType,
        int positionIncrementGap, MultiFields multiFields, CopyTo copyTo) {
        super(simpleName, fieldType, mappedFieldType, multiFields, copyTo);
        MultiFields multiFields, CopyTo copyTo, Builder builder) {
        super(simpleName, mappedFieldType, multiFields, copyTo);
        assert fieldType.tokenized();
        this.positionIncrementGap = positionIncrementGap;
        this.fieldType = fieldType;
        this.builder = builder;
    }

    @Override
@@ -551,15 +542,6 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
        return (AnnotatedTextFieldMapper) super.clone();
    }

    @Override
    protected void mergeOptions(FieldMapper other, List<String> conflicts) {

    }

    public int getPositionIncrementGap() {
        return this.positionIncrementGap;
    }

    @Override
    protected void parseCreateField(ParseContext context) throws IOException {
        final String value;
@@ -588,24 +570,7 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
    }

    @Override
    public AnnotatedTextFieldType fieldType() {
        return (AnnotatedTextFieldType) super.fieldType();
    }

    @Override
    protected boolean docValuesByDefault() {
        return false;
    }

    @Override
    protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
        super.doXContentBody(builder, includeDefaults, params);
        doXContentAnalyzers(builder, includeDefaults);
        if (includeDefaults || fieldType.omitNorms()) {
            builder.field("norms", fieldType.omitNorms() == false);
        }
        if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
            builder.field("position_increment_gap", positionIncrementGap);
        }
    public ParametrizedFieldMapper.Builder getMergeBuilder() {
        return new Builder(simpleName(), builder.analyzers.indexAnalyzer::getDefaultValue).init(this);
    }
}
@@ -19,9 +19,6 @@

package org.elasticsearch.plugin.mapper;

import java.util.Collections;
import java.util.Map;

import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper;
import org.elasticsearch.plugins.MapperPlugin;
@@ -30,15 +27,18 @@ import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.fetch.subphase.highlight.AnnotatedTextHighlighter;
import org.elasticsearch.search.fetch.subphase.highlight.Highlighter;

import java.util.Collections;
import java.util.Map;

public class AnnotatedTextPlugin extends Plugin implements MapperPlugin, SearchPlugin {

@Override
public Map<String, Mapper.TypeParser> getMappers() {
return Collections.singletonMap(AnnotatedTextFieldMapper.CONTENT_TYPE, new AnnotatedTextFieldMapper.TypeParser());
return Collections.singletonMap(AnnotatedTextFieldMapper.CONTENT_TYPE, AnnotatedTextFieldMapper.PARSER);
}


@Override
public Map<String, Highlighter> getHighlighters() {
return Collections.singletonMap(AnnotatedTextHighlighter.NAME, new AnnotatedTextHighlighter());
return Collections.singletonMap(AnnotatedTextHighlighter.NAME, new AnnotatedTextHighlighter());
}
}

@@ -49,10 +49,7 @@ public class AnnotatedTextFieldTypeTests extends FieldTypeTestCase {
Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath());

MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field")
.indexAnalyzer(Lucene.STANDARD_ANALYZER)
.searchAnalyzer(Lucene.STANDARD_ANALYZER)
.searchQuoteAnalyzer(Lucene.STANDARD_ANALYZER)
MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field", () -> Lucene.STANDARD_ANALYZER)
.build(context)
.fieldType();


@@ -184,7 +184,6 @@ setup:
doc_values: false
text:
type: text
doc_values: false

- do:
headers:

@@ -36,7 +36,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.MockKeywordPlugin;

@@ -219,6 +218,28 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
}
}

public static String termVectorOptionsToString(FieldType fieldType) {
if (!fieldType.storeTermVectors()) {
return "no";
} else if (!fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "yes";
} else if (fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "with_offsets";
} else {
StringBuilder builder = new StringBuilder("with");
if (fieldType.storeTermVectorPositions()) {
builder.append("_positions");
}
if (fieldType.storeTermVectorOffsets()) {
builder.append("_offsets");
}
if (fieldType.storeTermVectorPayloads()) {
builder.append("_payloads");
}
return builder.toString();
}
}
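As a sanity check on this copied helper, a minimal sketch of the flag-to-string mapping it implements (illustrative only, not part of the commit):

// Sketch: expected outputs of termVectorOptionsToString for two configurations.
FieldType ft = new FieldType();
ft.setStoreTermVectors(true);                                      // vectors only
assert termVectorOptionsToString(ft).equals("yes");
ft.setStoreTermVectorPositions(true);
ft.setStoreTermVectorOffsets(true);                                // positions + offsets
assert termVectorOptionsToString(ft).equals("with_positions_offsets");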

public void testRandomSingleTermVectors() throws IOException {
FieldType ft = new FieldType();
int config = randomInt(4);
@@ -257,8 +278,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
ft.setStoreTermVectorOffsets(storeOffsets);
ft.setStoreTermVectorPositions(storePositions);

String optionString = FieldMapper.termVectorOptionsToString(ft);
XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
String optionString = termVectorOptionsToString(ft);
XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc")
.startObject("properties")
.startObject("field")
.field("type", "text")
@@ -267,12 +288,12 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
.endObject()
.endObject()
.endObject().endObject();
assertAcked(prepareCreate("test").addMapping("type1", mapping)
assertAcked(prepareCreate("test").addMapping("_doc", mapping)
.setSettings(Settings.builder()
.put("index.analysis.analyzer.tv_test.tokenizer", "standard")
.putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
for (int i = 0; i < 10; i++) {
client().prepareIndex("test", "type1", Integer.toString(i))
client().prepareIndex("test", "_doc", Integer.toString(i))
.setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
// 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
// 31the34 35lazy39 40dog43
@@ -289,7 +310,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
boolean isPositionsRequested = randomBoolean();
String infoString = createInfoString(isPositionsRequested, isOffsetRequested, optionString);
for (int i = 0; i < 10; i++) {
TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i))
TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "_doc", Integer.toString(i))
.setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
TermVectorsResponse response = resp.execute().actionGet();
assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
@@ -993,7 +1014,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
}
}

public void testArtificialDocWithPreference() throws ExecutionException, InterruptedException, IOException {
public void testArtificialDocWithPreference() throws InterruptedException, IOException {
// setup indices
Settings.Builder settings = Settings.builder()
.put(indexSettings())

@@ -469,7 +469,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex());
assertNotNull(ex.getCause());
assertEquals(MapperParsingException.class, ex.getCause().getClass());
assertThat(ex.getCause().getMessage(), containsString("analyzer [test] not found for field [field1]"));
assertThat(ex.getCause().getMessage(), containsString("analyzer [test] has not been configured in mappings"));
}

public void testArchiveBrokenClusterSettings() throws Exception {

@@ -182,7 +182,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
.actionGet();
fail("Expected MergeMappingException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("mapper [body] has different [norms]"));
assertThat(e.getMessage(), containsString("Cannot update parameter [norms] from [false] to [true]"));
}
}

@@ -694,7 +694,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
.startObject("field2").field("type", "text").field("analyzer", "custom_1").endObject()
.endObject().endObject().endObject())
.get());
assertThat(e.getMessage(), containsString("analyzer [custom_1] not found for field [field2]"));
assertThat(e.getMessage(), containsString("analyzer [custom_1] has not been configured in mappings"));

response = client().admin().indices().prepareGetTemplates().get();
assertThat(response.getIndexTemplates(), hasSize(1));

@@ -116,7 +116,8 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper {
TokenFilterFactory[] tokenFilters = ((AnalyzerComponentsProvider) analyzer).getComponents().getTokenFilters();
List<String> offendingFilters = new ArrayList<>();
for (TokenFilterFactory tokenFilter : tokenFilters) {
if (tokenFilter.getAnalysisMode() != mode) {
AnalysisMode filterMode = tokenFilter.getAnalysisMode();
if (filterMode != AnalysisMode.ALL && filterMode != mode) {
offendingFilters.add(tokenFilter.name());
}
}

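The reworked check treats AnalysisMode.ALL as compatible with either required mode, so only filters restricted to the opposite mode are reported. A sketch of the intended behaviour with illustrative values:

// Sketch: an ALL-mode filter passes both checks; a SEARCH_TIME-only filter
// is flagged when INDEX_TIME analysis is required.
AnalysisMode mode = AnalysisMode.INDEX_TIME;
AnalysisMode filterMode = AnalysisMode.ALL;
boolean offending = filterMode != AnalysisMode.ALL && filterMode != mode;  // false
filterMode = AnalysisMode.SEARCH_TIME;
offending = filterMode != AnalysisMode.ALL && filterMode != mode;          // true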
@@ -703,7 +703,8 @@ final class DocumentParser {

Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, XContentFieldType.STRING);
if (builder == null) {
builder = new TextFieldMapper.Builder(currentFieldName)
builder = new TextFieldMapper.Builder(currentFieldName,
() -> context.mapperService().getIndexAnalyzers().getDefaultIndexAnalyzer())
.addMultiField(new KeywordFieldMapper.Builder("keyword").ignoreAbove(256));
}
return builder;

@@ -511,28 +511,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
}

public static String termVectorOptionsToString(FieldType fieldType) {
if (!fieldType.storeTermVectors()) {
return "no";
} else if (!fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "yes";
} else if (fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
return "with_offsets";
} else {
StringBuilder builder = new StringBuilder("with");
if (fieldType.storeTermVectorPositions()) {
builder.append("_positions");
}
if (fieldType.storeTermVectorOffsets()) {
builder.append("_offsets");
}
if (fieldType.storeTermVectorPayloads()) {
builder.append("_payloads");
}
return builder.toString();
}
}

protected abstract String contentType();

public static class MultiFields implements Iterable<Mapper> {

@@ -44,7 +44,6 @@ import java.util.Objects;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.BiPredicate;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -119,7 +118,7 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
}

@Override
protected final void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
builder.field("type", contentType());
getMergeBuilder().toXContent(builder, includeDefaults);
multiFields.toXContent(builder, params);
@@ -133,11 +132,25 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
void serialize(XContentBuilder builder, String name, T value) throws IOException;
}

/**
* Check on whether or not a parameter should be serialized
*/
protected interface SerializerCheck<T> {
/**
* Check on whether or not a parameter should be serialized
* @param includeDefaults if defaults have been requested
* @param isConfigured if the parameter has a different value to the default
* @param value the parameter value
* @return {@code true} if the value should be serialized
*/
boolean check(boolean includeDefaults, boolean isConfigured, T value);
}

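For illustration, a mapper can combine the three arguments however it needs. A sketch of a hypothetical parameter (invented for this example, not in the commit) that is written out only when explicitly configured to a non-null value:

// Sketch: hypothetical parameter; serialized only when explicitly set to a
// non-null value, even when include_defaults is requested.
Parameter<String> normalizer = new Parameter<>("normalizer", false,
() -> null, (n, c, o) -> o == null ? null : o.toString(), m -> null)
.acceptsNull()
.setSerializerCheck((includeDefaults, isConfigured, value) -> isConfigured && value != null);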
/**
* A configurable parameter for a field mapper
* @param <T> the type of the value the parameter holds
*/
public static final class Parameter<T> {
public static final class Parameter<T> implements Supplier<T> {

public final String name;
private final List<String> deprecatedNames = new ArrayList<>();
@@ -147,8 +160,7 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
private boolean acceptsNull = false;
private Consumer<T> validator = null;
private Serializer<T> serializer = XContentBuilder::field;
private BooleanSupplier serializerPredicate = () -> true;
private boolean alwaysSerialize = false;
private SerializerCheck<T> serializerCheck = (includeDefaults, isConfigured, value) -> includeDefaults || isConfigured;
private Function<T, String> conflictSerializer = Objects::toString;
private BiPredicate<T, T> mergeValidator;
private T value;
@@ -179,6 +191,11 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
return isSet ? value : defaultValue.get();
}

@Override
public T get() {
return getValue();
}

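Implementing Supplier means a parameter can be handed directly to any API that expects a Supplier of its value, which is how TextFieldMapper.Builder later feeds its parameters into TextParams.buildFieldType. A sketch:

// Sketch: any Parameter<T> now stands in for a Supplier<T>.
Parameter<Boolean> norms = Parameter.boolParam("norms", false, m -> true, true);
Supplier<Boolean> asSupplier = norms;   // compiles: Parameter<T> implements Supplier<T>
boolean value = asSupplier.get();       // resolves to the configured or default value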
/**
* Returns the default value of the parameter
*/
@@ -235,19 +252,26 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
}

/**
* Sets an additional check on whether or not this parameter should be serialized,
* after the existing 'set' and 'include_defaults' checks.
* Configure a custom serialization check for this parameter
*/
public Parameter<T> setShouldSerialize(BooleanSupplier shouldSerialize) {
this.serializerPredicate = shouldSerialize;
public Parameter<T> setSerializerCheck(SerializerCheck<T> check) {
this.serializerCheck = check;
return this;
}

/**
* Ensures that this parameter is always serialized, no matter its value
* Always serialize this parameter, no matter its value
*/
public Parameter<T> alwaysSerialize() {
this.alwaysSerialize = true;
this.serializerCheck = (id, ic, v) -> true;
return this;
}

/**
* Never serialize this parameter, no matter its value
*/
public Parameter<T> neverSerialize() {
this.serializerCheck = (id, ic, v) -> false;
return this;
}

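Both helpers are shorthand for fixed serializer checks, replacing the old alwaysSerialize flag and the setShouldSerialize(() -> false) pattern (see the RangeFieldMapper hunk below). For reference, with param standing for any Parameter<T>:

// Sketch: the two convenience methods are equivalent to constant checks.
param.alwaysSerialize();  // same as param.setSerializerCheck((id, ic, v) -> true)
param.neverSerialize();   // same as param.setSerializerCheck((id, ic, v) -> false)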
@@ -284,8 +308,8 @@ public abstract class ParametrizedFieldMapper extends FieldMapper {
}
}

private void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {
if (alwaysSerialize || ((includeDefaults || isConfigured()) && serializerPredicate.getAsBoolean())) {
protected void toXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {
if (serializerCheck.check(includeDefaults, isConfigured(), get())) {
serializer.serialize(builder, name, getValue());
}
}

@@ -105,8 +105,8 @@ public class RangeFieldMapper extends ParametrizedFieldMapper {
this.type = type;
this.coerce = Parameter.explicitBoolParam("coerce", true, m -> toType(m).coerce, coerceByDefault);
if (this.type != RangeType.DATE) {
format.setShouldSerialize(() -> false);
locale.setShouldSerialize(() -> false);
format.neverSerialize();
locale.neverSerialize();
}
}

@@ -61,16 +61,17 @@ import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.AutomatonQueries;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext;
import org.elasticsearch.index.query.IntervalBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.similarity.SimilarityProvider;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
import org.elasticsearch.search.lookup.SearchLookup;

@@ -85,11 +86,8 @@ import java.util.Objects;
import java.util.function.IntPredicate;
import java.util.function.Supplier;

import static org.elasticsearch.index.mapper.TypeParsers.checkNull;
import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;

/** A {@link FieldMapper} for full-text fields. */
public class TextFieldMapper extends FieldMapper {
public class TextFieldMapper extends ParametrizedFieldMapper {

public static final String CONTENT_TYPE = "text";
private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1;
@@ -121,68 +119,17 @@ public class TextFieldMapper extends FieldMapper {
public static final int POSITION_INCREMENT_GAP = 100;
}

public static class Builder extends FieldMapper.Builder<Builder> {
private static Builder builder(FieldMapper in) {
return ((TextFieldMapper) in).builder;
}

private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER;
private int minPrefixChars = -1;
private int maxPrefixChars = -1;
private boolean fielddata = false;
private boolean indexPhrases = false;
private boolean eagerGlobalOrdinals = false;
private double fielddataMinFreq = Defaults.FIELDDATA_MIN_FREQUENCY;
private double fielddataMaxFreq = Defaults.FIELDDATA_MAX_FREQUENCY;
private int fielddataMinSegSize = Defaults.FIELDDATA_MIN_SEGMENT_SIZE;
protected SimilarityProvider similarity;

public Builder(String name) {
super(name, Defaults.FIELD_TYPE);
builder = this;
}

public Builder positionIncrementGap(int positionIncrementGap) {
if (positionIncrementGap < 0) {
throw new MapperParsingException("[positions_increment_gap] must be positive, got " + positionIncrementGap);
}
this.positionIncrementGap = positionIncrementGap;
return this;
}

public Builder fielddata(boolean fielddata) {
this.fielddata = fielddata;
return builder;
}

public Builder indexPhrases(boolean indexPhrases) {
this.indexPhrases = indexPhrases;
return builder;
}

public void similarity(SimilarityProvider similarity) {
this.similarity = similarity;
}

@Override
public Builder docValues(boolean docValues) {
if (docValues) {
throw new IllegalArgumentException("[text] fields do not support doc values");
}
return super.docValues(docValues);
}

public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) {
this.eagerGlobalOrdinals = eagerGlobalOrdinals;
return builder;
}

public Builder fielddataFrequencyFilter(double minFreq, double maxFreq, int minSegmentSize) {
this.fielddataMinFreq = minFreq;
this.fielddataMaxFreq = maxFreq;
this.fielddataMinSegSize = minSegmentSize;
return builder;
}

public Builder indexPrefixes(int minChars, int maxChars) {
private static final class PrefixConfig implements ToXContent {
final int minChars;
final int maxChars;

private PrefixConfig(int minChars, int maxChars) {
this.minChars = minChars;
this.maxChars = maxChars;
if (minChars > maxChars) {
throw new IllegalArgumentException("min_chars [" + minChars + "] must be less than max_chars [" + maxChars + "]");
}
@@ -192,30 +139,213 @@ public class TextFieldMapper extends FieldMapper {
if (maxChars >= 20) {
throw new IllegalArgumentException("max_chars [" + maxChars + "] must be less than 20");
}
this.minPrefixChars = minChars;
this.maxPrefixChars = maxChars;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PrefixConfig that = (PrefixConfig) o;
return minChars == that.minChars &&
maxChars == that.maxChars;
}

@Override
public int hashCode() {
return Objects.hash(minChars, maxChars);
}

@Override
public String toString() {
return "{ min_chars=" + minChars + ", max_chars=" + maxChars + " }";
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("min_chars", minChars);
builder.field("max_chars", maxChars);
builder.endObject();
return builder;
}
}

private static PrefixConfig parsePrefixConfig(String propName, ParserContext parserContext, Object propNode) {
if (propNode == null) {
return null;
}
Map<?, ?> indexPrefix = (Map<?, ?>) propNode;
int minChars = XContentMapValues.nodeIntegerValue(indexPrefix.remove("min_chars"),
Defaults.INDEX_PREFIX_MIN_CHARS);
int maxChars = XContentMapValues.nodeIntegerValue(indexPrefix.remove("max_chars"),
Defaults.INDEX_PREFIX_MAX_CHARS);
DocumentMapperParser.checkNoRemainingFields(propName, indexPrefix, parserContext.indexVersionCreated());
return new PrefixConfig(minChars, maxChars);
}

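A sketch of the map shape parsePrefixConfig accepts for index_prefixes; the values must satisfy the PrefixConfig bounds above (min_chars not greater than max_chars, max_chars below 20). The parserContext here is hypothetical:

// Sketch: accepted input for "index_prefixes"; unknown keys are rejected
// by checkNoRemainingFields.
Map<String, Object> indexPrefixes = new HashMap<>();
indexPrefixes.put("min_chars", 2);
indexPrefixes.put("max_chars", 5);
// parsePrefixConfig("index_prefixes", parserContext, indexPrefixes)
//   -> PrefixConfig(2, 5)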
private static final class FielddataFrequencyFilter implements ToXContent {
final double minFreq;
final double maxFreq;
final int minSegmentSize;

private FielddataFrequencyFilter(double minFreq, double maxFreq, int minSegmentSize) {
this.minFreq = minFreq;
this.maxFreq = maxFreq;
this.minSegmentSize = minSegmentSize;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FielddataFrequencyFilter that = (FielddataFrequencyFilter) o;
return Double.compare(that.minFreq, minFreq) == 0 &&
Double.compare(that.maxFreq, maxFreq) == 0 &&
minSegmentSize == that.minSegmentSize;
}

@Override
public int hashCode() {
return Objects.hash(minFreq, maxFreq, minSegmentSize);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("min", minFreq);
builder.field("max", maxFreq);
builder.field("min_segment_size", minSegmentSize);
builder.endObject();
return builder;
}

@Override
public String toString() {
return "{ min=" + minFreq + ", max=" + maxFreq + ", min_segment_size=" + minSegmentSize + " }";
}
}

private static final FielddataFrequencyFilter DEFAULT_FILTER = new FielddataFrequencyFilter(
Defaults.FIELDDATA_MIN_FREQUENCY, Defaults.FIELDDATA_MAX_FREQUENCY, Defaults.FIELDDATA_MIN_SEGMENT_SIZE
);

private static FielddataFrequencyFilter parseFrequencyFilter(String name, ParserContext parserContext, Object node) {
Map<?,?> frequencyFilter = (Map<?, ?>) node;
double minFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("min"), 0);
double maxFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("max"), Integer.MAX_VALUE);
int minSegmentSize = XContentMapValues.nodeIntegerValue(frequencyFilter.remove("min_segment_size"), 0);
DocumentMapperParser.checkNoRemainingFields(name, frequencyFilter, parserContext.indexVersionCreated());
return new FielddataFrequencyFilter(minFrequency, maxFrequency, minSegmentSize);
}

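Correspondingly for fielddata_frequency_filter; omitted keys fall back to min=0, max=Integer.MAX_VALUE and min_segment_size=0, per the parser above (parserContext again hypothetical):

// Sketch: accepted input for "fielddata_frequency_filter".
Map<String, Object> filter = new HashMap<>();
filter.put("min", 0.001);
filter.put("max", 0.1);
filter.put("min_segment_size", 500);
// parseFrequencyFilter("fielddata_frequency_filter", parserContext, filter)
//   -> FielddataFrequencyFilter(0.001, 0.1, 500)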
public static class Builder extends ParametrizedFieldMapper.Builder {

private final Version indexCreatedVersion;

private final Parameter<Boolean> index = Parameter.indexParam(m -> builder(m).index.getValue(), true);
private final Parameter<Boolean> store = Parameter.storeParam(m -> builder(m).store.getValue(), false);

final Parameter<SimilarityProvider> similarity
= TextParams.similarity(m -> builder(m).similarity.getValue());

final Parameter<String> indexOptions = TextParams.indexOptions(m -> builder(m).indexOptions.getValue());
final Parameter<Boolean> norms = TextParams.norms(true, m -> builder(m).norms.getValue());
final Parameter<String> termVectors = TextParams.termVectors(m -> builder(m).termVectors.getValue());

final Parameter<Integer> positionIncrementGap = Parameter.intParam("position_increment_gap", false,
m -> builder(m).positionIncrementGap.getValue(), POSITION_INCREMENT_GAP_USE_ANALYZER);

final Parameter<Boolean> fieldData
= Parameter.boolParam("fielddata", true, m -> builder(m).fieldData.getValue(), false);
final Parameter<FielddataFrequencyFilter> freqFilter = new Parameter<>("fielddata_frequency_filter", true,
() -> DEFAULT_FILTER, TextFieldMapper::parseFrequencyFilter, m -> builder(m).freqFilter.getValue());
final Parameter<Boolean> eagerGlobalOrdinals
= Parameter.boolParam("eager_global_ordinals", true, m -> builder(m).eagerGlobalOrdinals.getValue(), false);

final Parameter<Boolean> indexPhrases
= Parameter.boolParam("index_phrases", false, m -> builder(m).indexPhrases.getValue(), false);
final Parameter<PrefixConfig> indexPrefixes = new Parameter<>("index_prefixes", false,
() -> null, TextFieldMapper::parsePrefixConfig, m -> builder(m).indexPrefixes.getValue()).acceptsNull();

private final Parameter<Float> boost = Parameter.boostParam();
private final Parameter<Map<String, String>> meta = Parameter.metaParam();

final TextParams.Analyzers analyzers;

public Builder(String name, Supplier<NamedAnalyzer> defaultAnalyzer) {
this(name, Version.CURRENT, defaultAnalyzer);
}

public Builder(String name, Version indexCreatedVersion, Supplier<NamedAnalyzer> defaultAnalyzer) {
super(name);
this.indexCreatedVersion = indexCreatedVersion;
this.analyzers = new TextParams.Analyzers(defaultAnalyzer);
}

public Builder index(boolean index) {
this.index.setValue(index);
return this;
}

private TextFieldType buildFieldType(BuilderContext context) {
TextFieldType ft
= new TextFieldType(buildFullName(context), fieldType, similarity, searchAnalyzer, searchQuoteAnalyzer, meta);
public Builder store(boolean store) {
this.store.setValue(store);
return this;
}

public Builder fielddata(boolean fielddata) {
this.fieldData.setValue(fielddata);
return this;
}

public Builder fielddataFrequencyFilter(double min, double max, int segs) {
this.freqFilter.setValue(new FielddataFrequencyFilter(min, max, segs));
return this;
}

public Builder addMultiField(Mapper.Builder<?> builder) {
this.multiFieldsBuilder.add(builder);
return this;
}

@Override
protected List<Parameter<?>> getParameters() {
return Arrays.asList(index, store, indexOptions, norms, termVectors,
analyzers.indexAnalyzer, analyzers.searchAnalyzer, analyzers.searchQuoteAnalyzer, similarity,
positionIncrementGap,
fieldData, freqFilter, eagerGlobalOrdinals,
indexPhrases, indexPrefixes,
boost, meta);
}

private TextFieldType buildFieldType(FieldType fieldType, BuilderContext context) {
NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer();
NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer();
NamedAnalyzer searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer();
if (positionIncrementGap.get() != POSITION_INCREMENT_GAP_USE_ANALYZER) {
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
throw new IllegalArgumentException("Cannot set position_increment_gap on field ["
+ name + "] without positions enabled");
}
indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionIncrementGap.get());
searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionIncrementGap.get());
searchQuoteAnalyzer = new NamedAnalyzer(searchQuoteAnalyzer, positionIncrementGap.get());
}
TextSearchInfo tsi = new TextSearchInfo(fieldType, similarity.getValue(), searchAnalyzer, searchQuoteAnalyzer);
TextFieldType ft = new TextFieldType(buildFullName(context), index.getValue(), store.getValue(), tsi, meta.getValue());
ft.setIndexAnalyzer(indexAnalyzer);
ft.setEagerGlobalOrdinals(eagerGlobalOrdinals);
if (fielddata) {
ft.setFielddata(true);
ft.setFielddataMinFrequency(fielddataMinFreq);
ft.setFielddataMaxFrequency(fielddataMaxFreq);
ft.setFielddataMinSegmentSize(fielddataMinSegSize);
ft.setEagerGlobalOrdinals(eagerGlobalOrdinals.getValue());
ft.setBoost(boost.getValue());
if (fieldData.getValue()) {
ft.setFielddata(true, freqFilter.getValue());
}
return ft;
}

private PrefixFieldMapper buildPrefixMapper(BuilderContext context, TextFieldType tft) {
if (minPrefixChars == -1) {
private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fieldType, TextFieldType tft) {
if (indexPrefixes.get() == null) {
return null;
}
if (indexed == false) {
if (index.getValue() == false) {
throw new IllegalArgumentException("Cannot set index_prefixes on unindexed field [" + name() + "]");
}
/*
@@ -225,7 +355,7 @@ public class TextFieldMapper extends FieldMapper {
* or a multi-field). This way search will continue to work on old indices and new indices
* will use the expected full name.
*/
String fullName = context.indexCreatedVersion().before(Version.V_7_2_1) ? name() : buildFullName(context);
String fullName = indexCreatedVersion.before(Version.V_7_2_1) ? name() : buildFullName(context);
// Copy the index options of the main field to allow phrase queries on
// the prefix field.
FieldType pft = new FieldType(fieldType);
@@ -239,96 +369,39 @@ public class TextFieldMapper extends FieldMapper {
if (fieldType.storeTermVectorOffsets()) {
pft.setStoreTermVectorOffsets(true);
}
PrefixFieldType prefixFieldType = new PrefixFieldType(tft, fullName + "._index_prefix",
minPrefixChars, maxPrefixChars, pft.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0);
prefixFieldType.setAnalyzer(indexAnalyzer);
PrefixFieldType prefixFieldType = new PrefixFieldType(tft, fullName + "._index_prefix", indexPrefixes.get());
prefixFieldType.setAnalyzer(analyzers.getIndexAnalyzer());
tft.setPrefixFieldType(prefixFieldType);
return new PrefixFieldMapper(pft, prefixFieldType);
}

private PhraseFieldMapper buildPhraseMapper(BuilderContext context, TextFieldType parent) {
if (indexPhrases == false) {
private PhraseFieldMapper buildPhraseMapper(FieldType fieldType, TextFieldType parent) {
if (indexPhrases.get() == false) {
return null;
}
if (indexed == false) {
if (index.get() == false) {
throw new IllegalArgumentException("Cannot set index_phrases on unindexed field [" + name() + "]");
}
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
throw new IllegalArgumentException("Cannot set index_phrases on field [" + name() + "] if positions are not enabled");
}
FieldType phraseFieldType = new FieldType(fieldType);
parent.setIndexPhrases();
return new PhraseFieldMapper(phraseFieldType, new PhraseFieldType(parent));
}

@Override
public FieldMapper build(BuilderContext context) {
if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
throw new IllegalArgumentException("Cannot set position_increment_gap on field ["
+ name + "] without positions enabled");
}
indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionIncrementGap);
searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionIncrementGap);
searchQuoteAnalyzer = new NamedAnalyzer(searchQuoteAnalyzer, positionIncrementGap);
}
TextFieldType tft = buildFieldType(context);
return new TextFieldMapper(name,
fieldType, tft, positionIncrementGap, buildPrefixMapper(context, tft), buildPhraseMapper(context, tft),
multiFieldsBuilder.build(this, context), copyTo);
public TextFieldMapper build(BuilderContext context) {
FieldType fieldType = TextParams.buildFieldType(index, store, indexOptions, norms, termVectors);
TextFieldType tft = buildFieldType(fieldType, context);
return new TextFieldMapper(name, fieldType, tft,
buildPrefixMapper(context, fieldType, tft), buildPhraseMapper(fieldType, tft),
multiFieldsBuilder.build(this, context), copyTo.build(), this);
}
}

public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(fieldName);
builder.indexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
builder.searchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer());
builder.searchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer());
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String propName = entry.getKey();
Object propNode = entry.getValue();
checkNull(propName, propNode);
if (propName.equals("position_increment_gap")) {
int newPositionIncrementGap = XContentMapValues.nodeIntegerValue(propNode, -1);
builder.positionIncrementGap(newPositionIncrementGap);
iterator.remove();
} else if (propName.equals("fielddata")) {
builder.fielddata(XContentMapValues.nodeBooleanValue(propNode, "fielddata"));
iterator.remove();
} else if (propName.equals("eager_global_ordinals")) {
builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals"));
iterator.remove();
} else if (propName.equals("fielddata_frequency_filter")) {
Map<?,?> frequencyFilter = (Map<?, ?>) propNode;
double minFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("min"), 0);
double maxFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("max"), Integer.MAX_VALUE);
int minSegmentSize = XContentMapValues.nodeIntegerValue(frequencyFilter.remove("min_segment_size"), 0);
builder.fielddataFrequencyFilter(minFrequency, maxFrequency, minSegmentSize);
DocumentMapperParser.checkNoRemainingFields(propName, frequencyFilter, parserContext.indexVersionCreated());
iterator.remove();
} else if (propName.equals("index_prefixes")) {
Map<?, ?> indexPrefix = (Map<?, ?>) propNode;
int minChars = XContentMapValues.nodeIntegerValue(indexPrefix.remove("min_chars"),
Defaults.INDEX_PREFIX_MIN_CHARS);
int maxChars = XContentMapValues.nodeIntegerValue(indexPrefix.remove("max_chars"),
Defaults.INDEX_PREFIX_MAX_CHARS);
builder.indexPrefixes(minChars, maxChars);
DocumentMapperParser.checkNoRemainingFields(propName, indexPrefix, parserContext.indexVersionCreated());
iterator.remove();
} else if (propName.equals("index_phrases")) {
builder.indexPhrases(XContentMapValues.nodeBooleanValue(propNode, "index_phrases"));
iterator.remove();
} else if (propName.equals("similarity")) {
SimilarityProvider similarityProvider = TypeParsers.resolveSimilarity(parserContext, fieldName, propNode.toString());
builder.similarity(similarityProvider);
iterator.remove();
}
}
parseTextField(builder, fieldName, node, parserContext);
return builder;
}
}
public static final TypeParser PARSER
= new TypeParser((n, c) -> new Builder(n, c.indexVersionCreated(), () -> c.getIndexAnalyzers().getDefaultIndexAnalyzer()));

private static class PhraseWrappedAnalyzer extends AnalyzerWrapper {

@@ -410,14 +483,16 @@ public class TextFieldMapper extends FieldMapper {
final int minChars;
final int maxChars;
final TextFieldType parentField;
final boolean hasPositions;

PrefixFieldType(TextFieldType parentField, String name, int minChars, int maxChars, boolean hasPositions) {
PrefixFieldType(TextFieldType parentField, String name, PrefixConfig config) {
this(parentField, name, config.minChars, config.maxChars);
}

PrefixFieldType(TextFieldType parentField, String name, int minChars, int maxChars) {
super(name, true, false, false, parentField.getTextSearchInfo(), Collections.emptyMap());
this.minChars = minChars;
this.maxChars = maxChars;
this.parentField = parentField;
this.hasPositions = hasPositions;
}

@Override
@@ -425,13 +500,6 @@ public class TextFieldMapper extends FieldMapper {
throw new UnsupportedOperationException();
}

static boolean canMerge(PrefixFieldType first, PrefixFieldType second) {
if (first == null) {
return second == null;
}
return second != null && first.minChars == second.minChars && first.maxChars == second.maxChars;
}

void setAnalyzer(NamedAnalyzer delegate) {
setIndexAnalyzer(new NamedAnalyzer(delegate.name(), AnalyzerScope.INDEX,
new PrefixWrappedAnalyzer(delegate.analyzer(), minChars, maxChars)));
@@ -441,13 +509,6 @@ public class TextFieldMapper extends FieldMapper {
return length >= minChars - 1 && length <= maxChars;
}

void doXContent(XContentBuilder builder) throws IOException {
builder.startObject("index_prefixes");
builder.field("min_chars", minChars);
builder.field("max_chars", maxChars);
builder.endObject();
}

@Override
public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) {
if (value.length() >= minChars) {
@@ -476,7 +537,7 @@ public class TextFieldMapper extends FieldMapper {
}

public IntervalsSource intervals(BytesRef term) {
if (hasPositions == false) {
if (getTextSearchInfo().hasPositions() == false) {
throw new IllegalArgumentException("Cannot create intervals over a field [" + name() + "] without indexed positions");
}
if (term.length > maxChars) {
@@ -516,7 +577,7 @@ public class TextFieldMapper extends FieldMapper {
}

@Override
protected void parseCreateField(ParseContext context) throws IOException {
protected void parseCreateField(ParseContext context) {
throw new UnsupportedOperationException();
}

@@ -565,20 +626,13 @@ public class TextFieldMapper extends FieldMapper {
public static class TextFieldType extends StringFieldType {

private boolean fielddata;
private double fielddataMinFrequency;
private double fielddataMaxFrequency;
private int fielddataMinSegmentSize;
private FielddataFrequencyFilter filter;
private PrefixFieldType prefixFieldType;
private boolean indexPhrases = false;

public TextFieldType(String name, FieldType indexedFieldType, SimilarityProvider similarity, NamedAnalyzer searchAnalyzer,
NamedAnalyzer searchQuoteAnalyzer, Map<String, String> meta) {
super(name, indexedFieldType.indexOptions() != IndexOptions.NONE, indexedFieldType.stored(), false,
new TextSearchInfo(indexedFieldType, similarity, searchAnalyzer, searchQuoteAnalyzer), meta);
public TextFieldType(String name, boolean indexed, boolean stored, TextSearchInfo tsi, Map<String, String> meta) {
super(name, indexed, stored, false, tsi, meta);
fielddata = false;
fielddataMinFrequency = Defaults.FIELDDATA_MIN_FREQUENCY;
fielddataMaxFrequency = Defaults.FIELDDATA_MAX_FREQUENCY;
fielddataMinSegmentSize = Defaults.FIELDDATA_MIN_SEGMENT_SIZE;
}

public TextFieldType(String name, boolean indexed, boolean stored, Map<String, String> meta) {
@@ -588,47 +642,42 @@ public class TextFieldMapper extends FieldMapper {
}

public TextFieldType(String name) {
this(name, Defaults.FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER, Collections.emptyMap());
this(name, true, false,
new TextSearchInfo(Defaults.FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER),
Collections.emptyMap());
}

public boolean fielddata() {
return fielddata;
}

public void setFielddata(boolean fielddata) {
public void setFielddata(boolean fielddata, FielddataFrequencyFilter filter) {
this.fielddata = fielddata;
this.filter = filter;
}

public double fielddataMinFrequency() {
return fielddataMinFrequency;
public void setFielddata(boolean fielddata) {
this.setFielddata(fielddata, DEFAULT_FILTER);
}

public void setFielddataMinFrequency(double fielddataMinFrequency) {
this.fielddataMinFrequency = fielddataMinFrequency;
double fielddataMinFrequency() {
return filter.minFreq;
}

public double fielddataMaxFrequency() {
return fielddataMaxFrequency;
double fielddataMaxFrequency() {
return filter.maxFreq;
}

public void setFielddataMaxFrequency(double fielddataMaxFrequency) {
this.fielddataMaxFrequency = fielddataMaxFrequency;
}

public int fielddataMinSegmentSize() {
return fielddataMinSegmentSize;
}

public void setFielddataMinSegmentSize(int fielddataMinSegmentSize) {
this.fielddataMinSegmentSize = fielddataMinSegmentSize;
int fielddataMinSegmentSize() {
return filter.minSegmentSize;
}

void setPrefixFieldType(PrefixFieldType prefixFieldType) {
this.prefixFieldType = prefixFieldType;
}

void setIndexPhrases(boolean indexPhrases) {
this.indexPhrases = indexPhrases;
void setIndexPhrases() {
this.indexPhrases = true;
}

public PrefixFieldType getPrefixFieldType() {
@@ -780,36 +829,35 @@ public class TextFieldMapper extends FieldMapper {
}
return new PagedBytesIndexFieldData.Builder(
name(),
fielddataMinFrequency,
fielddataMaxFrequency,
fielddataMinSegmentSize,
filter.minFreq,
filter.maxFreq,
filter.minSegmentSize,
CoreValuesSourceType.BYTES
);
}

}

private final int positionIncrementGap;
private PrefixFieldMapper prefixFieldMapper;
private PhraseFieldMapper phraseFieldMapper;
private final Builder builder;
private final FieldType fieldType;
private final PrefixFieldMapper prefixFieldMapper;
private final PhraseFieldMapper phraseFieldMapper;

protected TextFieldMapper(String simpleName, FieldType fieldType, TextFieldType mappedFieldType,
int positionIncrementGap, PrefixFieldMapper prefixFieldMapper,
PhraseFieldMapper phraseFieldMapper,
MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, mappedFieldType, multiFields, copyTo);
assert fieldType.tokenized();
protected TextFieldMapper(String simpleName, FieldType fieldType,
TextFieldType mappedFieldType,
PrefixFieldMapper prefixFieldMapper,
PhraseFieldMapper phraseFieldMapper,
MultiFields multiFields, CopyTo copyTo, Builder builder) {
super(simpleName, mappedFieldType, multiFields, copyTo);
assert mappedFieldType.getTextSearchInfo().isTokenized();
assert mappedFieldType.hasDocValues() == false;
if (fieldType.indexOptions() == IndexOptions.NONE && fieldType().fielddata()) {
throw new IllegalArgumentException("Cannot enable fielddata on a [text] field that is not indexed: [" + name() + "]");
}
this.positionIncrementGap = positionIncrementGap;
this.fieldType = fieldType;
this.prefixFieldMapper = prefixFieldMapper;
this.phraseFieldMapper = phraseFieldMapper;
if (prefixFieldMapper != null) {
mappedFieldType.setPrefixFieldType((PrefixFieldType)prefixFieldMapper.mappedFieldType);
}
mappedFieldType.setIndexPhrases(phraseFieldMapper != null);
this.builder = builder;
}

@Override
@@ -817,8 +865,9 @@ public class TextFieldMapper extends FieldMapper {
return (TextFieldMapper) super.clone();
}

public int getPositionIncrementGap() {
return this.positionIncrementGap;
@Override
public ParametrizedFieldMapper.Builder getMergeBuilder() {
return new Builder(simpleName(), builder.indexCreatedVersion, builder.analyzers.indexAnalyzer::getDefaultValue).init(this);
}

@Override
@@ -869,92 +918,11 @@ public class TextFieldMapper extends FieldMapper {
return CONTENT_TYPE;
}

@Override
protected void mergeOptions(FieldMapper other, List<String> conflicts) {
TextFieldMapper mw = (TextFieldMapper) other;
if (Objects.equals(mw.fieldType().getTextSearchInfo().getSimilarity(),
this.fieldType().getTextSearchInfo().getSimilarity()) == false) {
conflicts.add("mapper [" + name() + "] has different [similarity] settings");
}
if (mw.fieldType().indexPhrases != this.fieldType().indexPhrases) {
conflicts.add("mapper [" + name() + "] has different [index_phrases] settings");
}
if (PrefixFieldType.canMerge(mw.fieldType().prefixFieldType, this.fieldType().prefixFieldType) == false) {
conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings");
}
if (this.prefixFieldMapper != null && mw.prefixFieldMapper != null) {
this.prefixFieldMapper = (PrefixFieldMapper) this.prefixFieldMapper.merge(mw.prefixFieldMapper);
}
if (this.phraseFieldMapper != null && mw.phraseFieldMapper != null) {
this.phraseFieldMapper = (PhraseFieldMapper) this.phraseFieldMapper.merge(mw.phraseFieldMapper);
}
}

@Override
public TextFieldType fieldType() {
return (TextFieldType) super.fieldType();
}

@Override
protected boolean docValuesByDefault() {
return false;
}

@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
if (fieldType.indexOptions() != IndexOptions.NONE
&& (includeDefaults || fieldType.indexOptions() != Defaults.FIELD_TYPE.indexOptions())) {
builder.field("index_options", indexOptionToString(fieldType.indexOptions()));
}
if (includeDefaults || fieldType.storeTermVectors() != Defaults.FIELD_TYPE.storeTermVectors()) {
builder.field("term_vector", termVectorOptionsToString(fieldType));
}
if (includeDefaults || fieldType.omitNorms()) {
builder.field("norms", fieldType.omitNorms() == false);
}
doXContentAnalyzers(builder, includeDefaults);
if (fieldType().getTextSearchInfo().getSimilarity() != null) {
builder.field("similarity", fieldType().getTextSearchInfo().getSimilarity().name());
} else if (includeDefaults) {
builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY);
}
if (includeDefaults || fieldType().eagerGlobalOrdinals()) {
builder.field("eager_global_ordinals", fieldType().eagerGlobalOrdinals());
}
if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
builder.field("position_increment_gap", positionIncrementGap);
}

if (includeDefaults || fieldType().fielddata() != false) {
builder.field("fielddata", fieldType().fielddata());
}
if (fieldType().fielddata()) {
if (includeDefaults
|| fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY
|| fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY
|| fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) {
builder.startObject("fielddata_frequency_filter");
if (includeDefaults || fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY) {
builder.field("min", fieldType().fielddataMinFrequency());
}
if (includeDefaults || fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY) {
builder.field("max", fieldType().fielddataMaxFrequency());
}
if (includeDefaults || fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) {
builder.field("min_segment_size", fieldType().fielddataMinSegmentSize());
}
builder.endObject();
}
}
if (fieldType().prefixFieldType != null) {
fieldType().prefixFieldType.doXContent(builder);
}
if (fieldType().indexPhrases) {
builder.field("index_phrases", fieldType().indexPhrases);
}
}

public static Query createPhraseQuery(TokenStream stream, String field, int slop, boolean enablePositionIncrements) throws IOException {
MultiPhraseQuery.Builder mpqb = new MultiPhraseQuery.Builder();
mpqb.setSlop(slop);
@@ -1070,4 +1038,29 @@ public class TextFieldMapper extends FieldMapper {
}
return spanQuery.build();
}

@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
// this is a pain, but we have to do this to maintain BWC
builder.field("type", contentType());
this.builder.boost.toXContent(builder, includeDefaults);
this.builder.index.toXContent(builder, includeDefaults);
this.builder.store.toXContent(builder, includeDefaults);
this.multiFields.toXContent(builder, params);
this.copyTo.toXContent(builder, params);
this.builder.meta.toXContent(builder, includeDefaults);
this.builder.indexOptions.toXContent(builder, includeDefaults);
this.builder.termVectors.toXContent(builder, includeDefaults);
this.builder.norms.toXContent(builder, includeDefaults);
this.builder.analyzers.indexAnalyzer.toXContent(builder, includeDefaults);
this.builder.analyzers.searchAnalyzer.toXContent(builder, includeDefaults);
this.builder.analyzers.searchQuoteAnalyzer.toXContent(builder, includeDefaults);
this.builder.similarity.toXContent(builder, includeDefaults);
this.builder.eagerGlobalOrdinals.toXContent(builder, includeDefaults);
this.builder.positionIncrementGap.toXContent(builder, includeDefaults);
this.builder.fieldData.toXContent(builder, includeDefaults);
this.builder.freqFilter.toXContent(builder, includeDefaults);
this.builder.indexPrefixes.toXContent(builder, includeDefaults);
this.builder.indexPhrases.toXContent(builder, includeDefaults);
}
}

@@ -21,10 +21,12 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.index.analysis.AnalysisMode;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.ParametrizedFieldMapper.Parameter;
import org.elasticsearch.index.similarity.SimilarityProvider;

import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;

@@ -42,13 +44,19 @@ public final class TextParams {

public Analyzers(Supplier<NamedAnalyzer> defaultAnalyzer) {
this.indexAnalyzer = Parameter.analyzerParam("analyzer", false,
m -> m.fieldType().indexAnalyzer(), defaultAnalyzer);
m -> m.fieldType().indexAnalyzer(), defaultAnalyzer)
.setSerializerCheck((id, ic, a) -> id || ic ||
Objects.equals(a, getSearchAnalyzer()) == false || Objects.equals(a, getSearchQuoteAnalyzer()) == false)
.setValidator(a -> a.checkAllowedInMode(AnalysisMode.INDEX_TIME));
this.searchAnalyzer
= Parameter.analyzerParam("search_analyzer", true,
m -> m.fieldType().getTextSearchInfo().getSearchAnalyzer(), indexAnalyzer::getValue);
m -> m.fieldType().getTextSearchInfo().getSearchAnalyzer(), indexAnalyzer::getValue)
.setSerializerCheck((id, ic, a) -> id || ic || Objects.equals(a, getSearchQuoteAnalyzer()) == false)
.setValidator(a -> a.checkAllowedInMode(AnalysisMode.SEARCH_TIME));
this.searchQuoteAnalyzer
= Parameter.analyzerParam("search_quote_analyzer", true,
m -> m.fieldType().getTextSearchInfo().getSearchQuoteAnalyzer(), searchAnalyzer::getValue)
m -> m.fieldType().getTextSearchInfo().getSearchQuoteAnalyzer(), searchAnalyzer::getValue);
.setValidator(a -> a.checkAllowedInMode(AnalysisMode.SEARCH_TIME));
}

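These checks keep default mappings compact: analyzer is only written when it was configured, defaults are requested, or it differs from either search analyzer, and search_analyzer only when it differs from search_quote_analyzer. A sketch of the index-analyzer decision, with String stand-ins for NamedAnalyzer:

// Sketch: all three analyzers resolve to "standard" and none was configured,
// so the "analyzer" field is omitted from the serialized mapping.
String a = "standard", search = "standard", searchQuote = "standard";
boolean includeDefaults = false, isConfigured = false;
boolean emit = includeDefaults || isConfigured
|| Objects.equals(a, search) == false
|| Objects.equals(a, searchQuote) == false;   // false -> skipped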
public NamedAnalyzer getIndexAnalyzer() {
@@ -81,6 +89,20 @@ public final class TextParams {
"positions", "docs", "freqs", "offsets");
}

public static FieldType buildFieldType(Supplier<Boolean> indexed,
Supplier<Boolean> stored,
Supplier<String> indexOptions,
Supplier<Boolean> norms,
Supplier<String> termVectors) {
FieldType ft = new FieldType();
ft.setStored(stored.get());
ft.setTokenized(true);
ft.setIndexOptions(toIndexOptions(indexed.get(), indexOptions.get()));
ft.setOmitNorms(norms.get() == false);
setTermVectorParams(termVectors.get(), ft);
return ft;
}

public static IndexOptions toIndexOptions(boolean indexed, String indexOptions) {
|
||||
if (indexed == false) {
|
||||
return IndexOptions.NONE;
|
||||
@ -122,6 +144,9 @@ public final class TextParams {
|
||||
fieldType.setStoreTermVectorPositions(true);
|
||||
return;
|
||||
case "with_offsets":
|
||||
fieldType.setStoreTermVectors(true);
|
||||
fieldType.setStoreTermVectorOffsets(true);
|
||||
return;
|
||||
case "with_positions_offsets":
|
||||
fieldType.setStoreTermVectors(true);
|
||||
fieldType.setStoreTermVectorPositions(true);
|
||||
|
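buildFieldType reads every mapping parameter through a Supplier, so the Lucene FieldType is assembled from whatever the parameters hold at build time. A hedged usage sketch (the literal supplier values are illustrative, not taken from this commit):

    // Build a FieldType for an indexed, unstored text field with
    // positions, norms enabled and no term vectors.
    FieldType ft = TextParams.buildFieldType(
        () -> true,          // index
        () -> false,         // store
        () -> "positions",   // index_options
        () -> true,          // norms
        () -> "no");         // term_vector
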
@ -24,8 +24,6 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.analysis.AnalysisMode;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.similarity.SimilarityProvider;

import java.util.ArrayList;
@ -49,125 +47,6 @@ public class TypeParsers {
public static final String INDEX_OPTIONS_POSITIONS = "positions";
public static final String INDEX_OPTIONS_OFFSETS = "offsets";

private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode,
Mapper.TypeParser.ParserContext parserContext) {
NamedAnalyzer indexAnalyzer = null;
NamedAnalyzer searchAnalyzer = null;
NamedAnalyzer searchQuoteAnalyzer = null;

for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
final String propName = entry.getKey();
final Object propNode = entry.getValue();
if (propName.equals("term_vector")) {
parseTermVector(name, propNode.toString(), builder);
iterator.remove();
} else if (propName.equals("store_term_vectors")) {
builder.storeTermVectors(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vectors"));
iterator.remove();
} else if (propName.equals("store_term_vector_offsets")) {
builder.storeTermVectorOffsets(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_offsets"));
iterator.remove();
} else if (propName.equals("store_term_vector_positions")) {
builder.storeTermVectorPositions(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_positions"));
iterator.remove();
} else if (propName.equals("store_term_vector_payloads")) {
builder.storeTermVectorPayloads(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_payloads"));
iterator.remove();
} else if (propName.equals("analyzer")) {
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
indexAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_analyzer")) {
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
analyzer.checkAllowedInMode(AnalysisMode.SEARCH_TIME);
searchAnalyzer = analyzer;
iterator.remove();
} else if (propName.equals("search_quote_analyzer")) {
NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString());
if (analyzer == null) {
throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
}
analyzer.checkAllowedInMode(AnalysisMode.SEARCH_TIME);
searchQuoteAnalyzer = analyzer;
iterator.remove();
}
}

// check analyzers are allowed to work in the respective AnalysisMode
{
if (indexAnalyzer != null) {
if (searchAnalyzer == null) {
indexAnalyzer.checkAllowedInMode(AnalysisMode.ALL);
} else {
indexAnalyzer.checkAllowedInMode(AnalysisMode.INDEX_TIME);
}
}
if (searchAnalyzer != null) {
searchAnalyzer.checkAllowedInMode(AnalysisMode.SEARCH_TIME);
}
if (searchQuoteAnalyzer != null) {
searchQuoteAnalyzer.checkAllowedInMode(AnalysisMode.SEARCH_TIME);
}
}

if (indexAnalyzer == null && searchAnalyzer != null) {
throw new MapperParsingException("analyzer on field [" + name + "] must be set when search_analyzer is set");
}

if (searchAnalyzer == null && searchQuoteAnalyzer != null) {
throw new MapperParsingException("analyzer and search_analyzer on field [" + name +
"] must be set when search_quote_analyzer is set");
}

if (searchAnalyzer == null) {
searchAnalyzer = indexAnalyzer;
}

if (searchQuoteAnalyzer == null) {
searchQuoteAnalyzer = searchAnalyzer;
}

if (indexAnalyzer != null) {
builder.indexAnalyzer(indexAnalyzer);
}
if (searchAnalyzer != null) {
builder.searchAnalyzer(searchAnalyzer);
}
if (searchQuoteAnalyzer != null) {
builder.searchQuoteAnalyzer(searchQuoteAnalyzer);
}
}

public static void parseNorms(FieldMapper.Builder<?> builder, String fieldName, Object propNode) {
builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false);
}

/**
* Parse text field attributes. In addition to {@link #parseField common attributes}
* this will parse analysis and term-vectors related settings.
*/
public static void parseTextField(FieldMapper.Builder<?> builder, String name, Map<String, Object> fieldNode,
Mapper.TypeParser.ParserContext parserContext) {
parseField(builder, name, fieldNode, parserContext);
parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext(); ) {
Map.Entry<String, Object> entry = iterator.next();
final String propName = entry.getKey();
final Object propNode = entry.getValue();
if ("norms".equals(propName)) {
parseNorms(builder, name, propNode);
iterator.remove();
}
}
}

public static void checkNull(String propName, Object propNode) {
if (false == propName.equals("null_value") && propNode == null) {
/*
@ -219,8 +98,6 @@ public class TypeParsers {
return Collections.unmodifiableMap(sortedMeta);
}



/**
* Parse common field attributes such as {@code doc_values} or {@code store}.
*/
@ -358,30 +235,6 @@ public class TypeParsers {
throw new IllegalArgumentException("Invalid format: [" + node.toString() + "]: expected string value");
}

public static void parseTermVector(String fieldName, String termVector, FieldMapper.Builder builder) throws MapperParsingException {
if ("no".equals(termVector)) {
builder.storeTermVectors(false);
} else if ("yes".equals(termVector)) {
builder.storeTermVectors(true);
} else if ("with_offsets".equals(termVector)) {
builder.storeTermVectorOffsets(true);
} else if ("with_positions".equals(termVector)) {
builder.storeTermVectorPositions(true);
} else if ("with_positions_offsets".equals(termVector)) {
builder.storeTermVectorPositions(true);
builder.storeTermVectorOffsets(true);
} else if ("with_positions_payloads".equals(termVector)) {
builder.storeTermVectorPositions(true);
builder.storeTermVectorPayloads(true);
} else if ("with_positions_offsets_payloads".equals(termVector)) {
builder.storeTermVectorPositions(true);
builder.storeTermVectorOffsets(true);
builder.storeTermVectorPayloads(true);
} else {
throw new MapperParsingException("wrong value for termVector [" + termVector + "] for field [" + fieldName + "]");
}
}

public static List<String> parseCopyFields(Object propNode) {
List<String> copyFields = new ArrayList<>();
if (isArray(propNode)) {

@ -296,7 +296,8 @@ public class QueryShardContext extends QueryRewriteContext {
if (fieldMapping != null || allowUnmappedFields) {
return fieldMapping;
} else if (mapUnmappedFieldAsString) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
TextFieldMapper.Builder builder
= new TextFieldMapper.Builder(name, () -> mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer());
return builder.build(new Mapper.BuilderContext(indexSettings.getSettings(), new ContentPath(1))).fieldType();
} else {
throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name);

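This is the pattern repeated through the rest of the diff: TextFieldMapper.Builder now takes a Supplier<NamedAnalyzer> for its default index analyzer, so the analyzer is resolved lazily instead of being looked up inside the builder. Both call shapes appear verbatim in the hunks below:

    // production code defers to the index's configured default analyzer
    new TextFieldMapper.Builder(name,
        () -> mapperService.getIndexAnalyzers().getDefaultIndexAnalyzer());
    // tests typically pin a constant analyzer
    new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER);
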
@ -125,7 +125,7 @@ public class IndicesModule extends AbstractModule {
DateFieldMapper.Resolution nanoseconds = DateFieldMapper.Resolution.NANOSECONDS;
mappers.put(nanoseconds.type(), DateFieldMapper.NANOS_PARSER);
mappers.put(IpFieldMapper.CONTENT_TYPE, IpFieldMapper.PARSER);
mappers.put(TextFieldMapper.CONTENT_TYPE, new TextFieldMapper.TypeParser());
mappers.put(TextFieldMapper.CONTENT_TYPE, TextFieldMapper.PARSER);
mappers.put(KeywordFieldMapper.CONTENT_TYPE, KeywordFieldMapper.PARSER);
mappers.put(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser());
mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());

@ -44,10 +44,6 @@ import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TypeParsers;
import org.elasticsearch.rest.action.document.RestTermVectorsAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.StreamsUtils;
@ -208,7 +204,7 @@ public class TermVectorsUnitTests extends ESTestCase {

}

public void testRequestParsingThrowsException() throws Exception {
public void testRequestParsingThrowsException() {
BytesReference inputBytes = new BytesArray(
" {\"fields\" : \"a, b,c \", \"offsets\":false, \"positions\":false, \"payloads\":true, \"meaningless_term\":2}");
TermVectorsRequest tvr = new TermVectorsRequest(null, null, null);
@ -257,35 +253,6 @@
}
}

public void testFieldTypeToTermVectorString() throws Exception {
FieldType ft = new FieldType();
ft.setStoreTermVectorOffsets(false);
ft.setStoreTermVectorPayloads(true);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorPositions(true);
String ftOpts = FieldMapper.termVectorOptionsToString(ft);
assertThat("with_positions_payloads", equalTo(ftOpts));
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(null);
boolean exceptionThrown = false;
try {
TypeParsers.parseTermVector("", ftOpts, builder);
} catch (MapperParsingException e) {
exceptionThrown = true;
}
assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.",
exceptionThrown, equalTo(false));
}

public void testTermVectorStringGenerationWithoutPositions() throws Exception {
FieldType ft = new FieldType();
ft.setStoreTermVectorOffsets(true);
ft.setStoreTermVectorPayloads(true);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorPositions(false);
String ftOpts = FieldMapper.termVectorOptionsToString(ft);
assertThat(ftOpts, equalTo("with_offsets"));
}

public void testMultiParser() throws Exception {
byte[] bytes = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json");
XContentParser data = createParser(JsonXContent.jsonXContent, bytes);

@ -198,7 +198,7 @@ public class MetadataIndexTemplateServiceTests extends ESSingleNodeTestCase {
List<Throwable> errors = putTemplateDetail(request);
assertThat(errors.size(), equalTo(1));
assertThat(errors.get(0), instanceOf(MapperParsingException.class));
assertThat(errors.get(0).getMessage(), containsString("analyzer [custom_1] not found for field [field2]"));
assertThat(errors.get(0).getMessage(), containsString("analyzer [custom_1] has not been configured in mappings"));
}

public void testBrokenMapping() throws Exception {

@ -31,6 +31,7 @@ import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
@ -99,7 +100,8 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
if (docValues) {
fieldType = new KeywordFieldMapper.Builder(fieldName).build(context).fieldType();
} else {
fieldType = new TextFieldMapper.Builder(fieldName).fielddata(true).build(context).fieldType();
fieldType = new TextFieldMapper.Builder(fieldName, () -> Lucene.STANDARD_ANALYZER)
.fielddata(true).build(context).fieldType();
}
} else if (type.equals("float")) {
fieldType = new NumberFieldMapper.Builder(fieldName, NumberFieldMapper.NumberType.FLOAT, false, true)

@ -23,6 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
@ -64,7 +65,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {

{
indexService.clearCaches(false, true);
MappedFieldType ft = new TextFieldMapper.Builder("high_freq")
MappedFieldType ft = new TextFieldMapper.Builder("high_freq", () -> Lucene.STANDARD_ANALYZER)
.fielddata(true)
.fielddataFrequencyFilter(0, random.nextBoolean() ? 100 : 0.5d, 0)
.build(builderCtx).fieldType();
@ -79,7 +80,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {
}
{
indexService.clearCaches(false, true);
MappedFieldType ft = new TextFieldMapper.Builder("high_freq")
MappedFieldType ft = new TextFieldMapper.Builder("high_freq", () -> Lucene.STANDARD_ANALYZER)
.fielddata(true)
.fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d/200.0d, 201, 100)
.build(builderCtx).fieldType();
@ -94,7 +95,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {

{
indexService.clearCaches(false, true);// test # docs with value
MappedFieldType ft = new TextFieldMapper.Builder("med_freq")
MappedFieldType ft = new TextFieldMapper.Builder("med_freq", () -> Lucene.STANDARD_ANALYZER)
.fielddata(true)
.fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d/200.0d, Integer.MAX_VALUE, 101)
.build(builderCtx).fieldType();
@ -110,7 +111,7 @@ public class FilterFieldDataTests extends AbstractFieldDataTestCase {

{
indexService.clearCaches(false, true);
MappedFieldType ft = new TextFieldMapper.Builder("med_freq")
MappedFieldType ft = new TextFieldMapper.Builder("med_freq", () -> Lucene.STANDARD_ANALYZER)
.fielddata(true)
.fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d/200.0d, Integer.MAX_VALUE, 101)
.build(builderCtx).fieldType();

@ -31,6 +31,7 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
@ -141,8 +142,10 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());

final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
final MappedFieldType mapper1 = new TextFieldMapper.Builder("field_1").fielddata(true).build(ctx).fieldType();
final MappedFieldType mapper2 = new TextFieldMapper.Builder("field_2").fielddata(true).build(ctx).fieldType();
final MappedFieldType mapper1
= new TextFieldMapper.Builder("field_1", () -> Lucene.STANDARD_ANALYZER).fielddata(true).build(ctx).fieldType();
final MappedFieldType mapper2
= new TextFieldMapper.Builder("field_2", () -> Lucene.STANDARD_ANALYZER).fielddata(true).build(ctx).fieldType();
final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
Document doc = new Document();
doc.add(new StringField("field_1", "thisisastring", Store.NO));
@ -205,7 +208,8 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());

final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
final MappedFieldType mapper1 = new TextFieldMapper.Builder("s").fielddata(true).build(ctx).fieldType();
final MappedFieldType mapper1
= new TextFieldMapper.Builder("s", () -> Lucene.STANDARD_ANALYZER).fielddata(true).build(ctx).fieldType();
final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
Document doc = new Document();
doc.add(new StringField("s", "thisisastring", Store.NO));

@ -612,7 +612,7 @@ public class CopyToMapperTests extends MapperServiceTestCase {
}

public void testCopyFromMultiField() {
MapperParsingException e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(fieldMapping(b -> {
Exception e = expectThrows(IllegalArgumentException.class, () -> createDocumentMapper(fieldMapping(b -> {
b.field("type", "keyword");
b.startObject("fields");
{
@ -626,7 +626,6 @@ public class CopyToMapperTests extends MapperServiceTestCase {
b.endObject();
})));
assertThat(e.getMessage(),
Matchers.containsString("copy_to in multi fields is not allowed. Found the copy_to in field [bar] " +
"which is within a multi field."));
Matchers.containsString("[copy_to] may not be used to copy from a multi-field: [field.bar]"));
}
}

@ -23,6 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.builders.PointBuilder;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.geometry.Point;
import org.elasticsearch.search.lookup.SearchLookup;

@ -63,7 +64,7 @@ public class ExternalMapper extends ParametrizedFieldMapper {

public Builder(String name, String generatedValue, String mapperName) {
super(name);
this.stringBuilder = new TextFieldMapper.Builder(name).store(false);
this.stringBuilder = new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER).store(false);
this.generatedValue = generatedValue;
this.mapperName = mapperName;
}

@ -40,7 +40,7 @@ public class ExternalMapperPlugin extends Plugin implements MapperPlugin {
mappers.put(EXTERNAL, ExternalMapper.parser(EXTERNAL, "foo"));
mappers.put(EXTERNAL_BIS, ExternalMapper.parser(EXTERNAL_BIS, "bar"));
mappers.put(EXTERNAL_UPPER, ExternalMapper.parser(EXTERNAL_UPPER, "FOO BAR"));
mappers.put(FakeStringFieldMapper.CONTENT_TYPE, new FakeStringFieldMapper.TypeParser());
mappers.put(FakeStringFieldMapper.CONTENT_TYPE, FakeStringFieldMapper.PARSER);
return Collections.unmodifiableMap(mappers);
}

@ -21,65 +21,48 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.lookup.SearchLookup;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;

// Like a String mapper but with very few options. We just use it to test if highlighting on a custom string mapped field works as expected.
public class FakeStringFieldMapper extends FieldMapper {
public class FakeStringFieldMapper extends ParametrizedFieldMapper {

public static final String CONTENT_TYPE = "fake_string";

public static final FieldType FIELD_TYPE = new FieldType();
static {
FIELD_TYPE.setTokenized(true);
FIELD_TYPE.setStored(true);
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
}

public static class Builder extends FieldMapper.Builder<Builder> {
public static class Builder extends ParametrizedFieldMapper.Builder {

public Builder(String name) {
super(name, FIELD_TYPE);
super(name);
builder = this;
}

@Override
public Builder index(boolean index) {
throw new UnsupportedOperationException();
protected List<Parameter<?>> getParameters() {
return Collections.emptyList();
}

@Override
public FakeStringFieldMapper build(BuilderContext context) {
return new FakeStringFieldMapper(
fieldType,
new FakeStringFieldType(name, fieldType.stored(),
new TextSearchInfo(fieldType, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER)),
multiFieldsBuilder.build(this, context), copyTo);
new FakeStringFieldType(name, true,
new TextSearchInfo(FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER)),
multiFieldsBuilder.build(this, context), copyTo.build());
}
}

public static class TypeParser implements Mapper.TypeParser {

public TypeParser() {
}

@Override
public Mapper.Builder<?> parse(String fieldName, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
FakeStringFieldMapper.Builder builder = new FakeStringFieldMapper.Builder(fieldName);
parseTextField(builder, fieldName, node, parserContext);
return builder;
}
}
public static TypeParser PARSER = new TypeParser((n, c) -> new Builder(n));

public static final class FakeStringFieldType extends StringFieldType {

@ -104,9 +87,9 @@ public class FakeStringFieldMapper extends FieldMapper {
}
}

protected FakeStringFieldMapper(FieldType fieldType, MappedFieldType mappedFieldType,
protected FakeStringFieldMapper(MappedFieldType mappedFieldType,
MultiFields multiFields, CopyTo copyTo) {
super(mappedFieldType.name(), fieldType, mappedFieldType, multiFields, copyTo);
super(mappedFieldType.name(), mappedFieldType, multiFields, copyTo);
}

@Override
@ -122,18 +105,8 @@ public class FakeStringFieldMapper extends FieldMapper {
return;
}

if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) {
Field field = new Field(fieldType().name(), value, fieldType);
context.doc().add(field);
}
if (fieldType().hasDocValues()) {
context.doc().add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
}
}

@Override
protected void mergeOptions(FieldMapper other, List<String> conflicts) {

Field field = new Field(fieldType().name(), value, FIELD_TYPE);
context.doc().add(field);
}

@Override
@ -142,13 +115,7 @@ public class FakeStringFieldMapper extends FieldMapper {
}

@Override
public FakeStringFieldType fieldType() {
return (FakeStringFieldType) super.fieldType();
public ParametrizedFieldMapper.Builder getMergeBuilder() {
return new Builder(simpleName()).init(this);
}

@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
}

}

@ -218,7 +218,7 @@ public class GeoPointFieldMapperTests extends FieldMapperTestCase2<GeoPointField
b.startObject("fields");
{
b.startObject("geohash").field("type", "keyword").field("doc_values", false).endObject(); // test geohash as keyword
b.startObject("latlon").field("type", "text").field("doc_values", false).endObject(); // test geohash as text
b.startObject("latlon").field("type", "text").endObject(); // test geohash as text
}
b.endObject();
}));

@ -138,8 +138,8 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase {
mapperService.merge("person", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
fail();
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("mapper [name] has different [index] values"));
assertThat(e.getMessage(), containsString("mapper [name] has different [store] values"));
assertThat(e.getMessage(), containsString("Cannot update parameter [index] from [true] to [false]"));
assertThat(e.getMessage(), containsString("Cannot update parameter [store] from [true] to [false]"));
}

// There are conflicts, so the `name.not_indexed3` has not been added

@ -20,55 +20,32 @@

package org.elasticsearch.index.mapper;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.core.IsEqual.equalTo;

public class MultiFieldCopyToMapperTests extends ESTestCase {
public class MultiFieldCopyToMapperTests extends MapperServiceTestCase {

public void testExceptionForCopyToInMultiFields() throws IOException {
XContentBuilder mapping = createMappinmgWithCopyToInMultiField();
public void testExceptionForCopyToInMultiFields() {

// first check that for newer versions we throw exception if copy_to is found within multi field
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "test");
try {
mapperService.parse("type", new CompressedXContent(Strings.toString(mapping)), true);
fail("Parsing should throw an exception because the mapping contains a copy_to in a multi field");
} catch (MapperParsingException e) {
assertThat(e.getMessage(), equalTo("copy_to in multi fields is not allowed. Found the copy_to in field [c]"
+ " which is within a multi field."));
}
}
Exception e = expectThrows(IllegalArgumentException.class, () -> createMapperService(mapping(b -> {
b.startObject("a").field("type", "text").endObject();
b.startObject("b");
{
b.field("type", "text");
b.startObject("fields");
{
b.startObject("subfield");
{
b.field("type", "text");
b.field("copy_to", "a");
}
b.endObject();
}
b.endObject();
}
b.endObject();
})));

private static XContentBuilder createMappinmgWithCopyToInMultiField() throws IOException {
XContentBuilder mapping = jsonBuilder();
mapping.startObject()
.startObject("type")
.startObject("properties")
.startObject("a")
.field("type", "text")
.endObject()
.startObject("b")
.field("type", "text")
.startObject("fields")
.startObject("c")
.field("type", "text")
.field("copy_to", "a")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
return mapping;
assertThat(e.getMessage(), equalTo("[copy_to] may not be used to copy from a multi-field: [b.subfield]"));
}

}

@ -26,11 +26,13 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
import org.elasticsearch.test.ESSingleNodeTestCase;
@ -40,6 +42,7 @@ import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
@ -127,11 +130,12 @@ public class MultiFieldTests extends ESSingleNodeTestCase {

public void testBuildThenParse() throws Exception {
IndexService indexService = createIndex("test");
Supplier<NamedAnalyzer> a = () -> Lucene.STANDARD_ANALYZER;

DocumentMapper builderDocMapper = new DocumentMapper.Builder(new RootObjectMapper.Builder("person").add(
new TextFieldMapper.Builder("name").store(true)
.addMultiField(new TextFieldMapper.Builder("indexed").index(true))
.addMultiField(new TextFieldMapper.Builder("not_indexed").index(false).store(true))
new TextFieldMapper.Builder("name", a).store(true)
.addMultiField(new TextFieldMapper.Builder("indexed", a).index(true))
.addMultiField(new TextFieldMapper.Builder("not_indexed", a).index(false).store(true))
), indexService.mapperService()).build(indexService.mapperService());

String builtMapping = builderDocMapper.mappingSource().string();

@ -20,10 +20,8 @@ package org.elasticsearch.index.mapper;

import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.FieldMapper.CopyTo;
import org.elasticsearch.index.mapper.FieldMapper.MultiFields;
import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;

@ -154,8 +152,8 @@ public class ObjectMapperMergeTests extends ESTestCase {
}

private static TextFieldMapper createTextFieldMapper(String name) {
final TextFieldType fieldType = new TextFieldType(name);
return new TextFieldMapper(name, TextFieldMapper.Defaults.FIELD_TYPE, fieldType, -1,
null, null, MultiFields.empty(), CopyTo.empty());
final Settings indexSettings = Settings.builder().put(SETTING_VERSION_CREATED, Version.CURRENT).build();
final Mapper.BuilderContext context = new Mapper.BuilderContext(indexSettings, new ContentPath());
return new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER).build(context);
}
}

@ -128,8 +128,8 @@ public class ParametrizedMapperTests extends MapperServiceTestCase {
protected Builder(String name) {
super(name);
// only output search analyzer if different to analyzer
searchAnalyzer.setShouldSerialize(
() -> Objects.equals(analyzer.getValue().name(), searchAnalyzer.getValue().name()) == false);
searchAnalyzer.setSerializerCheck(
(id, ic, v) -> Objects.equals(analyzer.getValue().name(), searchAnalyzer.getValue().name()) == false);
}

@Override

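The test change above mirrors the API migration: the zero-argument setShouldSerialize supplier is replaced by the three-argument serializer check, which also sees includeDefaults and isConfigured. A hedged before/after sketch (differsFromDefault() is a hypothetical helper, not part of the commit):

    // before: only an opaque condition could be consulted
    searchAnalyzer.setShouldSerialize(() -> differsFromDefault());
    // after: the same condition, plus the serialization context
    searchAnalyzer.setSerializerCheck((includeDefaults, isConfigured, value) ->
        includeDefaults || isConfigured || differsFromDefault());
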
@ -368,7 +368,7 @@ public class RootObjectMapperTests extends ESSingleNodeTestCase {
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(Strings.toString(mapping)), MergeReason.MAPPING_UPDATE);
assertThat(mapper.mappingSource().toString(), containsString("\"analyzer\":\"foobar\""));
assertWarnings("dynamic template [my_template] has invalid content [{\"match_mapping_type\":\"string\",\"mapping\":{" +
"\"analyzer\":\"foobar\",\"type\":\"text\"}}], caused by [analyzer [foobar] not found for field [__dynamic__my_template]]");
"\"analyzer\":\"foobar\",\"type\":\"text\"}}], caused by [analyzer [foobar] has not been configured in mappings]");
}

public void testIllegalDynamicTemplateNoMappingType() throws Exception {

@ -0,0 +1,197 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.mapper;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.AnalysisMode;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.test.ESTestCase;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_ANALYZER_NAME;
import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_ANALYZER_NAME;
import static org.elasticsearch.index.analysis.AnalysisRegistry.DEFAULT_SEARCH_QUOTED_ANALYZER_NAME;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class TextFieldAnalyzerModeTests extends ESTestCase {

private static Map<String, NamedAnalyzer> defaultAnalyzers() {
Map<String, NamedAnalyzer> analyzers = new HashMap<>();
analyzers.put(DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null));
analyzers.put(DEFAULT_SEARCH_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null));
analyzers.put(DEFAULT_SEARCH_QUOTED_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null));
return analyzers;
}

private static final IndexMetadata EMPTY_INDEX_METADATA = IndexMetadata.builder("")
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();
private static final IndexSettings indexSettings = new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY);


private Analyzer createAnalyzerWithMode(AnalysisMode mode) {
TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, "my_analyzer", Settings.EMPTY) {
@Override
public AnalysisMode getAnalysisMode() {
return mode;
}

@Override
public TokenStream create(TokenStream tokenStream) {
return null;
}
};
return new CustomAnalyzer(null, new CharFilterFactory[0],
new TokenFilterFactory[] { tokenFilter });
}

public void testParseTextFieldCheckAnalyzerAnalysisMode() {

Map<String, Object> fieldNode = new HashMap<>();
fieldNode.put("analyzer", "my_analyzer");
Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

// check AnalysisMode.ALL works
Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
analyzers.put("my_analyzer",
new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(AnalysisMode.ALL)));

IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);

TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);
builder.parse("field", parserContext, fieldNode);

// check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked
AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME);
analyzers = defaultAnalyzers();
analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX,
createAnalyzerWithMode(mode)));
indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
fieldNode.put("analyzer", "my_analyzer");
MapperException ex = expectThrows(MapperException.class, () -> {
TextFieldMapper.Builder bad = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);
bad.parse("name", parserContext, fieldNode);
});
assertThat(ex.getMessage(),
containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run"));
}

public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() {

for (String settingToTest : new String[] { "search_analyzer", "search_quote_analyzer" }) {
Map<String, Object> fieldNode = new HashMap<>();
fieldNode.put(settingToTest, "my_analyzer");
fieldNode.put("analyzer", "standard");
if (settingToTest.equals("search_quote_analyzer")) {
fieldNode.put("search_analyzer", "standard");
}
Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

// check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works
Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
AnalysisMode mode = randomFrom(AnalysisMode.ALL, AnalysisMode.SEARCH_TIME);
analyzers.put("my_analyzer",
new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode)));
analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));

IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);
builder.parse("textField", parserContext, fieldNode);

// check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked
mode = AnalysisMode.INDEX_TIME;
analyzers = defaultAnalyzers();
analyzers.put("my_analyzer",
new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode)));
analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));
indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
fieldNode.clear();
fieldNode.put(settingToTest, "my_analyzer");
fieldNode.put("analyzer", "standard");
if (settingToTest.equals("search_quote_analyzer")) {
fieldNode.put("search_analyzer", "standard");
}
MapperException ex = expectThrows(MapperException.class, () -> {
TextFieldMapper.Builder bad = new TextFieldMapper.Builder("field", () -> Lucene.STANDARD_ANALYZER);
bad.parse("field", parserContext, fieldNode);
});
assertEquals("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run in search time mode.",
ex.getMessage());
}
}

public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() {

Map<String, Object> fieldNode = new HashMap<>();
fieldNode.put("analyzer", "my_analyzer");
Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

// check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer
AnalysisMode mode = AnalysisMode.INDEX_TIME;
Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
analyzers.put("my_analyzer",
new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode)));
IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
MapperException ex = expectThrows(MapperException.class, () -> {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);
builder.parse("field", parserContext, fieldNode);
});
assertThat(ex.getMessage(),
containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run"));

// check AnalysisMode.INDEX_TIME is okay if search analyzer is also set
fieldNode.put("analyzer", "my_analyzer");
fieldNode.put("search_analyzer", "standard");
analyzers = defaultAnalyzers();
mode = randomFrom(AnalysisMode.ALL, AnalysisMode.INDEX_TIME);
analyzers.put("my_analyzer",
new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode)));
analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));

indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);
builder.parse("field", parserContext, fieldNode);
}

}

@ -74,20 +74,24 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.core.Is.is;

public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.Builder> {
public class TextFieldMapperTests extends MapperTestCase {

@Override
protected void writeFieldValue(XContentBuilder builder) throws IOException {
builder.value(1234);
}

@Override
protected void assertParseMaximalWarnings() {
assertWarnings("Parameter [boost] on field [field] is deprecated and will be removed in 8.0");
}

public final void testExistsQueryIndexDisabled() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> {
minimalMapping(b);
@ -118,13 +122,6 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
assertParseMinimalWarnings();
}

@Override
protected TextFieldMapper.Builder newBuilder() {
return new TextFieldMapper.Builder("text")
.indexAnalyzer(new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()))
.searchAnalyzer(new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));
}

@Override
protected void registerParameters(ParameterChecker checker) throws IOException {
checker.registerUpdateCheck(b -> b.field("fielddata", true), m -> {
@ -170,8 +167,7 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
checker.registerConflictCheck("analyzer", b -> b.field("analyzer", "keyword"));
checker.registerConflictCheck("term_vector", b -> b.field("term_vector", "yes"));

// TODO position_increment_gap should not be updateable!
//checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10));
checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10));

// norms can be set from true to false, but not vice versa
checker.registerConflictCheck("norms",
@ -195,11 +191,8 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())
);

}
checker.registerUpdateCheck(b -> b.field("boost", 2.0), m -> assertEquals(m.fieldType().boost(), 2.0, 0));

@Override
protected Set<String> unsupportedProperties() {
return org.elasticsearch.common.collect.Set.of("doc_values");
}

@Override
@ -266,6 +259,22 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
assertEquals(DocValuesType.NONE, fieldType.docValuesType());
}

public void testBWCSerialization() throws IOException {
MapperService mapperService = createMapperService(fieldMapping(b -> {
b.field("type", "text");
b.field("fielddata", true);
b.startObject("fields");
{
b.startObject("subfield").field("type", "long").endObject();
}
b.endObject();
}));

assertEquals(
"{\"_doc\":{\"properties\":{\"field\":{\"type\":\"text\",\"fields\":{\"subfield\":{\"type\":\"long\"}},\"fielddata\":true}}}}",
Strings.toString(mapperService.documentMapper()));
}

public void testEnableStore() throws IOException {
DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("store", true)));
ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234")));
@ -526,15 +535,15 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
assertThat(fieldType.fielddataMinSegmentSize(), equalTo(1000));
}

public void testNullConfigValuesFail() throws MapperParsingException, IOException {
public void testNullConfigValuesFail() throws MapperParsingException {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("analyzer", (String) null)))
);
assertThat(e.getMessage(), containsString("[analyzer] must not have a [null] value"));
assertThat(e.getMessage(), containsString("[analyzer] on mapper [field] of type [text] must not have a [null] value"));
}

public void testNotIndexedFieldPositionIncrement() throws IOException {
public void testNotIndexedFieldPositionIncrement() {
Exception e = expectThrows(
MapperParsingException.class,
() -> createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("index", false).field("position_increment_gap", 10)))
@ -542,7 +551,7 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled"));
}

public void testAnalyzedFieldPositionIncrementWithoutPositions() throws IOException {
public void testAnalyzedFieldPositionIncrementWithoutPositions() {
for (String indexOptions : Arrays.asList("docs", "freqs")) {
Exception e = expectThrows(
MapperParsingException.class,
@ -794,6 +803,13 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B

}

{
DocumentMapper mapper = createDocumentMapper(
fieldMapping(b -> b.field("type", "text").nullField("index_prefixes"))
);
assertNull(mapper.mappers().getMapper("field._index_prefix"));
}

{
MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "text").field("analyzer", "standard");
@ -827,16 +843,6 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
assertThat(e.getMessage(), containsString("max_chars [25] must be less than 20"));
}

{
MapperParsingException e = expectThrows(
MapperParsingException.class,
() -> createMapperService(
fieldMapping(b -> b.field("type", "text").field("analyzer", "standard").nullField("index_prefixes"))
)
);
assertThat(e.getMessage(), containsString("[index_prefixes] must not have a [null] value"));
}

{
MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> {
b.field("type", "text").field("analyzer", "standard").field("index", false);
@ -974,13 +980,13 @@ public class TextFieldMapperTests extends FieldMapperTestCase2<TextFieldMapper.B
b -> b.field("type", "text").startObject("index_prefixes").field("min_chars", "3").endObject().field("index_phrases", true)
);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPrefix));
assertThat(e.getMessage(), containsString("different [index_prefixes]"));
assertThat(e.getMessage(), containsString("Cannot update parameter [index_prefixes]"));

XContentBuilder differentPhrases = fieldMapping(
b -> b.field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", false)
);
e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPhrases));
assertThat(e.getMessage(), containsString("different [index_phrases]"));
assertThat(e.getMessage(), containsString("Cannot update parameter [index_phrases]"));

XContentBuilder newField = mapping(b -> {
b.startObject("field").field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", true).endObject();

@ -38,6 +38,7 @@ import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetadata;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.search.AutomatonQueries;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.Fuzziness;
|
||||
@ -136,7 +137,7 @@ public class TextFieldTypeTests extends FieldTypeTestCase {
|
||||
|
||||
public void testIndexPrefixes() {
|
||||
TextFieldType ft = createFieldType();
|
||||
ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10, true));
|
||||
ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10));
|
||||
|
||||
Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, false, randomMockShardContext());
|
||||
assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q);
|
||||
@ -169,7 +170,7 @@ public class TextFieldTypeTests extends FieldTypeTestCase {
        Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
        Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath());

        MappedFieldType mapper = new TextFieldMapper.Builder("field").build(context).fieldType();
        MappedFieldType mapper = new TextFieldMapper.Builder("field", () -> Lucene.STANDARD_ANALYZER).build(context).fieldType();

        assertEquals(Collections.singletonList("value"), fetchSourceValue(mapper, "value"));
        assertEquals(Collections.singletonList("42"), fetchSourceValue(mapper, 42L));
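The one-line change above is the recurring pattern in this diff: `TextFieldMapper.Builder` now takes a supplier for the default index analyzer up front instead of having it resolved later. A minimal sketch of that constructor shape in plain Java (the builder class below is illustrative, not the real mapper):

    import java.util.function.Supplier;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;

    // Illustrative builder that defers analyzer creation to a supplier,
    // mirroring new TextFieldMapper.Builder("field", () -> Lucene.STANDARD_ANALYZER).
    final class AnalyzedFieldBuilder {
        private final String name;
        private final Supplier<Analyzer> defaultAnalyzer;

        AnalyzedFieldBuilder(String name, Supplier<Analyzer> defaultAnalyzer) {
            this.name = name;
            this.defaultAnalyzer = defaultAnalyzer;
        }

        Analyzer resolveAnalyzer() {
            // Only instantiated when the field is actually built.
            return defaultAnalyzer.get();
        }

        public static void main(String[] args) {
            AnalyzedFieldBuilder b = new AnalyzedFieldBuilder("field", StandardAnalyzer::new);
            System.out.println(b.name + ": " + b.resolveAnalyzer().getClass().getSimpleName());
        }
    }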
@ -19,25 +19,15 @@

package org.elasticsearch.index.mapper;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.AnalysisMode;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
@ -56,39 +46,6 @@ import static org.mockito.Mockito.when;

public class TypeParsersTests extends ESTestCase {

    private static final IndexMetadata EMPTY_INDEX_METADATA = IndexMetadata.builder("")
        .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT))
        .numberOfShards(1).numberOfReplicas(0).build();
    private static final IndexSettings indexSettings = new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY);

    public void testParseTextFieldCheckAnalyzerAnalysisMode() {
        TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField");
        Map<String, Object> fieldNode = new HashMap<String, Object>();
        fieldNode.put("analyzer", "my_analyzer");
        Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

        // check AnalysisMode.ALL works
        Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
        analyzers.put("my_analyzer",
            new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", AnalysisMode.ALL)));

        IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
        when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext);

        // check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked
        AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME);
        analyzers = defaultAnalyzers();
        analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX,
            createAnalyzerWithMode("my_analyzer", mode)));
        indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
        when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        MapperException ex = expectThrows(MapperException.class,
            () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext));
        assertEquals("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run in all mode.",
            ex.getMessage());
    }

    private static Map<String, NamedAnalyzer> defaultAnalyzers() {
        Map<String, NamedAnalyzer> analyzers = new HashMap<>();
        analyzers.put(DEFAULT_ANALYZER_NAME, new NamedAnalyzer("default", AnalyzerScope.INDEX, null));
@ -97,76 +54,8 @@ public class TypeParsersTests extends ESTestCase {
        return analyzers;
    }

    public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() {
        TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField");
        for (String settingToTest : new String[] { "search_analyzer", "search_quote_analyzer" }) {
            Map<String, Object> fieldNode = new HashMap<String, Object>();
            fieldNode.put(settingToTest, "my_analyzer");
            fieldNode.put("analyzer", "standard");
            if (settingToTest.equals("search_quote_analyzer")) {
                fieldNode.put("search_analyzer", "standard");
            }
            Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

            // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works
            Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
            AnalysisMode mode = randomFrom(AnalysisMode.ALL, AnalysisMode.SEARCH_TIME);
            analyzers.put("my_analyzer",
                new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode)));
            analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));

            IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
            when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
            TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext);

            // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked
            mode = AnalysisMode.INDEX_TIME;
            analyzers = defaultAnalyzers();
            analyzers.put("my_analyzer",
                new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode)));
            analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));
            indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
            when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
            MapperException ex = expectThrows(MapperException.class,
                () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext));
            assertEquals("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run in search time mode.",
                ex.getMessage());
        }
    }

    public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() {
        TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField");
        Map<String, Object> fieldNode = new HashMap<String, Object>();
        fieldNode.put("analyzer", "my_analyzer");
        Mapper.TypeParser.ParserContext parserContext = mock(Mapper.TypeParser.ParserContext.class);

        // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer
        AnalysisMode mode = AnalysisMode.INDEX_TIME;
        Map<String, NamedAnalyzer> analyzers = defaultAnalyzers();
        analyzers.put("my_analyzer",
            new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode)));
        IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
        when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        MapperException ex = expectThrows(MapperException.class,
            () -> TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext));
        assertEquals("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run in all mode.",
            ex.getMessage());

        // check AnalysisMode.INDEX_TIME is okay if search analyzer is also set
        fieldNode.put("search_analyzer", "standard");
        analyzers = defaultAnalyzers();
        mode = randomFrom(AnalysisMode.ALL, AnalysisMode.INDEX_TIME);
        analyzers.put("my_analyzer",
            new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode("my_analyzer", mode)));
        analyzers.put("standard", new NamedAnalyzer("standard", AnalyzerScope.INDEX, new StandardAnalyzer()));

        indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap());
        when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        TypeParsers.parseTextField(builder, "name", new HashMap<>(fieldNode), parserContext);
    }
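These removed tests encode the gating rule that the new per-mapper analyzer tests take over: an analyzer restricted to index-time or search-time use cannot serve as the general `analyzer` unless the other role is covered separately. A self-contained sketch of that rule (the enum and check below are illustrative, not the real `AnalysisMode` class):

    // Illustrative analysis-mode gate, mirroring the behaviour asserted above.
    enum Mode { INDEX_TIME, SEARCH_TIME, ALL }

    class ModeGate {
        // The general "analyzer" setting must work at both index and search time
        // unless a dedicated search_analyzer is configured alongside it.
        static void checkGeneralAnalyzer(Mode mode, boolean hasSearchAnalyzer) {
            if (mode == Mode.SEARCH_TIME || (mode == Mode.INDEX_TIME && hasSearchAnalyzer == false)) {
                throw new IllegalStateException("analyzer restricted to " + mode + " cannot run in all mode");
            }
        }

        public static void main(String[] args) {
            checkGeneralAnalyzer(Mode.ALL, false);        // fine
            checkGeneralAnalyzer(Mode.INDEX_TIME, true);  // fine: search side covered
            checkGeneralAnalyzer(Mode.INDEX_TIME, false); // throws
        }
    }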

    public void testMultiFieldWithinMultiField() throws IOException {
        TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField");
        TextFieldMapper.Builder builder = new TextFieldMapper.Builder("textField", () -> Lucene.STANDARD_ANALYZER);

        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject()
            .field("type", "keyword")
@ -192,7 +81,8 @@ public class TypeParsersTests extends ESTestCase {
        when(mapperService.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        Mapper.TypeParser.ParserContext olderContext = new Mapper.TypeParser.ParserContext(
            null, mapperService, type -> typeParser, Version.CURRENT, null, null, null);
        TypeParsers.parseField(builder, "some-field", fieldNode, olderContext);

        builder.parse("some-field", olderContext, fieldNode);
        assertWarnings("At least one multi-field, [sub-field], " +
            "was encountered that itself contains a multi-field. Defining multi-fields within a multi-field is deprecated " +
            "and will no longer be supported in 8.0. To resolve the issue, all instances of [fields] " +
@ -200,22 +90,6 @@ public class TypeParsersTests extends ESTestCase {
            "[fields] blocks into a single level, or switching to [copy_to] if appropriate.");
    }

    private Analyzer createAnalyzerWithMode(String name, AnalysisMode mode) {
        TokenFilterFactory tokenFilter = new AbstractTokenFilterFactory(indexSettings, name, Settings.EMPTY) {
            @Override
            public AnalysisMode getAnalysisMode() {
                return mode;
            }

            @Override
            public TokenStream create(TokenStream tokenStream) {
                return null;
            }
        };
        return new CustomAnalyzer(null, new CharFilterFactory[0],
            new TokenFilterFactory[] { tokenFilter });
    }

    public void testParseMeta() {
        {
            MapperParsingException e = expectThrows(MapperParsingException.class,
@ -43,6 +43,9 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.LeafFieldData;
import org.elasticsearch.index.fielddata.ScriptDocValues;
@ -290,9 +293,14 @@ public class QueryShardContextTests extends ESTestCase {
        );
        IndexMetadata indexMetadata = indexMetadataBuilder.build();
        IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
        IndexAnalyzers indexAnalyzers = new IndexAnalyzers(
            Collections.singletonMap("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, null)),
            Collections.emptyMap(), Collections.emptyMap()
        );
        MapperService mapperService = mock(MapperService.class);
        when(mapperService.getIndexSettings()).thenReturn(indexSettings);
        when(mapperService.index()).thenReturn(indexMetadata.getIndex());
        when(mapperService.getIndexAnalyzers()).thenReturn(indexAnalyzers);
        if (runtimeDocValues != null) {
            when(mapperService.fieldType(any())).thenAnswer(fieldTypeInv -> {
                String fieldName = (String)fieldTypeInv.getArguments()[0];
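Because builders now pull their default analyzer from the context, mocked `MapperService` instances in tests need an `IndexAnalyzers` wired in, as added above. A minimal sketch of that setup using the same three-map constructor (this compiles against the Elasticsearch test classpath; my reading that the three maps are analyzers, normalizers, and whitespace normalizers is an assumption, not verified from this diff):

    import java.util.Collections;

    import org.elasticsearch.index.analysis.AnalyzerScope;
    import org.elasticsearch.index.analysis.IndexAnalyzers;
    import org.elasticsearch.index.analysis.NamedAnalyzer;

    class DefaultAnalyzersSketch {
        // An IndexAnalyzers holding only a "default" entry, as in the test above.
        static IndexAnalyzers defaultOnly() {
            return new IndexAnalyzers(
                Collections.singletonMap("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, null)),
                Collections.emptyMap(), Collections.emptyMap());
        }
    }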
@ -26,6 +26,7 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -283,7 +284,7 @@ public class HighlightBuilderTests extends ESTestCase {
            null, null, System::currentTimeMillis, null, null, () -> true, null) {
            @Override
            public MappedFieldType fieldMapper(String name) {
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER);
                return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType();
            }
        };
@ -25,6 +25,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.NamedObjectNotFoundException;
@ -147,7 +148,7 @@ public class QueryRescorerBuilderTests extends ESTestCase {
            xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null, null, () -> true, null) {
            @Override
            public MappedFieldType fieldMapper(String name) {
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER);
                return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType();
            }
        };
@ -191,7 +192,7 @@ public class QueryRescorerBuilderTests extends ESTestCase {
            xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null, null, () -> true, null) {
            @Override
            public MappedFieldType fieldMapper(String name) {
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);
                TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, () -> Lucene.STANDARD_ANALYZER);
                return builder.build(new Mapper.BuilderContext(idxSettings.getSettings(), new ContentPath(1))).fieldType();
            }
        };
@ -41,7 +41,7 @@ public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {
        return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class);
    }

    public void testSynonymsUpdateable() throws FileNotFoundException, IOException {
    public void testSynonymsUpdateable() throws IOException {
        String synonymsFileName = "synonyms.txt";
        Path synonymsFile = setupSynonymsFile(synonymsFileName, "foo, baz");

@ -192,7 +192,7 @@ public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {

        final String indexName = "test";
        final String analyzerName = "my_synonym_analyzer";

        MapperException ex = expectThrows(MapperException.class, () -> client().admin().indices().prepareCreate(indexName)
            .setSettings(Settings.builder()
                .put("index.number_of_shards", 5)
@ -205,7 +205,7 @@ public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {
            .addMapping("_doc", "field", "type=text,analyzer=" + analyzerName).get());

        assertEquals("Failed to parse mapping [_doc]: analyzer [my_synonym_analyzer] "
            + "contains filters [synonym_filter] that are not allowed to run in all mode.", ex.getMessage());
            + "contains filters [synonym_filter] that are not allowed to run in index time mode.", ex.getMessage());

        // same for synonym filters in multiplexer chain
        ex = expectThrows(MapperException.class,
@ -222,7 +222,7 @@ public class ReloadSynonymAnalyzerTests extends ESSingleNodeTestCase {
            .addMapping("_doc", "field", "type=text,analyzer=" + analyzerName).get());

        assertEquals("Failed to parse mapping [_doc]: analyzer [my_synonym_analyzer] "
            + "contains filters [my_multiplexer] that are not allowed to run in all mode.", ex.getMessage());
            + "contains filters [my_multiplexer] that are not allowed to run in index time mode.", ex.getMessage());
    }

    private Path setupSynonymsFile(String synonymsFileName, String content) throws IOException {
@ -80,7 +80,7 @@ public class ConstantKeywordFieldMapper extends ParametrizedFieldMapper {

        public Builder(String name) {
            super(name);
            value.setShouldSerialize(() -> value.getValue() != null);
            value.setSerializerCheck((id, ic, v) -> v != null);
            value.setMergeValidator((previous, current) -> previous == null || Objects.equals(previous, current));
        }
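The replaced line above is the serialization-check consolidation in action: the zero-argument `shouldSerialize` supplier gives way to a single check receiving three inputs, visible here as the `(id, ic, v)` lambda. A self-contained sketch of that shape in plain Java (the interface and the parameter names `includeDefaults`/`isConfigured` are assumptions inferred from the lambda, not the verified Elasticsearch signature):

    // Illustrative three-argument serializer check; assumed shape only.
    @FunctionalInterface
    interface SerializerCheckSketch<T> {
        boolean check(boolean includeDefaults, boolean isConfigured, T value);
    }

    class SerializerCheckDemo {
        public static void main(String[] args) {
            // Serialize the parameter only when a value is actually set,
            // regardless of the other flags, as in (id, ic, v) -> v != null.
            SerializerCheckSketch<String> onlyWhenSet = (includeDefaults, isConfigured, value) -> value != null;
            System.out.println(onlyWhenSet.check(false, false, null));   // false
            System.out.println(onlyWhenSet.check(false, true, "fixed")); // true
        }
    }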