[DOCS] Correct custom analyzer callouts (#46030)

James Rodewig 2019-08-29 10:07:52 -04:00
parent 322d95f2f6
commit 3e62cf9d74
1 changed file with 13 additions and 10 deletions

@@ -162,26 +162,26 @@ PUT my_index
   "settings": {
     "analysis": {
       "analyzer": {
-        "my_custom_analyzer": {
+        "my_custom_analyzer": { <1>
           "type": "custom",
           "char_filter": [
-            "emoticons" <1>
+            "emoticons"
           ],
-          "tokenizer": "punctuation", <1>
+          "tokenizer": "punctuation",
           "filter": [
             "lowercase",
-            "english_stop" <1>
+            "english_stop"
           ]
         }
       },
       "tokenizer": {
-        "punctuation": { <1>
+        "punctuation": { <2>
           "type": "pattern",
           "pattern": "[ .,!?]"
         }
       },
       "char_filter": {
-        "emoticons": { <1>
+        "emoticons": { <3>
           "type": "mapping",
           "mappings": [
             ":) => _happy_",
@@ -190,7 +190,7 @@ PUT my_index
         }
       },
       "filter": {
-        "english_stop": { <1>
+        "english_stop": { <4>
           "type": "stop",
           "stopwords": "_english_"
         }
@@ -207,9 +207,12 @@ POST my_index/_analyze
 --------------------------------------------------
 // CONSOLE
-<1> The `emoticons` character filter, `punctuation` tokenizer and
-`english_stop` token filter are custom implementations which are defined
-in the same index settings.
+<1> Assigns the index a default custom analyzer, `my_custom_analyzer`. This
+analyzer uses a custom tokenizer, character filter, and token filter that
+are defined later in the request.
+<2> Defines the custom `punctuation` tokenizer.
+<3> Defines the custom `emoticons` character filter.
+<4> Defines the custom `english_stop` token filter.
 /////////////////////
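
For context (not part of this commit): the corrected callouts describe an analyzer assembled entirely from components declared in the same `settings` body. Assuming the `PUT my_index` request above has been applied, a request along the following lines exercises `my_custom_analyzer`; the `_analyze` API and its `analyzer`/`text` parameters are standard, while the sample text is only an illustration and is not quoted from this diff.

--------------------------------------------------
# Analyze a sample string with the custom analyzer defined in the index settings
POST my_index/_analyze
{
  "analyzer": "my_custom_analyzer",
  "text": "I'm a :) person, and you?"
}
--------------------------------------------------
// CONSOLE

Tracing the components by hand: the `emoticons` mapping rewrites `:)` to `_happy_`, the `punctuation` pattern tokenizer splits on spaces and `.,!?`, and the `lowercase` and `english_stop` filters should leave tokens along the lines of `[ i'm, _happy_, person, you ]`. Treat that as a sanity check derived from the settings shown in the diff, not as output quoted from the documentation.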