Remove `include_type_name` in asciidoc where possible (#37568)
The "include_type_name" parameter was temporarily introduced in #37285 to facilitate moving the default parameter setting to "false" in many places in the documentation code snippets. Most of the places can simply be reverted without causing errors. In this change I looked for asciidoc files that contained the "include_type_name=true" addition when creating new indices but didn't look likey they made use of the "_doc" type for mappings. This is mostly the case e.g. in the analysis docs where index creating often only contains settings. I manually corrected the use of types in some places where the docs still used an explicit type name and not the dummy "_doc" type.
This commit is contained in:
parent
2f0e0b2426
commit
25aac4f77f
@@ -43,22 +43,20 @@ the request URL.

 [source,js]
 ----
-PUT /seats?include_type_name=true
+PUT /seats
 {
   "mappings": {
-    "seat": {
-      "properties": {
-        "theatre": { "type": "keyword" },
-        "play": { "type": "text" },
-        "actors": { "type": "text" },
-        "row": { "type": "integer" },
-        "number": { "type": "integer" },
-        "cost": { "type": "double" },
-        "sold": { "type": "boolean" },
-        "datetime": { "type": "date" },
-        "date": { "type": "keyword" },
-        "time": { "type": "keyword" }
-      }
-    }
+    "properties": {
+      "theatre": { "type": "keyword" },
+      "play": { "type": "text" },
+      "actors": { "type": "text" },
+      "row": { "type": "integer" },
+      "number": { "type": "integer" },
+      "cost": { "type": "double" },
+      "sold": { "type": "boolean" },
+      "datetime": { "type": "date" },
+      "date": { "type": "keyword" },
+      "time": { "type": "keyword" }
+    }
   }
 }
@@ -124,7 +124,7 @@ Then create an analyzer as follows:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {

@@ -186,7 +186,7 @@ BaseFormAttribute. This acts as a lemmatizer for verbs and adjectives. Example:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {

@@ -243,7 +243,7 @@ For example:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {

@@ -317,7 +317,7 @@ katakana reading form:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index":{

@@ -381,7 +381,7 @@ This token filter accepts the following setting:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {

@@ -434,7 +434,7 @@ predefined list, then use the

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {

@@ -493,7 +493,7 @@ to regular Arabic decimal numbers in half-width characters. For example:

 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -90,7 +90,7 @@ Then create an analyzer as follows:

 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {

@@ -164,7 +164,7 @@ the `user_dictionary_rules` option:

 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {

@@ -332,7 +332,7 @@ For example:

 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {

@@ -398,7 +398,7 @@ The `nori_readingform` token filter rewrites tokens written in Hanja to their Ha

 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index":{
@@ -29,7 +29,7 @@ The `phonetic` token filter takes the following settings:

 [source,js]
 --------------------------------------------------
-PUT phonetic_sample?include_type_name=true
+PUT phonetic_sample
 {
   "settings": {
     "index": {
@@ -46,7 +46,7 @@ It can also be set on a per-index basis at index creation time:

 [source,js]
 ----
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.store.type": "smb_mmap_fs"
@@ -8,10 +8,9 @@ price for the product. The mapping could look like:

 [source,js]
 --------------------------------------------------
-PUT /index?include_type_name=true
+PUT /index
 {
-  "mappings": {
-    "product" : {
+  "mappings": {
     "properties" : {
       "resellers" : { <1>
         "type" : "nested",

@@ -22,7 +21,6 @@ PUT /index?include_type_name=true
       }
     }
-    }
   }
 }
--------------------------------------------------
// CONSOLE

@@ -52,7 +50,7 @@ GET /_search
 --------------------------------------------------
 // CONSOLE
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
-// TEST[s/^/PUT index\/product\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/]
+// TEST[s/^/PUT index\/_doc\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/]

 As you can see above, the nested aggregation requires the `path` of the nested documents within the top level documents.
 Then one can define any type of aggregation over these nested documents.
@@ -17,21 +17,19 @@ the issue documents as nested documents. The mapping could look like:

 [source,js]
 --------------------------------------------------
-PUT /issues?include_type_name=true
+PUT /issues
 {
   "mappings": {
-    "issue" : {
-      "properties" : {
-        "tags" : { "type" : "keyword" },
-        "comments" : { <1>
-          "type" : "nested",
-          "properties" : {
-            "username" : { "type" : "keyword" },
-            "comment" : { "type" : "text" }
-          }
-        }
-      }
-    }
+    "properties" : {
+      "tags" : { "type" : "keyword" },
+      "comments" : { <1>
+        "type" : "nested",
+        "properties" : {
+          "username" : { "type" : "keyword" },
+          "comment" : { "type" : "text" }
+        }
+      }
+    }
   }
 }
 --------------------------------------------------

@@ -45,7 +43,7 @@ tags of the issues the user has commented on:

 [source,js]
 --------------------------------------------------
-POST /issues/issue/0?refresh
+POST /issues/_doc/0?refresh
 {"tags": ["tag_1"], "comments": [{"username": "username_1"}]}
 --------------------------------------------------
 // CONSOLE
@@ -19,23 +19,21 @@ that is significant and probably very relevant to their search. 5/10,000,000 vs

 [source,js]
 --------------------------------------------------
-PUT /reports?include_type_name=true
+PUT /reports
 {
   "mappings": {
-    "report": {
-      "properties": {
-        "force": {
-          "type": "keyword"
-        },
-        "crime_type": {
-          "type": "keyword"
-        }
-      }
-    }
+    "properties": {
+      "force": {
+        "type": "keyword"
+      },
+      "crime_type": {
+        "type": "keyword"
+      }
+    }
   }
 }

-POST /reports/report/_bulk?refresh
+POST /reports/_bulk?refresh
 {"index":{"_id":0}}
 {"force": "British Transport Police", "crime_type": "Bicycle theft"}
 {"index":{"_id":1}}
@@ -7,23 +7,21 @@ A multi-bucket value source based aggregation where buckets are dynamically buil

 [source,js]
 --------------------------------------------------
-PUT /products?include_type_name=true
+PUT /products
 {
   "mappings": {
-    "product": {
-      "properties": {
-        "genre": {
-          "type": "keyword"
-        },
-        "product": {
-          "type": "keyword"
-        }
-      }
-    }
+    "properties": {
+      "genre": {
+        "type": "keyword"
+      },
+      "product": {
+        "type": "keyword"
+      }
+    }
   }
 }

-POST /products/product/_bulk?refresh
+POST /products/_bulk?refresh
 {"index":{"_id":0}}
 {"genre": "rock", "product": "Product A"}
 {"index":{"_id":1}}
@@ -53,7 +53,7 @@ Token Filters::

 [source,js]
 --------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -157,7 +157,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -86,7 +86,7 @@ pre-defined list of English stop words:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -158,7 +158,7 @@ customization:

 [source,js]
 ----------------------------------------------------
-PUT /fingerprint_example?include_type_name=true
+PUT /fingerprint_example
 {
   "settings": {
     "analysis": {
@@ -68,7 +68,7 @@ for further customization:

 [source,js]
 ----------------------------------------------------
-PUT /keyword_example?include_type_name=true
+PUT /keyword_example
 {
   "settings": {
     "analysis": {
@@ -78,7 +78,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /arabic_example?include_type_name=true
+PUT /arabic_example
 {
   "settings": {
     "analysis": {

@@ -128,7 +128,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows

 [source,js]
 ----------------------------------------------------
-PUT /armenian_example?include_type_name=true
+PUT /armenian_example
 {
   "settings": {
     "analysis": {

@@ -176,7 +176,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /basque_example?include_type_name=true
+PUT /basque_example
 {
   "settings": {
     "analysis": {

@@ -224,7 +224,7 @@ The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /bengali_example?include_type_name=true
+PUT /bengali_example
 {
   "settings": {
     "analysis": {

@@ -275,7 +275,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow

 [source,js]
 ----------------------------------------------------
-PUT /brazilian_example?include_type_name=true
+PUT /brazilian_example
 {
   "settings": {
     "analysis": {

@@ -323,7 +323,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow

 [source,js]
 ----------------------------------------------------
-PUT /bulgarian_example?include_type_name=true
+PUT /bulgarian_example
 {
   "settings": {
     "analysis": {

@@ -371,7 +371,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /catalan_example?include_type_name=true
+PUT /catalan_example
 {
   "settings": {
     "analysis": {

@@ -428,7 +428,7 @@ The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /cjk_example?include_type_name=true
+PUT /cjk_example
 {
   "settings": {
     "analysis": {

@@ -474,7 +474,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /czech_example?include_type_name=true
+PUT /czech_example
 {
   "settings": {
     "analysis": {

@@ -522,7 +522,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /danish_example?include_type_name=true
+PUT /danish_example
 {
   "settings": {
     "analysis": {

@@ -570,7 +570,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /dutch_example?include_type_name=true
+PUT /dutch_example
 {
   "settings": {
     "analysis": {

@@ -628,7 +628,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /english_example?include_type_name=true
+PUT /english_example
 {
   "settings": {
     "analysis": {

@@ -681,7 +681,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /finnish_example?include_type_name=true
+PUT /finnish_example
 {
   "settings": {
     "analysis": {

@@ -729,7 +729,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /french_example?include_type_name=true
+PUT /french_example
 {
   "settings": {
     "analysis": {

@@ -787,7 +787,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows

 [source,js]
 ----------------------------------------------------
-PUT /galician_example?include_type_name=true
+PUT /galician_example
 {
   "settings": {
     "analysis": {

@@ -835,7 +835,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /german_example?include_type_name=true
+PUT /german_example
 {
   "settings": {
     "analysis": {

@@ -884,7 +884,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /greek_example?include_type_name=true
+PUT /greek_example
 {
   "settings": {
     "analysis": {

@@ -936,7 +936,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /hindi_example?include_type_name=true
+PUT /hindi_example
 {
   "settings": {
     "analysis": {

@@ -987,7 +987,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow

 [source,js]
 ----------------------------------------------------
-PUT /hungarian_example?include_type_name=true
+PUT /hungarian_example
 {
   "settings": {
     "analysis": {

@@ -1036,7 +1036,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo

 [source,js]
 ----------------------------------------------------
-PUT /indonesian_example?include_type_name=true
+PUT /indonesian_example
 {
   "settings": {
     "analysis": {

@@ -1084,7 +1084,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /irish_example?include_type_name=true
+PUT /irish_example
 {
   "settings": {
     "analysis": {

@@ -1148,7 +1148,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /italian_example?include_type_name=true
+PUT /italian_example
 {
   "settings": {
     "analysis": {

@@ -1207,7 +1207,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /latvian_example?include_type_name=true
+PUT /latvian_example
 {
   "settings": {
     "analysis": {

@@ -1255,7 +1255,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo

 [source,js]
 ----------------------------------------------------
-PUT /lithuanian_example?include_type_name=true
+PUT /lithuanian_example
 {
   "settings": {
     "analysis": {

@@ -1303,7 +1303,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow

 [source,js]
 ----------------------------------------------------
-PUT /norwegian_example?include_type_name=true
+PUT /norwegian_example
 {
   "settings": {
     "analysis": {

@@ -1351,7 +1351,7 @@ The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /persian_example?include_type_name=true
+PUT /persian_example
 {
   "settings": {
     "analysis": {

@@ -1397,7 +1397,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo

 [source,js]
 ----------------------------------------------------
-PUT /portuguese_example?include_type_name=true
+PUT /portuguese_example
 {
   "settings": {
     "analysis": {

@@ -1445,7 +1445,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows

 [source,js]
 ----------------------------------------------------
-PUT /romanian_example?include_type_name=true
+PUT /romanian_example
 {
   "settings": {
     "analysis": {

@@ -1494,7 +1494,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /russian_example?include_type_name=true
+PUT /russian_example
 {
   "settings": {
     "analysis": {

@@ -1542,7 +1542,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /sorani_example?include_type_name=true
+PUT /sorani_example
 {
   "settings": {
     "analysis": {

@@ -1592,7 +1592,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /spanish_example?include_type_name=true
+PUT /spanish_example
 {
   "settings": {
     "analysis": {

@@ -1640,7 +1640,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /swedish_example?include_type_name=true
+PUT /swedish_example
 {
   "settings": {
     "analysis": {

@@ -1688,7 +1688,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /turkish_example?include_type_name=true
+PUT /turkish_example
 {
   "settings": {
     "analysis": {

@@ -1741,7 +1741,7 @@ The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:

 [source,js]
 ----------------------------------------------------
-PUT /thai_example?include_type_name=true
+PUT /thai_example
 {
   "settings": {
     "analysis": {
@@ -177,7 +177,7 @@ on non-word characters or on underscores (`\W|_`), and to lower-case the result:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -266,7 +266,7 @@ The following more complicated example splits CamelCase text into tokens:

 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -386,7 +386,7 @@ customization:

 [source,js]
 ----------------------------------------------------
-PUT /pattern_example?include_type_name=true
+PUT /pattern_example
 {
   "settings": {
     "analysis": {
@@ -135,7 +135,7 @@ a starting point for further customization:

 [source,js]
 ----------------------------------------------------
-PUT /simple_example?include_type_name=true
+PUT /simple_example
 {
   "settings": {
     "analysis": {
@@ -151,7 +151,7 @@ pre-defined list of English stop words:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -283,7 +283,7 @@ it, usually by adding token filters. This would recreate the built-in

 [source,js]
 ----------------------------------------------------
-PUT /standard_example?include_type_name=true
+PUT /standard_example
 {
   "settings": {
     "analysis": {
@@ -130,7 +130,7 @@ words as stop words:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -248,7 +248,7 @@ customization:

 [source,js]
 ----------------------------------------------------
-PUT /stop_example?include_type_name=true
+PUT /stop_example
 {
   "settings": {
     "analysis": {
@@ -128,7 +128,7 @@ and you can use it as a starting point for further customization:

 [source,js]
 ----------------------------------------------------
-PUT /whitespace_example?include_type_name=true
+PUT /whitespace_example
 {
   "settings": {
     "analysis": {
@@ -73,7 +73,7 @@ tags in place:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -33,7 +33,7 @@ numerals with their Latin equivalents:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -107,7 +107,7 @@ example replaces the `:)` and `:(` emoticons with a text equivalent:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -8,7 +8,7 @@ equivalents, if one exists. Example:

 [source,js]
 --------------------------------------------------
-PUT /asciifold_example?include_type_name=true
+PUT /asciifold_example
 {
   "settings" : {
     "analysis" : {

@@ -30,7 +30,7 @@ example:

 [source,js]
 --------------------------------------------------
-PUT /asciifold_example?include_type_name=true
+PUT /asciifold_example
 {
   "settings" : {
     "analysis" : {
@@ -16,7 +16,7 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and

 [source,js]
 --------------------------------------------------
-PUT /cjk_bigram_example?include_type_name=true
+PUT /cjk_bigram_example
 {
   "settings" : {
     "analysis" : {
@@ -41,7 +41,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /common_grams_example?include_type_name=true
+PUT /common_grams_example
 {
   "settings": {
     "analysis": {

@@ -168,4 +168,4 @@ And the response will be:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
+// TESTRESPONSE
@@ -84,7 +84,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /compound_word_example?include_type_name=true
+PUT /compound_word_example
 {
   "settings": {
     "index": {
@@ -20,7 +20,7 @@ You can set it up like:

 [source,js]
 --------------------------------------------------
-PUT /condition_example?include_type_name=true
+PUT /condition_example
 {
   "settings" : {
     "analysis" : {

@@ -87,4 +87,4 @@ And it'd respond:
 // TESTRESPONSE
 <1> The term `What` has been lowercased, because it is only 4 characters long
 <2> The term `Flapdoodle` has been left in its original case, because it doesn't pass
-the predicate
+the predicate
@@ -9,7 +9,7 @@ example:

 [source,js]
 --------------------------------------------------
-PUT /elision_example?include_type_name=true
+PUT /elision_example
 {
   "settings" : {
     "analysis" : {
@@ -42,7 +42,7 @@ settings:

 [source,js]
 --------------------------------------------------
-PUT /hunspell_example?include_type_name=true
+PUT /hunspell_example
 {
   "settings": {
     "analysis" : {
@@ -19,7 +19,7 @@ You can set it up like:

 [source,js]
 --------------------------------------------------
-PUT /keep_types_example?include_type_name=true
+PUT /keep_types_example
 {
   "settings" : {
     "analysis" : {

@@ -80,7 +80,7 @@ If the `mode` parameter is set to `exclude` like in the following example:

 [source,js]
 --------------------------------------------------
-PUT /keep_types_exclude_example?include_type_name=true
+PUT /keep_types_exclude_example
 {
   "settings" : {
     "analysis" : {

@@ -139,4 +139,4 @@ The response will be:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
+// TESTRESPONSE
@@ -20,7 +20,7 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults

 [source,js]
 --------------------------------------------------
-PUT /keep_words_example?include_type_name=true
+PUT /keep_words_example
 {
   "settings" : {
     "analysis" : {
@@ -23,7 +23,7 @@ You can configure it like:

 [source,js]
 --------------------------------------------------
-PUT /keyword_marker_example?include_type_name=true
+PUT /keyword_marker_example
 {
   "settings": {
     "analysis": {
@@ -14,7 +14,7 @@ preserve both the stemmed and unstemmed version of tokens:

 [source,js]
 --------------------------------------------------
-PUT /keyword_repeat_example?include_type_name=true
+PUT /keyword_repeat_example
 {
   "settings": {
     "analysis": {
@@ -18,7 +18,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /limit_example?include_type_name=true
+PUT /limit_example
 {
   "settings": {
     "analysis": {
@@ -10,7 +10,7 @@ custom analyzer

 [source,js]
 --------------------------------------------------
-PUT /lowercase_example?include_type_name=true
+PUT /lowercase_example
 {
   "settings": {
     "analysis": {
@@ -31,7 +31,7 @@ You can set it up like:

 [source,js]
 --------------------------------------------------
-PUT /multiplexer_example?include_type_name=true
+PUT /multiplexer_example
 {
   "settings" : {
     "analysis" : {

@@ -121,4 +121,4 @@ that produce multiple tokens at the same position. This means that any filters
 within the multiplexer will be ignored for the purpose of synonyms. If you want to
 use filters contained within the multiplexer for parsing synonyms (for example, to
 apply stemming to the synonym lists), then you should append the synonym filter
-to the relevant multiplexer filter list.
+to the relevant multiplexer filter list.
@@ -46,7 +46,7 @@ This is particularly useful for indexing text like camel-case code, eg

 [source,js]
 --------------------------------------------------
-PUT test?include_type_name=true
+PUT test
 {
   "settings" : {
     "analysis" : {

@@ -87,7 +87,7 @@ Another example is analyzing email addresses:

 [source,js]
 --------------------------------------------------
-PUT test?include_type_name=true
+PUT test
 {
   "settings" : {
     "analysis" : {
@@ -17,7 +17,7 @@ You can set it up like:

 [source,js]
 --------------------------------------------------
-PUT /condition_example?include_type_name=true
+PUT /condition_example
 {
   "settings" : {
     "analysis" : {

@@ -76,4 +76,4 @@ And it'd respond:

 <1> The token 'What' has been removed from the tokenstream because it does not
 match the predicate.
-<2> The position and offset values are unaffected by the removal of earlier tokens
+<2> The position and offset values are unaffected by the removal of earlier tokens
@@ -12,7 +12,7 @@ For example:

 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
@@ -20,7 +20,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {

@@ -53,7 +53,7 @@ You can also define the overrides rules inline:

 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
@@ -6,7 +6,7 @@ filters through a single unified interface. For example:

 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
@@ -33,7 +33,7 @@ The `stopwords` parameter accepts either an array of stopwords:

 [source,js]
 ------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis": {

@@ -53,7 +53,7 @@ or a predefined language-specific list:

 [source,js]
 ------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis": {
@@ -23,7 +23,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -59,7 +59,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -118,7 +118,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -150,7 +150,7 @@ declared using `format`:

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -7,7 +7,7 @@ Here is an example:

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -46,7 +46,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -106,7 +106,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -138,7 +138,7 @@ declared using `format`:

 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {

@@ -175,4 +175,4 @@ positions, some token filters may cause issues here. Token filters that produce
 multiple versions of a token may choose which version of the token to emit when
 parsing synonyms, e.g. `asciifolding` will only produce the folded version of the
 token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an
-error.
+error.
@@ -145,7 +145,7 @@ In this example, we configure the `classic` tokenizer to have a

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -209,7 +209,7 @@ digits as tokens, and to produce tri-grams (grams of length `3`):

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -93,7 +93,7 @@ characters, and to replace them with `/`. The first two tokens are skipped:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -125,7 +125,7 @@ tokens when it encounters commas:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {

@@ -215,7 +215,7 @@ escaped, so the pattern ends up looking like:

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -36,7 +36,7 @@ three-digit numbers

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -37,7 +37,7 @@ text on underscores.

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -136,7 +136,7 @@ In this example, we configure the `standard` tokenizer to have a

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -92,7 +92,7 @@ In this example, we configure the `uax_url_email` tokenizer to have a

 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -8,7 +8,7 @@ including filter and routing infos.
 Hidden setup for example:
 [source,js]
 --------------------------------------------------
-PUT test1?include_type_name=true
+PUT test1
 {
   "aliases": {
     "alias1": {},
@@ -174,7 +174,7 @@ In the following example, we will create a leader index in the remote cluster:

 [source,js]
 --------------------------------------------------
-PUT /server-metrics?include_type_name=true
+PUT /server-metrics
 {
   "settings" : {
     "index" : {

@@ -188,29 +188,27 @@ PUT /server-metrics?include_type_name=true
     }
   },
   "mappings" : {
-    "metric" : {
-      "properties" : {
-        "@timestamp" : {
-          "type" : "date"
-        },
-        "accept" : {
-          "type" : "long"
-        },
-        "deny" : {
-          "type" : "long"
-        },
-        "host" : {
-          "type" : "keyword"
-        },
-        "response" : {
-          "type" : "float"
-        },
-        "service" : {
-          "type" : "keyword"
-        },
-        "total" : {
-          "type" : "long"
-        }
-      }
-    }
+    "properties" : {
+      "@timestamp" : {
+        "type" : "date"
+      },
+      "accept" : {
+        "type" : "long"
+      },
+      "deny" : {
+        "type" : "long"
+      },
+      "host" : {
+        "type" : "keyword"
+      },
+      "response" : {
+        "type" : "float"
+      },
+      "service" : {
+        "type" : "keyword"
+      },
+      "total" : {
+        "type" : "long"
+      }
+    }
   }
 }
@@ -64,7 +64,7 @@ PUT _ilm/policy/my_policy
   }
 }

-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy",
@@ -72,7 +72,7 @@ PUT _ilm/policy/my_policy
   }
 }

-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
@@ -62,7 +62,7 @@ PUT _ilm/policy/my_policy
   }
 }

-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
@@ -41,7 +41,7 @@ telling it to use the policy they have created:

 [source,js]
 --------------------------------------------------
-PUT /myindex?include_type_name=true
+PUT /myindex
 {
   "settings": {
     "index.number_of_shards": 2,
@@ -107,7 +107,7 @@ To begin, we will want to bootstrap our first index to write to.

 [source,js]
 -----------------------
-PUT datastream-000001?include_type_name=true
+PUT datastream-000001
 {
   "aliases": {
     "datastream": {
@@ -356,7 +356,7 @@ index "my_index" must be the write index for the alias. For more information, re

 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy",
@@ -73,7 +73,7 @@ initial index which will be managed by our policy:

 [source,js]
 -----------------------
-PUT test-000001?include_type_name=true
+PUT test-000001
 {
   "aliases": {
     "test-alias":{

@@ -96,7 +96,7 @@ request so {ilm} immediately starts managing the index:

 [source,js]
 -----------------------
-PUT test-index?include_type_name=true
+PUT test-index
 {
   "settings": {
     "number_of_shards": 1,
@@ -39,7 +39,7 @@ PUT _ilm/policy/my_policy
   }
 }

-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
@@ -168,7 +168,7 @@ PUT _ilm/policy/my_executing_policy
 ////
 [source,js]
 ------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_executing_policy"

@@ -486,7 +486,7 @@ PUT _ilm/policy/my_other_policy
   }
 }

-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
@@ -19,14 +19,14 @@ PUT index_1

 PUT index_2

-PUT index_3?include_type_name=true
+PUT index_3
 {
   "settings": {
     "index.priority": 10
   }
 }

-PUT index_4?include_type_name=true
+PUT index_4
 {
   "settings": {
     "index.priority": 5
@@ -24,7 +24,7 @@ creation time:

 [source,js]
 ---------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "index.store.type": "niofs"

@@ -114,7 +114,7 @@ or in the index settings at index creation time:

 [source,js]
 ---------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "index.store.preload": ["nvd", "dvd"]
@@ -224,7 +224,7 @@ The following setting allows to limit the number of tokens that can be produced:

 [source,js]
 --------------------------------------------------
-PUT analyze_sample?include_type_name=true
+PUT analyze_sample
 {
   "settings" : {
     "index.analyze.max_token_count" : 20000

@@ -242,4 +242,4 @@ GET analyze_sample/_analyze
 }
 --------------------------------------------------
 // CONSOLE
-// TEST[setup:analyze_sample]
+// TEST[setup:analyze_sample]
@@ -24,7 +24,7 @@ indices recovery result.
 [source,js]
 --------------------------------------------------
 # create the index
-PUT index1?include_type_name=true
+PUT index1
 {"settings": {"index.number_of_shards": 1}}

 # create the repository
@@ -187,7 +187,7 @@ override any values set in matching index templates. For example, the following

 [source,js]
 --------------------------------------------------
-PUT /logs-000001?include_type_name=true
+PUT /logs-000001
 {
   "aliases": {
     "logs_write": {}

@@ -216,7 +216,7 @@ checked without performing the actual rollover:

 [source,js]
 --------------------------------------------------
-PUT /logs-000001?include_type_name=true
+PUT /logs-000001
 {
   "aliases": {
     "logs_write": {}

@@ -258,7 +258,7 @@ Look at the behavior of the aliases in the following example where `is_write_ind

 [source,js]
 --------------------------------------------------
-PUT my_logs_index-000001?include_type_name=true
+PUT my_logs_index-000001
 {
   "aliases": {
     "logs": { "is_write_index": true } <1>
@@ -86,7 +86,7 @@ Create a new index:

 [source,js]
 --------------------------------------------------
-PUT my_source_index?include_type_name=true
+PUT my_source_index
 {
   "settings": {
     "index.number_of_shards" : 1
@@ -198,14 +198,12 @@ the cell right next to it -- even though the shape is very close to the point.

 [source,js]
 --------------------------------------------------
-PUT /example?include_type_name=true
+PUT /example
 {
   "mappings": {
-    "doc": {
-      "properties": {
-        "location": {
-          "type": "geo_shape"
-        }
-      }
-    }
+    "properties": {
+      "location": {
+        "type": "geo_shape"
+      }
+    }
   }
 }

@@ -289,7 +287,7 @@ API. The following is an example of a point in GeoJSON.

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "point",

@@ -303,7 +301,7 @@ The following is an example of a point in WKT:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "POINT (-77.03653 38.897676)"
 }

@@ -320,7 +318,7 @@ following is an example of a LineString in GeoJSON.

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "linestring",

@@ -334,7 +332,7 @@ The following is an example of a LineString in WKT:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)"
 }

@@ -353,7 +351,7 @@ closed). The following is an example of a Polygon in GeoJSON.

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "polygon",

@@ -369,7 +367,7 @@ The following is an example of a Polygon in WKT:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))"
 }

@@ -382,7 +380,7 @@ of a polygon with a hole:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "polygon",

@@ -399,7 +397,7 @@ The following is an example of a Polygon with a hole in WKT:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))"
 }

@@ -427,7 +425,7 @@ crosses the dateline.

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "polygon",

@@ -447,7 +445,7 @@ overriding the orientation on a document:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "polygon",

@@ -467,7 +465,7 @@ The following is an example of a list of geojson points:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "multipoint",

@@ -483,7 +481,7 @@ The following is an example of a list of WKT points:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "MULTIPOINT (102.0 2.0, 103.0 2.0)"
 }

@@ -497,7 +495,7 @@ The following is an example of a list of geojson linestrings:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "multilinestring",

@@ -515,7 +513,7 @@ The following is an example of a list of WKT linestrings:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))"
 }

@@ -529,7 +527,7 @@ The following is an example of a list of geojson polygons (second polygon contai

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "multipolygon",

@@ -547,7 +545,7 @@ The following is an example of a list of WKT polygons (second polygon contains a

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))"
 }

@@ -561,7 +559,7 @@ The following is an example of a collection of geojson geometry objects:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type": "geometrycollection",

@@ -584,7 +582,7 @@ The following is an example of a collection of WKT geometry objects:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))"
 }

@@ -601,7 +599,7 @@ bounding rectangle in the format [[minLon, maxLat],[maxLon, minLat]]:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "envelope",

@@ -617,7 +615,7 @@ The following is an example of an envelope using the WKT BBOX format:

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : "BBOX (100.0, 102.0, 2.0, 0.0)"
 }

@@ -635,7 +633,7 @@ a `POLYGON`.

 [source,js]
 --------------------------------------------------
-POST /example/doc
+POST /example/_doc
 {
   "location" : {
     "type" : "circle",
@@ -55,7 +55,7 @@ index as follows:

 [source,js]
 -----------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "index.requests.cache.enable": false
@@ -119,26 +119,24 @@ default, but there will be no speed up on analysis for these fields.

 [source,js]
 --------------------------------------------------
-PUT /imdb?include_type_name=true
+PUT /imdb
 {
   "mappings": {
-    "movies": {
-      "properties": {
-        "title": {
-          "type": "text",
-          "term_vector": "yes"
-        },
-        "description": {
-          "type": "text"
-        },
-        "tags": {
-          "type": "text",
-          "fields" : {
-            "raw": {
-              "type" : "text",
-              "analyzer": "keyword",
-              "term_vector" : "yes"
-            }
-          }
-        }
-      }
-    }
+    "properties": {
+      "title": {
+        "type": "text",
+        "term_vector": "yes"
+      },
+      "description": {
+        "type": "text"
+      },
+      "tags": {
+        "type": "text",
+        "fields" : {
+          "raw": {
+            "type" : "text",
+            "analyzer": "keyword",
+            "term_vector" : "yes"
+          }
+        }
+      }
+    }
   }
 }
@@ -787,15 +787,13 @@ allow for highlighting using the postings:

 [source,js]
 --------------------------------------------------
-PUT /example?include_type_name=true
+PUT /example
 {
   "mappings": {
-    "doc" : {
-      "properties": {
-        "comment" : {
-          "type": "text",
-          "index_options" : "offsets"
-        }
-      }
-    }
+    "properties": {
+      "comment" : {
+        "type": "text",
+        "index_options" : "offsets"
+      }
+    }
   }
 }

@@ -808,15 +806,13 @@ highlighting using the `term_vectors` (this will cause the index to be bigger):

 [source,js]
 --------------------------------------------------
-PUT /example?include_type_name=true
+PUT /example
 {
   "mappings": {
-    "doc" : {
-      "properties": {
-        "comment" : {
-          "type": "text",
-          "term_vector" : "with_positions_offsets"
-        }
-      }
-    }
+    "properties": {
+      "comment" : {
+        "type": "text",
+        "term_vector" : "with_positions_offsets"
+      }
+    }
   }
 }