diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc
index 79fe9056aba..8a069145996 100644
--- a/docs/painless/painless-contexts/painless-context-examples.asciidoc
+++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc
@@ -43,22 +43,20 @@ the request URL.
 +
 [source,js]
 ----
-PUT /seats?include_type_name=true
+PUT /seats
 {
   "mappings": {
-    "seat": {
-      "properties": {
-        "theatre": { "type": "keyword" },
-        "play": { "type": "text" },
-        "actors": { "type": "text" },
-        "row": { "type": "integer" },
-        "number": { "type": "integer" },
-        "cost": { "type": "double" },
-        "sold": { "type": "boolean" },
-        "datetime": { "type": "date" },
-        "date": { "type": "keyword" },
-        "time": { "type": "keyword" }
-      }
+    "properties": {
+      "theatre": { "type": "keyword" },
+      "play": { "type": "text" },
+      "actors": { "type": "text" },
+      "row": { "type": "integer" },
+      "number": { "type": "integer" },
+      "cost": { "type": "double" },
+      "sold": { "type": "boolean" },
+      "datetime": { "type": "date" },
+      "date": { "type": "keyword" },
+      "time": { "type": "keyword" }
     }
   }
 }
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc
index fe7ed1cc317..383df5afb48 100644
--- a/docs/plugins/analysis-kuromoji.asciidoc
+++ b/docs/plugins/analysis-kuromoji.asciidoc
@@ -124,7 +124,7 @@ Then create an analyzer as follows:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -186,7 +186,7 @@ BaseFormAttribute. This acts as a lemmatizer for verbs and adjectives. Example:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -243,7 +243,7 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -317,7 +317,7 @@ katakana reading form:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index":{
@@ -381,7 +381,7 @@ This token filter accepts the following setting:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -434,7 +434,7 @@ predefined list, then use the
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
@@ -493,7 +493,7 @@ to regular Arabic decimal numbers in half-width characters. For example:
 
 [source,js]
 --------------------------------------------------
-PUT kuromoji_sample?include_type_name=true
+PUT kuromoji_sample
 {
   "settings": {
     "index": {
diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc
index 1fe21fa13da..68ec943533a 100644
--- a/docs/plugins/analysis-nori.asciidoc
+++ b/docs/plugins/analysis-nori.asciidoc
@@ -90,7 +90,7 @@ Then create an analyzer as follows:
 
 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {
@@ -164,7 +164,7 @@ the `user_dictionary_rules` option:
 
 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {
@@ -332,7 +332,7 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index": {
@@ -398,7 +398,7 @@ The `nori_readingform` token filter rewrites tokens written in Hanja to their Ha
 
 [source,js]
 --------------------------------------------------
-PUT nori_sample?include_type_name=true
+PUT nori_sample
 {
   "settings": {
     "index":{
diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc
index 7996edb6afb..e22f819e1eb 100644
--- a/docs/plugins/analysis-phonetic.asciidoc
+++ b/docs/plugins/analysis-phonetic.asciidoc
@@ -29,7 +29,7 @@ The `phonetic` token filter takes the following settings:
 
 [source,js]
 --------------------------------------------------
-PUT phonetic_sample?include_type_name=true
+PUT phonetic_sample
 {
   "settings": {
     "index": {
diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc
index e0649873f87..4f713568655 100644
--- a/docs/plugins/store-smb.asciidoc
+++ b/docs/plugins/store-smb.asciidoc
@@ -46,7 +46,7 @@ It can also be set on a per-index basis at index creation time:
 
 [source,js]
 ----
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.store.type": "smb_mmap_fs"
diff --git a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc
index 2acf760fff3..d323eb1c134 100644
--- a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc
@@ -8,10 +8,9 @@ price for the product. The mapping could look like:
 
 [source,js]
 --------------------------------------------------
-PUT /index?include_type_name=true
+PUT /index
 {
-  "mappings": {
-    "product" : {
+    "mappings": {
       "properties" : {
         "resellers" : { <1>
           "type" : "nested",
@@ -22,7 +21,6 @@ PUT /index?include_type_name=true
         }
       }
     }
-  }
 }
 --------------------------------------------------
 // CONSOLE
@@ -52,7 +50,7 @@ GET /_search
 --------------------------------------------------
 // CONSOLE
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
-// TEST[s/^/PUT index\/product\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/]
+// TEST[s/^/PUT index\/_doc\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/]
 
 As you can see above, the nested aggregation requires the `path` of the nested
 documents within the top level documents. Then one can define any type of
 aggregation over these nested documents.
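Not part of the patch itself — a minimal smoke test for the typeless form of the nested-aggregation example above, assuming a cluster where `include_type_name` is no longer required; the document payload and aggregation names mirror the snippet's TEST setup, while the `name`/`price` sub-field mappings are assumptions:

[source,js]
--------------------------------------------------
PUT /index
{
  "mappings": {
    "properties": {
      "resellers": {
        "type": "nested",
        "properties": {
          "name": { "type": "keyword" },
          "price": { "type": "double" }
        }
      }
    }
  }
}

PUT /index/_doc/0?refresh
{"name": "led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}

GET /index/_search?filter_path=aggregations
{
  "aggs": {
    "resellers": {
      "nested": { "path": "resellers" },
      "aggs": {
        "min_price": { "min": { "field": "resellers.price" } }
      }
    }
  }
}
--------------------------------------------------

Note that `_doc` takes the place of the old custom type (`product`) in document URLs throughout this patch.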
diff --git a/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc
index 493326651ef..f922d90331f 100644
--- a/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc
@@ -17,21 +17,19 @@ the issue documents as nested documents. The mapping could look like:
 
 [source,js]
 --------------------------------------------------
-PUT /issues?include_type_name=true
+PUT /issues
 {
   "mappings": {
-    "issue" : {
-      "properties" : {
-        "tags" : { "type" : "keyword" },
-        "comments" : { <1>
-          "type" : "nested",
-          "properties" : {
-            "username" : { "type" : "keyword" },
-            "comment" : { "type" : "text" }
-          }
-        }
-      }
-    }
+    "properties" : {
+      "tags" : { "type" : "keyword" },
+      "comments" : { <1>
+        "type" : "nested",
+        "properties" : {
+          "username" : { "type" : "keyword" },
+          "comment" : { "type" : "text" }
+        }
+      }
+    }
   }
 }
@@ -45,7 +43,7 @@ tags of the issues the user has commented on:
 
 [source,js]
 --------------------------------------------------
-POST /issues/issue/0?refresh
+POST /issues/_doc/0?refresh
 {"tags": ["tag_1"], "comments": [{"username": "username_1"}]}
 --------------------------------------------------
 // CONSOLE
diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
index 0b2b769adfc..1c615e795c6 100644
--- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc
@@ -19,23 +19,21 @@ that is significant and probably very relevant to their search. 5/10,000,000 vs
 
 [source,js]
 --------------------------------------------------
-PUT /reports?include_type_name=true
+PUT /reports
 {
   "mappings": {
-    "report": {
-      "properties": {
-        "force": {
-          "type": "keyword"
-        },
-        "crime_type": {
-          "type": "keyword"
-        }
+    "properties": {
+      "force": {
+        "type": "keyword"
+      },
+      "crime_type": {
+        "type": "keyword"
       }
     }
   }
 }
 
-POST /reports/report/_bulk?refresh
+POST /reports/_bulk?refresh
 {"index":{"_id":0}}
 {"force": "British Transport Police", "crime_type": "Bicycle theft"}
 {"index":{"_id":1}}
diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
index 3b104c90332..8c0e586d8b2 100644
--- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc
@@ -7,23 +7,21 @@ A multi-bucket value source based aggregation where buckets are dynamically buil
 
 [source,js]
 --------------------------------------------------
-PUT /products?include_type_name=true
+PUT /products
 {
   "mappings": {
-    "product": {
-      "properties": {
-        "genre": {
-          "type": "keyword"
-        },
-        "product": {
-          "type": "keyword"
-        }
+    "properties": {
+      "genre": {
+        "type": "keyword"
+      },
+      "product": {
+        "type": "keyword"
      }
    }
  }
 }
 
-POST /products/product/_bulk?refresh
+POST /products/_bulk?refresh
 {"index":{"_id":0}}
 {"genre": "rock", "product": "Product A"}
 {"index":{"_id":1}}
diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc
index 153f0fe539e..92133822fa5 100644
--- a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc
@@ -53,7 +53,7 @@ Token Filters::
 
 [source,js]
 --------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -157,7 +157,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
index 28df6d2d3bc..cc82d2eb817 100644
--- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc
@@ -86,7 +86,7 @@ pre-defined list of English stop words:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -158,7 +158,7 @@ customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /fingerprint_example?include_type_name=true
+PUT /fingerprint_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc
index 571ff953c95..954b514ced6 100644
--- a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc
@@ -68,7 +68,7 @@ for further customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /keyword_example?include_type_name=true
+PUT /keyword_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc
index 959504dbbd6..9a4dcbe8aaa 100644
--- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc
@@ -78,7 +78,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /arabic_example?include_type_name=true
+PUT /arabic_example
 {
   "settings": {
     "analysis": {
@@ -128,7 +128,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows
 
 [source,js]
 ----------------------------------------------------
-PUT /armenian_example?include_type_name=true
+PUT /armenian_example
 {
   "settings": {
     "analysis": {
@@ -176,7 +176,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /basque_example?include_type_name=true
+PUT /basque_example
 {
   "settings": {
     "analysis": {
@@ -224,7 +224,7 @@ The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /bengali_example?include_type_name=true
+PUT /bengali_example
 {
   "settings": {
     "analysis": {
@@ -275,7 +275,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow
 
 [source,js]
 ----------------------------------------------------
-PUT /brazilian_example?include_type_name=true
+PUT /brazilian_example
 {
   "settings": {
     "analysis": {
@@ -323,7 +323,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow
 
 [source,js]
 ----------------------------------------------------
-PUT /bulgarian_example?include_type_name=true
+PUT /bulgarian_example
 {
   "settings": {
     "analysis": {
@@ -371,7 +371,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /catalan_example?include_type_name=true
+PUT /catalan_example
 {
   "settings": {
     "analysis": {
@@ -428,7 +428,7 @@ The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /cjk_example?include_type_name=true
+PUT /cjk_example
 {
   "settings": {
     "analysis": {
@@ -474,7 +474,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /czech_example?include_type_name=true
+PUT /czech_example
 {
   "settings": {
     "analysis": {
@@ -522,7 +522,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /danish_example?include_type_name=true
+PUT /danish_example
 {
   "settings": {
     "analysis": {
@@ -570,7 +570,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /dutch_example?include_type_name=true
+PUT /dutch_example
 {
   "settings": {
     "analysis": {
@@ -628,7 +628,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /english_example?include_type_name=true
+PUT /english_example
 {
   "settings": {
     "analysis": {
@@ -681,7 +681,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /finnish_example?include_type_name=true
+PUT /finnish_example
 {
   "settings": {
     "analysis": {
@@ -729,7 +729,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /french_example?include_type_name=true
+PUT /french_example
 {
   "settings": {
     "analysis": {
@@ -787,7 +787,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows
 
 [source,js]
 ----------------------------------------------------
-PUT /galician_example?include_type_name=true
+PUT /galician_example
 {
   "settings": {
     "analysis": {
@@ -835,7 +835,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /german_example?include_type_name=true
+PUT /german_example
 {
   "settings": {
     "analysis": {
@@ -884,7 +884,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /greek_example?include_type_name=true
+PUT /greek_example
 {
   "settings": {
     "analysis": {
@@ -936,7 +936,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /hindi_example?include_type_name=true
+PUT /hindi_example
 {
   "settings": {
     "analysis": {
@@ -987,7 +987,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow
 
 [source,js]
 ----------------------------------------------------
-PUT /hungarian_example?include_type_name=true
+PUT /hungarian_example
 {
   "settings": {
     "analysis": {
@@ -1036,7 +1036,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo
 
 [source,js]
 ----------------------------------------------------
-PUT /indonesian_example?include_type_name=true
+PUT /indonesian_example
 {
   "settings": {
     "analysis": {
@@ -1084,7 +1084,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /irish_example?include_type_name=true
+PUT /irish_example
 {
   "settings": {
     "analysis": {
@@ -1148,7 +1148,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /italian_example?include_type_name=true
+PUT /italian_example
 {
   "settings": {
     "analysis": {
@@ -1207,7 +1207,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /latvian_example?include_type_name=true
+PUT /latvian_example
 {
   "settings": {
     "analysis": {
@@ -1255,7 +1255,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo
 
 [source,js]
 ----------------------------------------------------
-PUT /lithuanian_example?include_type_name=true
+PUT /lithuanian_example
 {
   "settings": {
     "analysis": {
@@ -1303,7 +1303,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow
 
 [source,js]
 ----------------------------------------------------
-PUT /norwegian_example?include_type_name=true
+PUT /norwegian_example
 {
   "settings": {
     "analysis": {
@@ -1351,7 +1351,7 @@ The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /persian_example?include_type_name=true
+PUT /persian_example
 {
   "settings": {
     "analysis": {
@@ -1397,7 +1397,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo
 
 [source,js]
 ----------------------------------------------------
-PUT /portuguese_example?include_type_name=true
+PUT /portuguese_example
 {
   "settings": {
     "analysis": {
@@ -1445,7 +1445,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows
 
 [source,js]
 ----------------------------------------------------
-PUT /romanian_example?include_type_name=true
+PUT /romanian_example
 {
   "settings": {
     "analysis": {
@@ -1494,7 +1494,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /russian_example?include_type_name=true
+PUT /russian_example
 {
   "settings": {
     "analysis": {
@@ -1542,7 +1542,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /sorani_example?include_type_name=true
+PUT /sorani_example
 {
   "settings": {
     "analysis": {
@@ -1592,7 +1592,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /spanish_example?include_type_name=true
+PUT /spanish_example
 {
   "settings": {
     "analysis": {
@@ -1640,7 +1640,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /swedish_example?include_type_name=true
+PUT /swedish_example
 {
   "settings": {
     "analysis": {
@@ -1688,7 +1688,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /turkish_example?include_type_name=true
+PUT /turkish_example
 {
   "settings": {
     "analysis": {
@@ -1741,7 +1741,7 @@ The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
 
 [source,js]
 ----------------------------------------------------
-PUT /thai_example?include_type_name=true
+PUT /thai_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
index 759c7816169..027f37280a6 100644
--- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc
@@ -177,7 +177,7 @@ on non-word characters or on underscores (`\W|_`), and to lower-case the result:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -266,7 +266,7 @@ The following more complicated example splits CamelCase text into tokens:
 
 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -386,7 +386,7 @@ customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /pattern_example?include_type_name=true
+PUT /pattern_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
index 23130a4fd58..d82655d9bd8 100644
--- a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc
@@ -135,7 +135,7 @@ a starting point for further customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /simple_example?include_type_name=true
+PUT /simple_example
 {
   "settings": {
     "analysis": {
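The long run of language-analyzer hunks above all follow the same shape, so one hedged check covers them: create the example index typelessly and exercise the analyzer with `_analyze`. The filter chain here is a trimmed sketch in the style of the `english` example, not the full rebuilt chain from the docs:

[source,js]
--------------------------------------------------
PUT /english_example
{
  "settings": {
    "analysis": {
      "filter": {
        "english_stop": { "type": "stop", "stopwords": "_english_" },
        "english_stemmer": { "type": "stemmer", "language": "english" }
      },
      "analyzer": {
        "rebuilt_english": {
          "tokenizer": "standard",
          "filter": ["lowercase", "english_stop", "english_stemmer"]
        }
      }
    }
  }
}

GET /english_example/_analyze
{
  "analyzer": "rebuilt_english",
  "text": "The foxes are jumping"
}
--------------------------------------------------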
diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
index 959e493d9d5..3097ece21db 100644
--- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc
@@ -151,7 +151,7 @@ pre-defined list of English stop words:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -283,7 +283,7 @@ it, usually by adding token filters. This would recreate the built-in
 
 [source,js]
 ----------------------------------------------------
-PUT /standard_example?include_type_name=true
+PUT /standard_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc
index 2586d79c844..1b84797d947 100644
--- a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc
@@ -130,7 +130,7 @@ words as stop words:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -248,7 +248,7 @@ customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /stop_example?include_type_name=true
+PUT /stop_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc
index 3ebc665abdd..31ba8d9ce8f 100644
--- a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc
+++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc
@@ -128,7 +128,7 @@ and you can use it as a starting point for further customization:
 
 [source,js]
 ----------------------------------------------------
-PUT /whitespace_example?include_type_name=true
+PUT /whitespace_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc
index a9e11cf778d..6c1a1875d67 100644
--- a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc
+++ b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc
@@ -73,7 +73,7 @@ tags in place:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc
index 0cbc6de782d..30e565d443a 100644
--- a/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc
+++ b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc
@@ -33,7 +33,7 @@ numerals with their Latin equivalents:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -107,7 +107,7 @@ example replaces the `:)` and `:(` emoticons with a text equivalent:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc
index 77eb1bb0582..bd22b013334 100644
--- a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc
@@ -8,7 +8,7 @@ equivalents, if one exists. Example:
 
 [source,js]
 --------------------------------------------------
-PUT /asciifold_example?include_type_name=true
+PUT /asciifold_example
 {
   "settings" : {
     "analysis" : {
@@ -30,7 +30,7 @@ example:
 
 [source,js]
 --------------------------------------------------
-PUT /asciifold_example?include_type_name=true
+PUT /asciifold_example
 {
   "settings" : {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc
index a7ec4d92485..cc26d025f04 100644
--- a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc
@@ -16,7 +16,7 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and
 
 [source,js]
 --------------------------------------------------
-PUT /cjk_bigram_example?include_type_name=true
+PUT /cjk_bigram_example
 {
   "settings" : {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
index 538e28a8cf0..80ef7c2ce79 100644
--- a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
@@ -41,7 +41,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /common_grams_example?include_type_name=true
+PUT /common_grams_example
 {
   "settings": {
     "analysis": {
@@ -168,4 +168,4 @@ And the response will be:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
\ No newline at end of file
+// TESTRESPONSE
diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc
index b345e0b7b4c..d200c0b988b 100644
--- a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc
@@ -84,7 +84,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /compound_word_example?include_type_name=true
+PUT /compound_word_example
 {
   "settings": {
     "index": {
diff --git a/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc
index e241bddb12b..0aeadded0d4 100644
--- a/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc
@@ -20,7 +20,7 @@ You can set it up like:
 
 [source,js]
 --------------------------------------------------
-PUT /condition_example?include_type_name=true
+PUT /condition_example
 {
   "settings" : {
     "analysis" : {
@@ -87,4 +87,4 @@ And it'd respond:
 // TESTRESPONSE
 <1> The term `What` has been lowercased, because it is only 4 characters long
 <2> The term `Flapdoodle` has been left in its original case, because it doesn't pass
-    the predicate
\ No newline at end of file
+    the predicate
diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
index 7a28760194a..924903b9f65 100644
--- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc
@@ -9,7 +9,7 @@ example:
 
 [source,js]
 --------------------------------------------------
-PUT /elision_example?include_type_name=true
+PUT /elision_example
 {
   "settings" : {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
index e68b6685a78..cef687f7619 100644
--- a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc
@@ -42,7 +42,7 @@ settings:
 
 [source,js]
 --------------------------------------------------
-PUT /hunspell_example?include_type_name=true
+PUT /hunspell_example
 {
   "settings": {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
index ac5b3f368b5..f8dce95f1b0 100644
--- a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc
@@ -19,7 +19,7 @@ You can set it up like:
 
 [source,js]
 --------------------------------------------------
-PUT /keep_types_example?include_type_name=true
+PUT /keep_types_example
 {
   "settings" : {
     "analysis" : {
@@ -80,7 +80,7 @@ If the `mode` parameter is set to `exclude` like in the following example:
 
 [source,js]
 --------------------------------------------------
-PUT /keep_types_exclude_example?include_type_name=true
+PUT /keep_types_exclude_example
 {
   "settings" : {
     "analysis" : {
@@ -139,4 +139,4 @@ The response will be:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
\ No newline at end of file
+// TESTRESPONSE
diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
index 1f1d49cfe89..b7385379be9 100644
--- a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc
@@ -20,7 +20,7 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults
 
 [source,js]
 --------------------------------------------------
-PUT /keep_words_example?include_type_name=true
+PUT /keep_words_example
 {
   "settings" : {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
index 8a12b0d4757..1f1e4e655c5 100644
--- a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc
@@ -23,7 +23,7 @@ You can configure it like:
 
 [source,js]
 --------------------------------------------------
-PUT /keyword_marker_example?include_type_name=true
+PUT /keyword_marker_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
index e0a2e4c73c9..044e8c14769 100644
--- a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc
@@ -14,7 +14,7 @@ preserve both the stemmed and unstemmed version of tokens:
 
 [source,js]
 --------------------------------------------------
-PUT /keyword_repeat_example?include_type_name=true
+PUT /keyword_repeat_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc
index deb13843a1c..ba2018c1076 100644
--- a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc
@@ -18,7 +18,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /limit_example?include_type_name=true
+PUT /limit_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc
index c0c35e8a60c..519fd77ba2a 100644
--- a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc
@@ -10,7 +10,7 @@ custom analyzer
 
 [source,js]
 --------------------------------------------------
-PUT /lowercase_example?include_type_name=true
+PUT /lowercase_example
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
index 8ad3fab8f36..a92e2476ad7 100644
--- a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc
@@ -31,7 +31,7 @@ You can set it up like:
 
 [source,js]
 --------------------------------------------------
-PUT /multiplexer_example?include_type_name=true
+PUT /multiplexer_example
 {
   "settings" : {
     "analysis" : {
@@ -121,4 +121,4 @@ that produce multiple tokens at the same position. This means that any filters
 within the multiplexer will be ignored for the purpose of synonyms. If you want
 to use filters contained within the multiplexer for parsing synonyms (for
 example, to apply stemming to the synonym lists), then you should append the synonym filter
-to the relevant multiplexer filter list.
\ No newline at end of file
+to the relevant multiplexer filter list.
diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
index a028abef7a1..5b935d31f12 100644
--- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc
@@ -46,7 +46,7 @@ This is particularly useful for indexing text like camel-case code, eg
 
 [source,js]
 --------------------------------------------------
-PUT test?include_type_name=true
+PUT test
 {
   "settings" : {
     "analysis" : {
@@ -87,7 +87,7 @@ Another example is analyzing email addresses:
 
 [source,js]
 --------------------------------------------------
-PUT test?include_type_name=true
+PUT test
 {
   "settings" : {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
index 2e7f7c11631..44ead824a84 100644
--- a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc
@@ -17,7 +17,7 @@ You can set it up like:
 
 [source,js]
 --------------------------------------------------
-PUT /condition_example?include_type_name=true
+PUT /condition_example
 {
   "settings" : {
     "analysis" : {
@@ -76,4 +76,4 @@ And it'd respond:
 <1> The token 'What' has been removed from the tokenstream because it does not
     match the predicate.
-<2> The position and offset values are unaffected by the removal of earlier tokens
\ No newline at end of file
+<2> The position and offset values are unaffected by the removal of earlier tokens
diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
index c9f8eff8136..99ed03649ff 100644
--- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc
@@ -12,7 +12,7 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
index b80bd517b19..e178181d147 100644
--- a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc
@@ -20,7 +20,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
@@ -53,7 +53,7 @@ You can also define the overrides rules inline:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
index f6db7206b04..f59e2f3f2cf 100644
--- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc
@@ -6,7 +6,7 @@ filters through a single unified interface. For example:
 
 [source,js]
 --------------------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis" : {
diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
index 579d2202c00..3167a4342ac 100644
--- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc
@@ -33,7 +33,7 @@ The `stopwords` parameter accepts either an array of stopwords:
 
 [source,js]
 ------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis": {
@@ -53,7 +53,7 @@ or a predefined language-specific list:
 
 [source,js]
 ------------------------------------
-PUT /my_index?include_type_name=true
+PUT /my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
index d614a6bcf4d..2a555d7d044 100644
--- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc
@@ -23,7 +23,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -59,7 +59,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -118,7 +118,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -150,7 +150,7 @@ declared using `format`:
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
index 6bb42354c35..715abdde633 100644
--- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc
@@ -7,7 +7,7 @@ Here is an example:
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -46,7 +46,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -106,7 +106,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -138,7 +138,7 @@ declared using `format`:
 
 [source,js]
 --------------------------------------------------
-PUT /test_index?include_type_name=true
+PUT /test_index
 {
   "settings": {
     "index" : {
@@ -175,4 +175,4 @@ positions, some token filters may cause issues here. Token filters that produce
 multiple versions of a token may choose which version of the token to emit when
 parsing synonyms, e.g. `asciifolding` will only produce the folded version of the
 token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an
-error.
\ No newline at end of file
+error.
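Outside the patch again — a runnable sketch of the typeless `synonym` setup the updated snippets assume, using inline `synonyms` so it needs no config file; the filter and analyzer names are illustrative:

[source,js]
--------------------------------------------------
PUT /test_index
{
  "settings": {
    "index": {
      "analysis": {
        "filter": {
          "my_synonym": {
            "type": "synonym",
            "synonyms": ["i-pod, i pod => ipod"]
          }
        },
        "analyzer": {
          "synonym_analyzer": {
            "tokenizer": "standard",
            "filter": ["lowercase", "my_synonym"]
          }
        }
      }
    }
  }
}

GET /test_index/_analyze
{
  "analyzer": "synonym_analyzer",
  "text": "i-pod"
}
--------------------------------------------------

The `_analyze` call should return a single `ipod` token if the rule was applied.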
diff --git a/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc
index ca827e73ec6..52bdcbd7732 100644
--- a/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc
@@ -145,7 +145,7 @@ In this example, we configure the `classic` tokenizer to have a
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc
index c558a293927..c182ffacd1c 100644
--- a/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc
@@ -209,7 +209,7 @@ digits as tokens, and to produce tri-grams (grams of length `3`):
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc
index b27c1fb7cef..55aa7d66da3 100644
--- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc
@@ -93,7 +93,7 @@ characters, and to replace them with `/`. The first two tokens are skipped:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
index de52ea31372..5c19fcf59cc 100644
--- a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc
@@ -125,7 +125,7 @@ tokens when it encounters commas:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
@@ -215,7 +215,7 @@ escaped, so the pattern ends up looking like:
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc
index 2f68a0b8937..adc5fc05dee 100644
--- a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc
@@ -36,7 +36,7 @@ three-digit numbers
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc
index d74f8823ff3..fc2e186f972 100644
--- a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc
@@ -37,7 +37,7 @@ text on underscores.
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc
index b19f3118800..9f77a0e13dc 100644
--- a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc
@@ -136,7 +136,7 @@ In this example, we configure the `standard` tokenizer to have a
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc
index 8df2bc50752..7fea0f1e8d8 100644
--- a/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc
+++ b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc
@@ -92,7 +92,7 @@ In this example, we configure the `uax_url_email` tokenizer to have a
 
 [source,js]
 ----------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "analysis": {
diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc
index 9cf831249dd..394231e448d 100644
--- a/docs/reference/cat/alias.asciidoc
+++ b/docs/reference/cat/alias.asciidoc
@@ -8,7 +8,7 @@ including filter and routing infos. Hidden setup for example:
 
 [source,js]
 --------------------------------------------------
-PUT test1?include_type_name=true
+PUT test1
 {
   "aliases": {
     "alias1": {},
diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc
index 16041ef8332..1af236f7d86 100644
--- a/docs/reference/ccr/getting-started.asciidoc
+++ b/docs/reference/ccr/getting-started.asciidoc
@@ -174,7 +174,7 @@ In the following example, we will create a leader index in the remote cluster:
 
 [source,js]
 --------------------------------------------------
-PUT /server-metrics?include_type_name=true
+PUT /server-metrics
 {
   "settings" : {
     "index" : {
@@ -188,29 +188,27 @@ PUT /server-metrics?include_type_name=true
     }
   },
   "mappings" : {
-    "metric" : {
-      "properties" : {
-        "@timestamp" : {
-          "type" : "date"
-        },
-        "accept" : {
-          "type" : "long"
-        },
-        "deny" : {
-          "type" : "long"
-        },
-        "host" : {
-          "type" : "keyword"
-        },
-        "response" : {
-          "type" : "float"
-        },
-        "service" : {
-          "type" : "keyword"
-        },
-        "total" : {
-          "type" : "long"
-        }
+    "properties" : {
+      "@timestamp" : {
+        "type" : "date"
+      },
+      "accept" : {
+        "type" : "long"
+      },
+      "deny" : {
+        "type" : "long"
+      },
+      "host" : {
+        "type" : "keyword"
+      },
+      "response" : {
+        "type" : "float"
+      },
+      "service" : {
+        "type" : "keyword"
+      },
+      "total" : {
+        "type" : "long"
      }
    }
  }
diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc
index d23faf9f75d..66762ead9eb 100644
--- a/docs/reference/ilm/apis/explain.asciidoc
+++ b/docs/reference/ilm/apis/explain.asciidoc
@@ -64,7 +64,7 @@ PUT _ilm/policy/my_policy
   }
 }
 
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy",
diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc
index 207d5139298..57ea1a226ea 100644
--- a/docs/reference/ilm/apis/move-to-step.asciidoc
+++ b/docs/reference/ilm/apis/move-to-step.asciidoc
@@ -72,7 +72,7 @@ PUT _ilm/policy/my_policy
   }
 }
 
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
index 81646cc1350..888d3f17eec 100644
--- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
+++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc
@@ -62,7 +62,7 @@ PUT _ilm/policy/my_policy
   }
 }
 
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc
index a671e33f2b1..abe643255bf 100644
--- a/docs/reference/ilm/error-handling.asciidoc
+++ b/docs/reference/ilm/error-handling.asciidoc
@@ -41,7 +41,7 @@ telling it to use the policy they have created:
 
 [source,js]
 --------------------------------------------------
-PUT /myindex?include_type_name=true
+PUT /myindex
 {
   "settings": {
     "index.number_of_shards": 2,
diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc
index 5c5e188a9d4..f06c95f49c0 100644
--- a/docs/reference/ilm/getting-started-ilm.asciidoc
+++ b/docs/reference/ilm/getting-started-ilm.asciidoc
@@ -107,7 +107,7 @@ To begin, we will want to bootstrap our first index to write to.
 
 [source,js]
 -----------------------
-PUT datastream-000001?include_type_name=true
+PUT datastream-000001
 {
   "aliases": {
     "datastream": {
diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc
index 2f71c20e2c7..adf78cecd89 100644
--- a/docs/reference/ilm/policy-definitions.asciidoc
+++ b/docs/reference/ilm/policy-definitions.asciidoc
@@ -356,7 +356,7 @@ index "my_index" must be the write index for the alias. For more information, re
 
 [source,js]
 --------------------------------------------------
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy",
diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
index 417b4bf9ef8..7af686238f3 100644
--- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
+++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -73,7 +73,7 @@ initial index which will be managed by our policy:
 
 [source,js]
 -----------------------
-PUT test-000001?include_type_name=true
+PUT test-000001
 {
   "aliases": {
     "test-alias":{
@@ -96,7 +96,7 @@ request so {ilm} immediately starts managing the index:
 
 [source,js]
 -----------------------
-PUT test-index?include_type_name=true
+PUT test-index
 {
   "settings": {
     "number_of_shards": 1,
diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc
index 1b5666f1410..e5366f028a9 100644
--- a/docs/reference/ilm/start-stop-ilm.asciidoc
+++ b/docs/reference/ilm/start-stop-ilm.asciidoc
@@ -39,7 +39,7 @@ PUT _ilm/policy/my_policy
   }
 }
 
-PUT my_index?include_type_name=true
+PUT my_index
 {
   "settings": {
     "index.lifecycle.name": "my_policy"
+PUT my_index { "settings": { "index.lifecycle.name": "my_policy" diff --git a/docs/reference/index-modules/allocation/prioritization.asciidoc b/docs/reference/index-modules/allocation/prioritization.asciidoc index f702a2f20f6..6693e6adb75 100644 --- a/docs/reference/index-modules/allocation/prioritization.asciidoc +++ b/docs/reference/index-modules/allocation/prioritization.asciidoc @@ -19,14 +19,14 @@ PUT index_1 PUT index_2 -PUT index_3?include_type_name=true +PUT index_3 { "settings": { "index.priority": 10 } } -PUT index_4?include_type_name=true +PUT index_4 { "settings": { "index.priority": 5 diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 1483a04868e..8c1b99a42f2 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -24,7 +24,7 @@ creation time: [source,js] --------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "settings": { "index.store.type": "niofs" @@ -114,7 +114,7 @@ or in the index settings at index creation time: [source,js] --------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "settings": { "index.store.preload": ["nvd", "dvd"] diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index 8570176282e..a0d0f3c5b24 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -224,7 +224,7 @@ The following setting allows to limit the number of tokens that can be produced: [source,js] -------------------------------------------------- -PUT analyze_sample?include_type_name=true +PUT analyze_sample { "settings" : { "index.analyze.max_token_count" : 20000 @@ -242,4 +242,4 @@ GET analyze_sample/_analyze } -------------------------------------------------- // CONSOLE -// TEST[setup:analyze_sample] \ No newline at end of file +// TEST[setup:analyze_sample] diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 975beacc02c..0929b36e774 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -24,7 +24,7 @@ indices recovery result. [source,js] -------------------------------------------------- # create the index -PUT index1?include_type_name=true +PUT index1 {"settings": {"index.number_of_shards": 1}} # create the repository diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index d53fd465519..1730c95e0dd 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -187,7 +187,7 @@ override any values set in matching index templates. 
For example, the following [source,js] -------------------------------------------------- -PUT /logs-000001?include_type_name=true +PUT /logs-000001 { "aliases": { "logs_write": {} @@ -216,7 +216,7 @@ checked without performing the actual rollover: [source,js] -------------------------------------------------- -PUT /logs-000001?include_type_name=true +PUT /logs-000001 { "aliases": { "logs_write": {} @@ -258,7 +258,7 @@ Look at the behavior of the aliases in the following example where `is_write_ind [source,js] -------------------------------------------------- -PUT my_logs_index-000001?include_type_name=true +PUT my_logs_index-000001 { "aliases": { "logs": { "is_write_index": true } <1> diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 465d7603a60..ade0a8075d5 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -86,7 +86,7 @@ Create a new index: [source,js] -------------------------------------------------- -PUT my_source_index?include_type_name=true +PUT my_source_index { "settings": { "index.number_of_shards" : 1 diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 11ba1c55cb2..a740b8c3b41 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -198,14 +198,12 @@ the cell right next to it -- even though the shape is very close to the point. [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } @@ -289,7 +287,7 @@ API. The following is an example of a point in GeoJSON. [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "point", @@ -303,7 +301,7 @@ The following is an example of a point in WKT: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "POINT (-77.03653 38.897676)" } @@ -320,7 +318,7 @@ following is an example of a LineString in GeoJSON. [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "linestring", @@ -334,7 +332,7 @@ The following is an example of a LineString in WKT: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)" } @@ -353,7 +351,7 @@ closed). The following is an example of a Polygon in GeoJSON. 
[source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "polygon", @@ -369,7 +367,7 @@ The following is an example of a Polygon in WKT: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))" } @@ -382,7 +380,7 @@ of a polygon with a hole: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "polygon", @@ -399,7 +397,7 @@ The following is an example of a Polygon with a hole in WKT: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))" } @@ -427,7 +425,7 @@ crosses the dateline. [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "polygon", @@ -447,7 +445,7 @@ overriding the orientation on a document: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "polygon", @@ -467,7 +465,7 @@ The following is an example of a list of geojson points: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "multipoint", @@ -483,7 +481,7 @@ The following is an example of a list of WKT points: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "MULTIPOINT (102.0 2.0, 103.0 2.0)" } @@ -497,7 +495,7 @@ The following is an example of a list of geojson linestrings: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "multilinestring", @@ -515,7 +513,7 @@ The following is an example of a list of WKT linestrings: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))" } @@ -529,7 +527,7 @@ The following is an example of a list of geojson polygons (second polygon contai [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "multipolygon", @@ -547,7 +545,7 @@ The following is an example of a list of WKT polygons (second polygon contains a [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))" } @@ -561,7 +559,7 @@ The following is an example of a collection of geojson geometry objects: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type": "geometrycollection", @@ -584,7 +582,7 @@ The following is an example of a collection of WKT geometry objects: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))" } @@ -601,7 +599,7 @@ bounding rectangle in the format 
[[minLon, maxLat],[maxLon, minLat]]: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "envelope", @@ -617,7 +615,7 @@ The following is an example of an envelope using the WKT BBOX format: [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : "BBOX (100.0, 102.0, 2.0, 0.0)" } @@ -635,7 +633,7 @@ a `POLYGON`. [source,js] -------------------------------------------------- -POST /example/doc +POST /example/_doc { "location" : { "type" : "circle", diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc index d13c1085898..fc04c5e9c63 100644 --- a/docs/reference/modules/indices/request_cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -55,7 +55,7 @@ index as follows: [source,js] ----------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "settings": { "index.requests.cache.enable": false diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index 3d8b08b8ff4..509b4a9b440 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -119,26 +119,24 @@ default, but there will be no speed up on analysis for these fields. [source,js] -------------------------------------------------- -PUT /imdb?include_type_name=true +PUT /imdb { "mappings": { - "movies": { - "properties": { - "title": { - "type": "text", - "term_vector": "yes" - }, - "description": { - "type": "text" - }, - "tags": { - "type": "text", - "fields" : { - "raw": { - "type" : "text", - "analyzer": "keyword", - "term_vector" : "yes" - } + "properties": { + "title": { + "type": "text", + "term_vector": "yes" + }, + "description": { + "type": "text" + }, + "tags": { + "type": "text", + "fields" : { + "raw": { + "type" : "text", + "analyzer": "keyword", + "term_vector" : "yes" } } } diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 4dd037cf3c0..ad836c7c535 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -787,15 +787,13 @@ allow for highlighting using the postings: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "doc" : { - "properties": { - "comment" : { - "type": "text", - "index_options" : "offsets" - } + "properties": { + "comment" : { + "type": "text", + "index_options" : "offsets" } } } @@ -808,15 +806,13 @@ highlighting using the `term_vectors` (this will cause the index to be bigger): [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "doc" : { - "properties": { - "comment" : { - "type": "text", - "term_vector" : "with_positions_offsets" - } + "properties": { + "comment" : { + "type": "text", + "term_vector" : "with_positions_offsets" } } }
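As a sketch of how the `term_vector` mapping above is used at query time, the request below forces the fast vector highlighter on the `comment` field via `"type": "fvh"` -- the query text is illustrative. With `with_positions_offsets` stored, the default `unified` highlighter will also pick the term vectors up automatically; `fvh` simply requires them.

[source,js]
--------------------------------------------------
GET /example/_search
{
  "query": {
    "match": { "comment": "brown fox" }
  },
  "highlight": {
    "fields": {
      "comment": { "type": "fvh" }
    }
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch]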
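The geo-shape documents indexed in the examples above can then be searched with a `geo_shape` query. A minimal sketch, with an envelope chosen so that it contains the POINT indexed earlier; `relation` would default to `intersects` if omitted:

[source,js]
--------------------------------------------------
GET /example/_search
{
  "query": {
    "geo_shape": {
      "location": {
        "shape": {
          "type": "envelope",
          "coordinates": [ [ -80.0, 40.0 ], [ -70.0, 30.0 ] ]
        },
        "relation": "within"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative sketch]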