Update the default for include_type_name to false. (#37285)
* Default include_type_name to false for get and put mappings. * Default include_type_name to false for get field mappings. * Add a constant for the default include_type_name value. * Default include_type_name to false for get and put index templates. * Default include_type_name to false for create index. * Update create index calls in REST documentation to use include_type_name=true. * Some minor clean-ups around the get index API. * In REST tests, use include_type_name=true by default for index creation. * Make sure to use 'expression == false'. * Clarify the different IndexTemplateMetaData toXContent methods. * Fix FullClusterRestartIT#testSnapshotRestore. * Fix the ml_anomalies_default_mappings test. * Fix GetFieldMappingsResponseTests and GetIndexTemplateResponseTests. We make sure to specify include_type_name=true during xContent parsing, so we continue to test the legacy typed responses. XContent generation for the typeless responses is currently only covered by REST tests, but we will be adding unit test coverage for these as we implement each typeless API in the Java HLRC. This commit also refactors GetMappingsResponse to follow the same approach as the other mappings-related responses, where we read include_type_name out of the xContent params, instead of creating a second toXContent method. This gives better consistency in the response parsing code. * Fix more REST tests. * Improve some wording in the create index documentation. * Add a note about types removal in the create index docs. * Fix SmokeTestMonitoringWithSecurityIT#testHTTPExporterWithSSL. * Make sure to mention include_type_name in the REST docs for affected APIs. * Make sure to use 'expression == false' in FullClusterRestartIT. * Mention include_type_name in the REST templates docs.
This commit is contained in:
parent
f3edbe2911
commit
36a3b84fc9
|
@ -57,6 +57,8 @@ import org.elasticsearch.common.Strings;
|
|||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER;
|
||||
|
||||
final class IndicesRequestConverters {
|
||||
|
||||
private IndicesRequestConverters() {}
|
||||
|
@ -103,6 +105,7 @@ final class IndicesRequestConverters {
|
|||
parameters.withTimeout(createIndexRequest.timeout());
|
||||
parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
|
||||
parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
|
||||
parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
request.setEntity(RequestConverters.createEntity(createIndexRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
|
@ -131,6 +134,7 @@ final class IndicesRequestConverters {
|
|||
RequestConverters.Params parameters = new RequestConverters.Params(request);
|
||||
parameters.withTimeout(putMappingRequest.timeout());
|
||||
parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout());
|
||||
parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
request.setEntity(RequestConverters.createEntity(putMappingRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
|
@ -146,6 +150,8 @@ final class IndicesRequestConverters {
|
|||
parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout());
|
||||
parameters.withIndicesOptions(getMappingsRequest.indicesOptions());
|
||||
parameters.withLocal(getMappingsRequest.local());
|
||||
parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
|
@ -165,6 +171,8 @@ final class IndicesRequestConverters {
|
|||
parameters.withIndicesOptions(getFieldMappingsRequest.indicesOptions());
|
||||
parameters.withIncludeDefaults(getFieldMappingsRequest.includeDefaults());
|
||||
parameters.withLocal(getFieldMappingsRequest.local());
|
||||
parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
|
@ -357,6 +365,7 @@ final class IndicesRequestConverters {
|
|||
if (Strings.hasText(putIndexTemplateRequest.cause())) {
|
||||
params.putParam("cause", putIndexTemplateRequest.cause());
|
||||
}
|
||||
params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
request.setEntity(RequestConverters.createEntity(putIndexTemplateRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
@ -395,6 +404,7 @@ final class IndicesRequestConverters {
|
|||
final RequestConverters.Params params = new RequestConverters.Params(request);
|
||||
params.withLocal(getIndexTemplatesRequest.isLocal());
|
||||
params.withMasterTimeout(getIndexTemplatesRequest.getMasterNodeTimeout());
|
||||
params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
return request;
|
||||
}
|
||||
|
||||
|
|
|
@ -255,9 +255,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
.put("number_of_shards", 1)
|
||||
.put("number_of_replicas", 0)
|
||||
.build();
|
||||
String mapping = "\"_doc\": { \"_source\": {\n" +
|
||||
" \"enabled\": false\n" +
|
||||
" } }";
|
||||
String mapping = "\"_source\": {\"enabled\": false}";
|
||||
createIndex(noSourceIndex, settings, mapping);
|
||||
assertEquals(
|
||||
RestStatus.OK,
|
||||
|
@ -1242,7 +1240,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
.put("number_of_shards", 1)
|
||||
.put("number_of_replicas", 0)
|
||||
.build();
|
||||
String mappings = "\"_doc\":{\"properties\":{\"field\":{\"type\":\"text\"}}}";
|
||||
String mappings = "\"properties\":{\"field\":{\"type\":\"text\"}}";
|
||||
createIndex(sourceIndex, settings, mappings);
|
||||
assertEquals(
|
||||
RestStatus.OK,
|
||||
|
@ -1318,7 +1316,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
|||
.put("number_of_shards", 1)
|
||||
.put("number_of_replicas", 0)
|
||||
.build();
|
||||
String mappings = "\"_doc\":{\"properties\":{\"field\":{\"type\":\"text\"}}}";
|
||||
String mappings = "\"properties\":{\"field\":{\"type\":\"text\"}}";
|
||||
createIndex(sourceIndex, settings, mappings);
|
||||
assertEquals(
|
||||
RestStatus.OK,
|
||||
|
|
|
@ -341,7 +341,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
.put(SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
String mappings = "\"_doc\":{\"properties\":{\"field-1\":{\"type\":\"integer\"}}}";
|
||||
String mappings = "\"properties\":{\"field-1\":{\"type\":\"integer\"}}";
|
||||
createIndex(indexName, basicSettings, mappings);
|
||||
|
||||
GetIndexRequest getIndexRequest = new GetIndexRequest()
|
||||
|
@ -371,7 +371,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
.put(SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
String mappings = "\"_doc\":{\"properties\":{\"field-1\":{\"type\":\"integer\"}}}";
|
||||
String mappings = "\"properties\":{\"field-1\":{\"type\":\"integer\"}}";
|
||||
createIndex(indexName, basicSettings, mappings);
|
||||
|
||||
GetIndexRequest getIndexRequest = new GetIndexRequest()
|
||||
|
@ -1251,8 +1251,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(extractRawValues("my-template.index_patterns", templates), contains("pattern-1", "name-*"));
|
||||
assertThat(extractValue("my-template.settings.index.number_of_shards", templates), equalTo("3"));
|
||||
assertThat(extractValue("my-template.settings.index.number_of_replicas", templates), equalTo("0"));
|
||||
assertThat(extractValue("my-template.mappings.doc.properties.host_name.type", templates), equalTo("keyword"));
|
||||
assertThat(extractValue("my-template.mappings.doc.properties.description.type", templates), equalTo("text"));
|
||||
assertThat(extractValue("my-template.mappings.properties.host_name.type", templates), equalTo("keyword"));
|
||||
assertThat(extractValue("my-template.mappings.properties.description.type", templates), equalTo("text"));
|
||||
assertThat((Map<String, String>) extractValue("my-template.aliases.alias-1", templates), hasEntry("index_routing", "abc"));
|
||||
assertThat((Map<String, String>) extractValue("my-template.aliases.{index}-write", templates), hasEntry("search_routing", "xyz"));
|
||||
}
|
||||
|
|
|
@ -78,6 +78,7 @@ import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases;
|
|||
import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest;
|
||||
import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings;
|
||||
import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction;
|
||||
import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
|
@ -132,6 +133,7 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
RequestConvertersTests.setRandomTimeout(createIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
|
||||
RequestConvertersTests.setRandomMasterTimeout(createIndexRequest, expectedParams);
|
||||
RequestConvertersTests.setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.createIndex(createIndexRequest);
|
||||
Assert.assertEquals("/" + createIndexRequest.index(), request.getEndpoint());
|
||||
|
@ -173,6 +175,7 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
|
||||
RequestConvertersTests.setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
|
||||
RequestConvertersTests.setRandomMasterTimeout(putMappingRequest, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.putMapping(putMappingRequest);
|
||||
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||
|
@ -214,6 +217,7 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
getMappingRequest::indicesOptions, expectedParams);
|
||||
RequestConvertersTests.setRandomMasterTimeout(getMappingRequest, expectedParams);
|
||||
RequestConvertersTests.setRandomLocal(getMappingRequest, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.getMappings(getMappingRequest);
|
||||
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||
|
@ -266,6 +270,7 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
RequestConvertersTests.setRandomIndicesOptions(getFieldMappingsRequest::indicesOptions, getFieldMappingsRequest::indicesOptions,
|
||||
expectedParams);
|
||||
RequestConvertersTests.setRandomLocal(getFieldMappingsRequest::local, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.getFieldMapping(getFieldMappingsRequest);
|
||||
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||
|
@ -835,6 +840,8 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
expectedParams.put("cause", cause);
|
||||
}
|
||||
RequestConvertersTests.setRandomMasterTimeout(putTemplateRequest, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.putTemplate(putTemplateRequest);
|
||||
Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + names.get(putTemplateRequest.name())));
|
||||
Assert.assertThat(request.getParameters(), equalTo(expectedParams));
|
||||
|
@ -888,6 +895,8 @@ public class IndicesRequestConvertersTests extends ESTestCase {
|
|||
Map<String, String> expectedParams = new HashMap<>();
|
||||
RequestConvertersTests.setRandomMasterTimeout(getTemplatesRequest::setMasterNodeTimeout, expectedParams);
|
||||
RequestConvertersTests.setRandomLocal(getTemplatesRequest::setLocal, expectedParams);
|
||||
expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true");
|
||||
|
||||
Request request = IndicesRequestConverters.getTemplates(getTemplatesRequest);
|
||||
Assert.assertThat(request.getEndpoint(),
|
||||
equalTo("/_template/" + names.stream().map(encodes::get).collect(Collectors.joining(","))));
|
||||
|
|
|
@ -140,11 +140,9 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
create.setJsonEntity(
|
||||
"{" +
|
||||
" \"mappings\": {" +
|
||||
" \"_doc\": {" +
|
||||
" \"properties\": {" +
|
||||
" \"rating\": {" +
|
||||
" \"type\": \"keyword\"" +
|
||||
" }" +
|
||||
" \"properties\": {" +
|
||||
" \"rating\": {" +
|
||||
" \"type\": \"keyword\"" +
|
||||
" }" +
|
||||
" }" +
|
||||
" }" +
|
||||
|
@ -172,16 +170,14 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
create.setJsonEntity(
|
||||
"{" +
|
||||
" \"mappings\": {" +
|
||||
" \"_doc\": {" +
|
||||
" \"properties\": {" +
|
||||
" \"field1\": {" +
|
||||
" \"type\": \"keyword\"," +
|
||||
" \"store\": true" +
|
||||
" }," +
|
||||
" \"field2\": {" +
|
||||
" \"type\": \"keyword\"," +
|
||||
" \"store\": true" +
|
||||
" }" +
|
||||
" \"properties\": {" +
|
||||
" \"field1\": {" +
|
||||
" \"type\": \"keyword\"," +
|
||||
" \"store\": true" +
|
||||
" }," +
|
||||
" \"field2\": {" +
|
||||
" \"type\": \"keyword\"," +
|
||||
" \"store\": true" +
|
||||
" }" +
|
||||
" }" +
|
||||
" }" +
|
||||
|
@ -445,12 +441,10 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
|
|||
createIndex.setJsonEntity(
|
||||
"{\n" +
|
||||
" \"mappings\": {\n" +
|
||||
" \"_doc\" : {\n" +
|
||||
" \"properties\" : {\n" +
|
||||
" \"qa_join_field\" : {\n" +
|
||||
" \"type\" : \"join\",\n" +
|
||||
" \"relations\" : { \"question\" : \"answer\" }\n" +
|
||||
" }\n" +
|
||||
" \"properties\" : {\n" +
|
||||
" \"qa_join_field\" : {\n" +
|
||||
" \"type\" : \"join\",\n" +
|
||||
" \"relations\" : { \"question\" : \"answer\" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }" +
|
||||
|
|
|
@ -792,17 +792,15 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
String mapping =
|
||||
"\"_doc\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" }";
|
||||
createIndex("source1", Settings.EMPTY, mapping);
|
||||
|
@ -1000,19 +998,17 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
String mapping =
|
||||
"\"_doc\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }";
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" }";
|
||||
createIndex("source1", Settings.EMPTY, mapping);
|
||||
createIndex("source2", Settings.EMPTY, mapping);
|
||||
createPipeline("my_pipeline");
|
||||
|
@ -1125,19 +1121,17 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
String mapping =
|
||||
"\"_doc\": {\n" +
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }";
|
||||
" \"properties\": {\n" +
|
||||
" \"user\": {\n" +
|
||||
" \"type\": \"text\"\n" +
|
||||
" },\n" +
|
||||
" \"field1\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" },\n" +
|
||||
" \"field2\": {\n" +
|
||||
" \"type\": \"integer\"\n" +
|
||||
" }\n" +
|
||||
" }";
|
||||
createIndex("source1", Settings.EMPTY, mapping);
|
||||
createIndex("source2", Settings.EMPTY, mapping);
|
||||
}
|
||||
|
|
|
@ -48,7 +48,7 @@ public class RequestsWithoutContentIT extends ESRestTestCase {
|
|||
|
||||
public void testPutMappingsMissingBody() throws IOException {
|
||||
ResponseException responseException = expectThrows(ResponseException.class, () ->
|
||||
client().performRequest(new Request(randomBoolean() ? "POST" : "PUT", "/test_index/test_type/_mapping")));
|
||||
client().performRequest(new Request(randomBoolean() ? "POST" : "PUT", "/test_index/_mapping")));
|
||||
assertResponseException(responseException, "request body is required");
|
||||
}
|
||||
|
||||
|
|
|
@ -525,6 +525,7 @@ for (int i = 0; i < 5; i++) {
|
|||
buildRestTests.setups['library'] = '''
|
||||
- do:
|
||||
indices.create:
|
||||
include_type_name: true
|
||||
index: library
|
||||
body:
|
||||
settings:
|
||||
|
|
|
@ -43,7 +43,7 @@ the request URL.
|
|||
+
|
||||
[source,js]
|
||||
----
|
||||
PUT /seats
|
||||
PUT /seats?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"seat": {
|
||||
|
|
|
@ -71,7 +71,7 @@ index:: The name of an index containing a mapping that is compatible with the do
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------------------
|
||||
PUT /my-index
|
||||
PUT /my-index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -129,7 +129,7 @@ query:: If `_score` is used in the script then a query can specified that will b
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------------------
|
||||
PUT /my-index
|
||||
PUT /my-index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -64,7 +64,7 @@ Here are two examples, the default usage and a customised character filter:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -112,7 +112,7 @@ using custom rules to break Myanmar and Khmer text into syllables.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -153,7 +153,7 @@ Then create an analyzer to use this rule file as follows:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index":{
|
||||
|
@ -221,7 +221,7 @@ Here are two examples, the default usage and a customised token filter:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -267,7 +267,7 @@ available to all indices:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -301,7 +301,7 @@ these filtered character are not lowercased which is why we add the
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -354,7 +354,7 @@ Below is an example of how to set up a field for sorting German names in
|
|||
|
||||
[source,js]
|
||||
--------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -503,7 +503,7 @@ For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT icu_sample
|
||||
PUT icu_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
|
|
@ -124,7 +124,7 @@ Then create an analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -186,7 +186,7 @@ BaseFormAttribute. This acts as a lemmatizer for verbs and adjectives. Example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -243,7 +243,7 @@ For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -317,7 +317,7 @@ katakana reading form:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index":{
|
||||
|
@ -381,7 +381,7 @@ This token filter accepts the following setting:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -434,7 +434,7 @@ predefined list, then use the
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -493,7 +493,7 @@ to regular Arabic decimal numbers in half-width characters. For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT kuromoji_sample
|
||||
PUT kuromoji_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
|
|
@ -90,7 +90,7 @@ Then create an analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT nori_sample
|
||||
PUT nori_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -164,7 +164,7 @@ the `user_dictionary_rules` option:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT nori_sample
|
||||
PUT nori_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -332,7 +332,7 @@ For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT nori_sample
|
||||
PUT nori_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
@ -398,7 +398,7 @@ The `nori_readingform` token filter rewrites tokens written in Hanja to their Ha
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT nori_sample
|
||||
PUT nori_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index":{
|
||||
|
|
|
@ -29,7 +29,7 @@ The `phonetic` token filter takes the following settings:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT phonetic_sample
|
||||
PUT phonetic_sample?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
|
|
@ -24,7 +24,7 @@ the search index:
|
|||
|
||||
[source,js]
|
||||
--------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -168,7 +168,7 @@ sense to include them in dedicated structured fields to support discovery via ag
|
|||
|
||||
[source,js]
|
||||
--------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -16,7 +16,7 @@ value and its hash are stored in the index:
|
|||
|
||||
[source,js]
|
||||
--------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -15,7 +15,7 @@ In order to enable the `_size` field, set the mapping as follows:
|
|||
|
||||
[source,js]
|
||||
--------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -46,7 +46,7 @@ It can also be set on a per-index basis at index creation time:
|
|||
|
||||
[source,js]
|
||||
----
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.store.type": "smb_mmap_fs"
|
||||
|
|
|
@ -11,7 +11,7 @@ For example, let's say we have an index of questions and answers. The answer typ
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT child_example
|
||||
PUT child_example?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -16,7 +16,7 @@ a composite bucket.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /sales
|
||||
PUT /sales?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -5,7 +5,7 @@ A multi-bucket aggregation that works on `geo_point` fields and conceptually wor
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /museums
|
||||
PUT /museums?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -19,7 +19,7 @@ The specified field must be of type `geo_point` (which can only be set explicitl
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /museums
|
||||
PUT /museums?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -8,7 +8,7 @@ price for the product. The mapping could look like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index
|
||||
PUT /index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"product" : {
|
||||
|
|
|
@ -11,7 +11,7 @@ For example, let's say we have an index of questions and answers. The answer typ
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT parent_example
|
||||
PUT parent_example?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -17,7 +17,7 @@ the issue documents as nested documents. The mapping could look like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /issues
|
||||
PUT /issues?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"issue" : {
|
||||
|
|
|
@ -19,7 +19,7 @@ that is significant and probably very relevant to their search. 5/10,000,000 vs
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /reports
|
||||
PUT /reports?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"report": {
|
||||
|
|
|
@ -7,7 +7,7 @@ A multi-bucket value source based aggregation where buckets are dynamically buil
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /products
|
||||
PUT /products?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"product": {
|
||||
|
|
|
@ -8,7 +8,7 @@ Example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /museums
|
||||
PUT /museums?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -7,7 +7,7 @@ Example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /museums
|
||||
PUT /museums?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -240,7 +240,7 @@ Let's see how it works with a real sample. Considering the following mapping:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /sales
|
||||
PUT /sales?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc" : {
|
||||
|
|
|
@ -39,7 +39,7 @@ Each <<text,`text`>> field in a mapping can specify its own
|
|||
|
||||
[source,js]
|
||||
-------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -8,7 +8,7 @@ to support a list of stop words:
|
|||
|
||||
[source,js]
|
||||
--------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -53,7 +53,7 @@ Token Filters::
|
|||
|
||||
[source,js]
|
||||
--------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -157,7 +157,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -86,7 +86,7 @@ pre-defined list of English stop words:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -158,7 +158,7 @@ customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /fingerprint_example
|
||||
PUT /fingerprint_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -68,7 +68,7 @@ for further customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /keyword_example
|
||||
PUT /keyword_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -78,7 +78,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /arabic_example
|
||||
PUT /arabic_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -128,7 +128,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /armenian_example
|
||||
PUT /armenian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -176,7 +176,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /basque_example
|
||||
PUT /basque_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -224,7 +224,7 @@ The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /bengali_example
|
||||
PUT /bengali_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -275,7 +275,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /brazilian_example
|
||||
PUT /brazilian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -323,7 +323,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /bulgarian_example
|
||||
PUT /bulgarian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -371,7 +371,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /catalan_example
|
||||
PUT /catalan_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -428,7 +428,7 @@ The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /cjk_example
|
||||
PUT /cjk_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -474,7 +474,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /czech_example
|
||||
PUT /czech_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -522,7 +522,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /danish_example
|
||||
PUT /danish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -570,7 +570,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /dutch_example
|
||||
PUT /dutch_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -628,7 +628,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /english_example
|
||||
PUT /english_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -681,7 +681,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /finnish_example
|
||||
PUT /finnish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -729,7 +729,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /french_example
|
||||
PUT /french_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -787,7 +787,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /galician_example
|
||||
PUT /galician_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -835,7 +835,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /german_example
|
||||
PUT /german_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -884,7 +884,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /greek_example
|
||||
PUT /greek_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -936,7 +936,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /hindi_example
|
||||
PUT /hindi_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -987,7 +987,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /hungarian_example
|
||||
PUT /hungarian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1036,7 +1036,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /indonesian_example
|
||||
PUT /indonesian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1084,7 +1084,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /irish_example
|
||||
PUT /irish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1148,7 +1148,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /italian_example
|
||||
PUT /italian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1207,7 +1207,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /latvian_example
|
||||
PUT /latvian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1255,7 +1255,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /lithuanian_example
|
||||
PUT /lithuanian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1303,7 +1303,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /norwegian_example
|
||||
PUT /norwegian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1351,7 +1351,7 @@ The `persian` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /persian_example
|
||||
PUT /persian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1397,7 +1397,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /portuguese_example
|
||||
PUT /portuguese_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1445,7 +1445,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /romanian_example
|
||||
PUT /romanian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1494,7 +1494,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /russian_example
|
||||
PUT /russian_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1542,7 +1542,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /sorani_example
|
||||
PUT /sorani_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1592,7 +1592,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /spanish_example
|
||||
PUT /spanish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1640,7 +1640,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /swedish_example
|
||||
PUT /swedish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1688,7 +1688,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /turkish_example
|
||||
PUT /turkish_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -1741,7 +1741,7 @@ The `thai` analyzer could be reimplemented as a `custom` analyzer as follows:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /thai_example
|
||||
PUT /thai_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -177,7 +177,7 @@ on non-word characters or on underscores (`\W|_`), and to lower-case the result:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -266,7 +266,7 @@ The following more complicated example splits CamelCase text into tokens:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -386,7 +386,7 @@ customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /pattern_example
|
||||
PUT /pattern_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -135,7 +135,7 @@ a starting point for further customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /simple_example
|
||||
PUT /simple_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -151,7 +151,7 @@ pre-defined list of English stop words:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -283,7 +283,7 @@ it, usually by adding token filters. This would recreate the built-in
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /standard_example
|
||||
PUT /standard_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -130,7 +130,7 @@ words as stop words:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -248,7 +248,7 @@ customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /stop_example
|
||||
PUT /stop_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -128,7 +128,7 @@ and you can use it as a starting point for further customization:
|
|||
|
||||
[source,js]
|
||||
----------------------------------------------------
|
||||
PUT /whitespace_example
|
||||
PUT /whitespace_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -73,7 +73,7 @@ tags in place:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -33,7 +33,7 @@ numerals with their Latin equivalents:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -107,7 +107,7 @@ example replaces the `:)` and `:(` emoticons with a text equivalent:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -49,7 +49,7 @@ replace any embedded dashes in numbers with underscores, i.e `123-456-789` ->
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -100,7 +100,7 @@ camelCase words to be queried individually:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -23,7 +23,7 @@ to get one is by building a custom one. Custom normalizers take a list of char
|
|||
|
||||
[source,js]
|
||||
--------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -41,7 +41,7 @@ referred to when running the `analyze` API on a specific index:
|
|||
|
||||
[source,js]
|
||||
-------------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -8,7 +8,7 @@ equivalents, if one exists. Example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /asciifold_example
|
||||
PUT /asciifold_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
@ -30,7 +30,7 @@ example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /asciifold_example
|
||||
PUT /asciifold_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -16,7 +16,7 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /cjk_bigram_example
|
||||
PUT /cjk_bigram_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -41,7 +41,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /common_grams_example
|
||||
PUT /common_grams_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -84,7 +84,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /compound_word_example
|
||||
PUT /compound_word_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
|
|
@ -20,7 +20,7 @@ You can set it up like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /condition_example
|
||||
PUT /condition_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -9,7 +9,7 @@ example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /elision_example
|
||||
PUT /elision_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -42,7 +42,7 @@ settings:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /hunspell_example
|
||||
PUT /hunspell_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis" : {
|
||||
|
|
|
@ -19,7 +19,7 @@ You can set it up like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /keep_types_example
|
||||
PUT /keep_types_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
@ -80,7 +80,7 @@ If the `mode` parameter is set to `exclude` like in the following example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /keep_types_exclude_example
|
||||
PUT /keep_types_exclude_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -20,7 +20,7 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /keep_words_example
|
||||
PUT /keep_words_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -23,7 +23,7 @@ You can configure it like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /keyword_marker_example
|
||||
PUT /keyword_marker_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -14,7 +14,7 @@ preserve both the stemmed and unstemmed version of tokens:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /keyword_repeat_example
|
||||
PUT /keyword_repeat_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -18,7 +18,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /limit_example
|
||||
PUT /limit_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -10,7 +10,7 @@ custom analyzer
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /lowercase_example
|
||||
PUT /lowercase_example?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -31,7 +31,7 @@ You can set it up like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /multiplexer_example
|
||||
PUT /multiplexer_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -46,7 +46,7 @@ This is particularly useful for indexing text like camel-case code, eg
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test
|
||||
PUT test?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
@ -87,7 +87,7 @@ Another example is analyzing email addresses:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test
|
||||
PUT test?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -17,7 +17,7 @@ You can set it up like:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /condition_example
|
||||
PUT /condition_example?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"analysis" : {
|
||||
|
|
|
@ -12,7 +12,7 @@ For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis" : {
|
||||
|
|
|
@ -20,7 +20,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis" : {
|
||||
|
@ -53,7 +53,7 @@ You can also define the overrides rules inline:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis" : {
|
||||
|
|
|
@ -6,7 +6,7 @@ filters through a single unified interface. For example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis" : {
|
||||
|
|
|
@ -33,7 +33,7 @@ The `stopwords` parameter accepts either an array of stopwords:
|
|||
|
||||
[source,js]
|
||||
------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -53,7 +53,7 @@ or a predefined language-specific list:
|
|||
|
||||
[source,js]
|
||||
------------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -23,7 +23,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -59,7 +59,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -118,7 +118,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -150,7 +150,7 @@ declared using `format`:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
|
|
@ -7,7 +7,7 @@ Here is an example:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -46,7 +46,7 @@ to note that only those synonym rules which cannot get parsed are ignored. For i
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -106,7 +106,7 @@ configuration file (note use of `synonyms` instead of `synonyms_path`):
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
@ -138,7 +138,7 @@ declared using `format`:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test_index
|
||||
PUT /test_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index" : {
|
||||
|
|
|
@ -145,7 +145,7 @@ In this example, we configure the `classic` tokenizer to have a
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -104,7 +104,7 @@ length `10`:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -222,7 +222,7 @@ Below is an example of how to set up a field for _search-as-you-type_:
|
|||
|
||||
[source,js]
|
||||
-----------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -209,7 +209,7 @@ digits as tokens, and to produce tri-grams (grams of length `3`):
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -93,7 +93,7 @@ characters, and to replace them with `/`. The first two tokens are skipped:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -125,7 +125,7 @@ tokens when it encounters commas:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
@ -215,7 +215,7 @@ escaped, so the pattern ends up looking like:
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -36,7 +36,7 @@ three-digit numbers
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -37,7 +37,7 @@ text on underscores.
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -136,7 +136,7 @@ In this example, we configure the `standard` tokenizer to have a
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -92,7 +92,7 @@ In this example, we configure the `uax_url_email` tokenizer to have a
|
|||
|
||||
[source,js]
|
||||
----------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -8,7 +8,7 @@ including filter and routing infos.
|
|||
Hidden setup for example:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test1
|
||||
PUT test1?include_type_name=true
|
||||
{
|
||||
"aliases": {
|
||||
"alias1": {},
|
||||
|
|
|
@ -9,7 +9,7 @@ on every data node in the cluster.
|
|||
Hidden setup snippet to build an index with fielddata so our results are real:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test
|
||||
PUT test?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -167,7 +167,7 @@ In the following example, we will create a leader index in the remote cluster:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /server-metrics
|
||||
PUT /server-metrics?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
|
|
|
@ -107,7 +107,7 @@ Consider for instance the following mapping:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT twitter
|
||||
PUT twitter?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -125,7 +125,7 @@ First, we create an index that stores term vectors, payloads etc. :
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /twitter/
|
||||
PUT /twitter?include_type_name=true
|
||||
{ "mappings": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
|
|
|
@ -637,7 +637,7 @@ added a mapping value to pick up more fields from the data:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT test
|
||||
PUT test?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -659,7 +659,7 @@ POST test/_doc?refresh
|
|||
"text": "words words",
|
||||
"flag": "foo"
|
||||
}
|
||||
PUT test/_mapping/_doc <2>
|
||||
PUT test/_mapping <2>
|
||||
{
|
||||
"properties": {
|
||||
"text": {"type": "text"},
|
||||
|
|
|
@ -12,7 +12,7 @@ filter on, you can safely disable indexing on this field in your
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -35,7 +35,7 @@ to not write norms to the index:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -58,7 +58,7 @@ Elasticsearch to not index positions:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -81,7 +81,7 @@ and scoring will assume that terms appear only once in every document.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -115,7 +115,7 @@ fields as `keyword`:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -9,7 +9,7 @@ content indexed in two different ways:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"analysis": {
|
||||
|
|
|
@ -50,7 +50,7 @@ field.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT movies
|
||||
PUT movies?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -123,7 +123,7 @@ should be mapped as a <<keyword,`keyword`>>:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -322,7 +322,7 @@ eagerly at refresh-time by configuring mappings as described below:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT index
|
||||
PUT index?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
|
|
@ -64,7 +64,7 @@ PUT _ilm/policy/my_policy
|
|||
}
|
||||
}
|
||||
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy",
|
||||
|
|
|
@ -72,7 +72,7 @@ PUT _ilm/policy/my_policy
|
|||
}
|
||||
}
|
||||
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy"
|
||||
|
|
|
@ -62,7 +62,7 @@ PUT _ilm/policy/my_policy
|
|||
}
|
||||
}
|
||||
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy"
|
||||
|
|
|
@ -41,7 +41,7 @@ telling it to use the policy they have created:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /myindex
|
||||
PUT /myindex?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.number_of_shards": 2,
|
||||
|
|
|
@ -107,7 +107,7 @@ To begin, we will want to bootstrap our first index to write to.
|
|||
|
||||
[source,js]
|
||||
-----------------------
|
||||
PUT datastream-000001
|
||||
PUT datastream-000001?include_type_name=true
|
||||
{
|
||||
"aliases": {
|
||||
"datastream": {
|
||||
|
|
|
@ -353,7 +353,7 @@ index "my_index" must be the write index for the alias. For more information, re
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy",
|
||||
|
|
|
@ -73,7 +73,7 @@ initial index which will be managed by our policy:
|
|||
|
||||
[source,js]
|
||||
-----------------------
|
||||
PUT test-000001
|
||||
PUT test-000001?include_type_name=true
|
||||
{
|
||||
"aliases": {
|
||||
"test-alias":{
|
||||
|
@ -96,7 +96,7 @@ request so {ilm} immediately starts managing the index:
|
|||
|
||||
[source,js]
|
||||
-----------------------
|
||||
PUT test-index
|
||||
PUT test-index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"number_of_shards": 1,
|
||||
|
|
|
@ -39,7 +39,7 @@ PUT _ilm/policy/my_policy
|
|||
}
|
||||
}
|
||||
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy"
|
||||
|
|
|
@ -168,7 +168,7 @@ PUT _ilm/policy/my_executing_policy
|
|||
////
|
||||
[source,js]
|
||||
------------------------
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_executing_policy"
|
||||
|
@ -486,7 +486,7 @@ PUT _ilm/policy/my_other_policy
|
|||
}
|
||||
}
|
||||
|
||||
PUT my_index
|
||||
PUT my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.lifecycle.name": "my_policy"
|
||||
|
|
|
@ -19,14 +19,14 @@ PUT index_1
|
|||
|
||||
PUT index_2
|
||||
|
||||
PUT index_3
|
||||
PUT index_3?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.priority": 10
|
||||
}
|
||||
}
|
||||
|
||||
PUT index_4
|
||||
PUT index_4?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.priority": 5
|
||||
|
|
|
@ -14,7 +14,7 @@ For instance the following example shows how to define a sort on a single field:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT twitter
|
||||
PUT twitter?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
|
@ -42,7 +42,7 @@ It is also possible to sort the index by more than one field:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT twitter
|
||||
PUT twitter?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
|
@ -118,7 +118,7 @@ For example, let's say we have an index that contains events sorted by a timesta
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT events
|
||||
PUT events?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
|
|
|
@ -20,7 +20,7 @@ settings.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index
|
||||
PUT /index?include_type_name=true
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
|
@ -44,7 +44,7 @@ Here we configure the DFRSimilarity so it can be referenced as
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index/_mapping/_doc
|
||||
PUT /index/_mapping
|
||||
{
|
||||
"properties" : {
|
||||
"title" : { "type" : "text", "similarity" : "my_similarity" }
|
||||
|
@ -200,7 +200,7 @@ TF-IDF:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index
|
||||
PUT /index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"number_of_shards": 1,
|
||||
|
@ -369,7 +369,7 @@ more efficient:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index
|
||||
PUT /index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"number_of_shards": 1,
|
||||
|
@ -537,7 +537,7 @@ it is <<indices-create-index,created>>:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index
|
||||
PUT /index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index": {
|
||||
|
|
|
@ -24,7 +24,7 @@ creation time:
|
|||
|
||||
[source,js]
|
||||
---------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.store.type": "niofs"
|
||||
|
@ -114,7 +114,7 @@ or in the index settings at index creation time:
|
|||
|
||||
[source,js]
|
||||
---------------------------------
|
||||
PUT /my_index
|
||||
PUT /my_index?include_type_name=true
|
||||
{
|
||||
"settings": {
|
||||
"index.store.preload": ["nvd", "dvd"]
|
||||
|
|
|
@ -142,7 +142,7 @@ exist in the mapping:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /test1
|
||||
PUT /test1?include_type_name=true
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
|
@ -376,7 +376,7 @@ First create the index and add a mapping for the `user_id` field:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /users
|
||||
PUT /users?include_type_name=true
|
||||
{
|
||||
"mappings" : {
|
||||
"_doc" : {
|
||||
|
@ -416,7 +416,7 @@ Aliases can also be specified during <<create-index-aliases,index creation>>:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /logs_20162801
|
||||
PUT /logs_20162801?include_type_name=true
|
||||
{
|
||||
"mappings" : {
|
||||
"_doc" : {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue