Merge branch 'master' into close-index-api-refactoring
This commit is contained in:
commit
bd2af2c400
|
@ -7,3 +7,4 @@
|
|||
|
||||
ES_BUILD_JAVA:
|
||||
- java11
|
||||
- openjdk12
|
||||
|
|
|
@ -9,5 +9,6 @@ ES_RUNTIME_JAVA:
|
|||
- java8
|
||||
- java8fips
|
||||
- java11
|
||||
- openjdk12
|
||||
- zulu8
|
||||
- zulu11
|
||||
|
|
|
@ -293,18 +293,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
it.standardOutput = dockerVersionOutput
|
||||
})
|
||||
final String dockerVersion = dockerVersionOutput.toString().trim()
|
||||
final Matcher matcher = dockerVersion =~ /Docker version (\d+\.\d+)\.\d+(?:-ce)?, build [0-9a-f]{7}/
|
||||
assert matcher.matches() : dockerVersion
|
||||
final dockerMajorMinorVersion = matcher.group(1)
|
||||
final String[] majorMinor = dockerMajorMinorVersion.split("\\.")
|
||||
if (Integer.parseInt(majorMinor[0]) < 17
|
||||
|| (Integer.parseInt(majorMinor[0]) == 17 && Integer.parseInt(majorMinor[1]) < 5)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]",
|
||||
dockerVersion)
|
||||
throwDockerRequiredException(message)
|
||||
}
|
||||
checkDockerVersionRecent(dockerVersion)
|
||||
|
||||
final ByteArrayOutputStream dockerImagesErrorOutput = new ByteArrayOutputStream()
|
||||
// the Docker binary executes, check that we can execute a privileged command
|
||||
|
@ -339,6 +328,21 @@ class BuildPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
|
||||
protected static void checkDockerVersionRecent(String dockerVersion) {
|
||||
final Matcher matcher = dockerVersion =~ /Docker version (\d+\.\d+)\.\d+(?:-ce)?, build [0-9a-f]{7,40}/
|
||||
assert matcher.matches(): dockerVersion
|
||||
final dockerMajorMinorVersion = matcher.group(1)
|
||||
final String[] majorMinor = dockerMajorMinorVersion.split("\\.")
|
||||
if (Integer.parseInt(majorMinor[0]) < 17
|
||||
|| (Integer.parseInt(majorMinor[0]) == 17 && Integer.parseInt(majorMinor[1]) < 5)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"building Docker images requires Docker version 17.05+ due to use of multi-stage builds yet was [%s]",
|
||||
dockerVersion)
|
||||
throwDockerRequiredException(message)
|
||||
}
|
||||
}
|
||||
|
||||
private static void throwDockerRequiredException(final String message) {
|
||||
throw new GradleException(
|
||||
message + "\nyou can address this by attending to the reported issue, "
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle;
|
||||
|
||||
import org.elasticsearch.gradle.test.GradleUnitTestCase;
|
||||
import org.gradle.api.GradleException;
|
||||
import org.junit.Test;
|
||||
|
||||
|
||||
public class BuildPluginTests extends GradleUnitTestCase {
|
||||
|
||||
public void testPassingDockerVersions() {
|
||||
BuildPlugin.checkDockerVersionRecent("Docker version 18.06.1-ce, build e68fc7a215d7");
|
||||
BuildPlugin.checkDockerVersionRecent("Docker version 17.05.0, build e68fc7a");
|
||||
BuildPlugin.checkDockerVersionRecent("Docker version 17.05.1, build e68fc7a");
|
||||
}
|
||||
|
||||
@Test(expected = GradleException.class)
|
||||
public void testFailingDockerVersions() {
|
||||
BuildPlugin.checkDockerVersionRecent("Docker version 17.04.0, build e68fc7a");
|
||||
}
|
||||
|
||||
}
|
|
@ -299,8 +299,16 @@ final class RequestConverters {
|
|||
|
||||
static Request index(IndexRequest indexRequest) {
|
||||
String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME;
|
||||
boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE);
|
||||
String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null);
|
||||
|
||||
String endpoint;
|
||||
if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
|
||||
endpoint = indexRequest.type().equals(MapperService.SINGLE_MAPPING_NAME)
|
||||
? endpoint(indexRequest.index(), "_create", indexRequest.id())
|
||||
: endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), "_create");
|
||||
} else {
|
||||
endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id());
|
||||
}
|
||||
|
||||
Request request = new Request(method, endpoint);
|
||||
|
||||
Params parameters = new Params(request);
|
||||
|
@ -471,7 +479,7 @@ final class RequestConverters {
|
|||
}
|
||||
|
||||
static Request explain(ExplainRequest explainRequest) throws IOException {
|
||||
String endpoint = explainRequest.isTypeless()
|
||||
String endpoint = explainRequest.type().equals(MapperService.SINGLE_MAPPING_NAME)
|
||||
? endpoint(explainRequest.index(), "_explain", explainRequest.id())
|
||||
: endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain");
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
|
|
|
@ -661,7 +661,7 @@ public class RequestConvertersTests extends ESTestCase {
|
|||
|
||||
Request request = RequestConverters.index(indexRequest);
|
||||
if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
|
||||
assertEquals("/" + index + "/_doc/" + id + "/_create", request.getEndpoint());
|
||||
assertEquals("/" + index + "/_create/" + id, request.getEndpoint());
|
||||
} else if (id != null) {
|
||||
assertEquals("/" + index + "/_doc/" + id, request.getEndpoint());
|
||||
} else {
|
||||
|
@ -1685,17 +1685,17 @@ public class RequestConvertersTests extends ESTestCase {
|
|||
assertEquals("/a/b", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create");
|
||||
assertEquals("/a/b/_create", endpointBuilder.build());
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_endpoint");
|
||||
assertEquals("/a/b/_endpoint", endpointBuilder.build());
|
||||
}
|
||||
|
||||
{
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create");
|
||||
assertEquals("/a/b/c/_create", endpointBuilder.build());
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_endpoint");
|
||||
assertEquals("/a/b/c/_endpoint", endpointBuilder.build());
|
||||
}
|
||||
{
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_create");
|
||||
assertEquals("/a/_create", endpointBuilder.build());
|
||||
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_endpoint");
|
||||
assertEquals("/a/_endpoint", endpointBuilder.build());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1318,7 +1318,6 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/36362")
|
||||
public void testInvalidateToken() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
:version: 7.0.0-alpha1
|
||||
:version: 7.0.0-alpha2
|
||||
:major-version: 7.x
|
||||
:lucene_version: 8.0.0
|
||||
:lucene_version_path: 8_0_0
|
||||
|
|
|
@ -460,4 +460,5 @@ include-tagged::{doc-tests-file}[{api}-request-profiling-aggs]
|
|||
<4> Retrieve the time in millis spent executing the Lucene collector
|
||||
<5> Retrieve the profile results for the sub-aggregations (if any)
|
||||
|
||||
The Rest API documentation contains more information about {ref}/_profiling_aggregations.html[Profiling Aggregations]
|
||||
The Rest API documentation contains more information about
|
||||
{ref}/search-profile-aggregations.html[Profiling aggregations].
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
[[discovery]]
|
||||
== Discovery Plugins
|
||||
|
||||
Discovery plugins extend Elasticsearch by adding new discovery mechanisms that
|
||||
can be used instead of {ref}/modules-discovery-zen.html[Zen Discovery].
|
||||
Discovery plugins extend Elasticsearch by adding new hosts providers that can be
|
||||
used to extend the {ref}/modules-discovery.html[cluster formation module].
|
||||
|
||||
[float]
|
||||
==== Core discovery plugins
|
||||
|
@ -11,22 +11,24 @@ The core discovery plugins are:
|
|||
|
||||
<<discovery-ec2,EC2 discovery>>::
|
||||
|
||||
The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API] for unicast discovery.
|
||||
The EC2 discovery plugin uses the https://github.com/aws/aws-sdk-java[AWS API]
|
||||
for unicast discovery.
|
||||
|
||||
<<discovery-azure-classic,Azure Classic discovery>>::
|
||||
|
||||
The Azure Classic discovery plugin uses the Azure Classic API for unicast discovery.
|
||||
The Azure Classic discovery plugin uses the Azure Classic API for unicast
|
||||
discovery.
|
||||
|
||||
<<discovery-gce,GCE discovery>>::
|
||||
|
||||
The Google Compute Engine discovery plugin uses the GCE API for unicast discovery.
|
||||
The Google Compute Engine discovery plugin uses the GCE API for unicast
|
||||
discovery.
|
||||
|
||||
[float]
|
||||
==== Community contributed discovery plugins
|
||||
|
||||
A number of discovery plugins have been contributed by our community:
|
||||
|
||||
* https://github.com/shikhar/eskka[eskka Discovery Plugin] (by Shikhar Bhushan)
|
||||
* https://github.com/fabric8io/elasticsearch-cloud-kubernetes[Kubernetes Discovery Plugin] (by Jimmi Dyson, http://fabric8.io[fabric8])
|
||||
|
||||
include::discovery-ec2.asciidoc[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-delete-auto-follow-pattern]]
|
||||
=== Delete Auto-Follow Pattern API
|
||||
=== Delete auto-follow pattern API
|
||||
++++
|
||||
<titleabbrev>Delete Auto-Follow Pattern</titleabbrev>
|
||||
<titleabbrev>Delete auto-follow pattern</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-get-auto-follow-pattern]]
|
||||
=== Get Auto-Follow Pattern API
|
||||
=== Get auto-follow pattern API
|
||||
++++
|
||||
<titleabbrev>Get Auto-Follow Pattern</titleabbrev>
|
||||
<titleabbrev>Get auto-follow pattern</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-put-auto-follow-pattern]]
|
||||
=== Create Auto-Follow Pattern API
|
||||
=== Create auto-follow pattern API
|
||||
++++
|
||||
<titleabbrev>Create Auto-Follow Pattern</titleabbrev>
|
||||
<titleabbrev>Create auto-follow pattern</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-get-follow-stats]]
|
||||
=== Get Follower Stats API
|
||||
=== Get follower stats API
|
||||
++++
|
||||
<titleabbrev>Get Follower Stats</titleabbrev>
|
||||
<titleabbrev>Get follower stats</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-post-pause-follow]]
|
||||
=== Pause Follower API
|
||||
=== Pause follower API
|
||||
++++
|
||||
<titleabbrev>Pause Follower</titleabbrev>
|
||||
<titleabbrev>Pause follower</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-post-resume-follow]]
|
||||
=== Resume Follower API
|
||||
=== Resume follower API
|
||||
++++
|
||||
<titleabbrev>Resume Follower</titleabbrev>
|
||||
<titleabbrev>Resume follower</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-put-follow]]
|
||||
=== Create Follower API
|
||||
=== Create follower API
|
||||
++++
|
||||
<titleabbrev>Create Follower</titleabbrev>
|
||||
<titleabbrev>Create follower</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-get-stats]]
|
||||
=== Get Cross-Cluster Replication Stats API
|
||||
=== Get cross-cluster replication stats API
|
||||
++++
|
||||
<titleabbrev>Get CCR Stats</titleabbrev>
|
||||
<titleabbrev>Get CCR stats</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-getting-started]]
|
||||
== Getting Started with {ccr}
|
||||
== Getting started with {ccr}
|
||||
|
||||
beta[]
|
||||
|
||||
|
|
|
@ -51,7 +51,8 @@ keys for each instance. If you chose to generate a CA, which is the default
|
|||
behavior, the certificate and private key are included in the output file. If
|
||||
you chose to generate CSRs, you should provide them to your commercial or
|
||||
organization-specific certificate authority to obtain signed certificates. The
|
||||
signed certificates must be in PEM format to work with {security}.
|
||||
signed certificates must be in PEM format to work with the {stack}
|
||||
{security-features}.
|
||||
|
||||
[float]
|
||||
=== Parameters
|
||||
|
|
|
@ -93,7 +93,8 @@ the command produces a zip file containing the generated certificates and keys.
|
|||
|
||||
The `csr` mode generates certificate signing requests (CSRs) that you can send
|
||||
to a trusted certificate authority to obtain signed certificates. The signed
|
||||
certificates must be in PEM or PKCS#12 format to work with {security}.
|
||||
certificates must be in PEM or PKCS#12 format to work with {es}
|
||||
{security-features}.
|
||||
|
||||
By default, the command produces a single CSR for a single instance.
|
||||
|
||||
|
|
|
@ -19,8 +19,8 @@ bin/elasticsearch-setup-passwords auto|interactive
|
|||
[float]
|
||||
=== Description
|
||||
|
||||
This command is intended for use only during the initial configuration of
|
||||
{xpack}. It uses the
|
||||
This command is intended for use only during the initial configuration of the
|
||||
{es} {security-features}. It uses the
|
||||
{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[`elastic` bootstrap password]
|
||||
to run user management API requests. After you set a password for the `elastic`
|
||||
user, the bootstrap password is no longer active and you cannot use this command.
|
||||
|
@ -36,7 +36,7 @@ location, ensure that the *ES_PATH_CONF* environment variable returns the
|
|||
correct path before you run the `elasticsearch-setup-passwords` command. You can
|
||||
override settings in your `elasticsearch.yml` file by using the `-E` command
|
||||
option. For more information about debugging connection failures, see
|
||||
{xpack-ref}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure].
|
||||
{stack-ov}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure].
|
||||
|
||||
[float]
|
||||
=== Parameters
|
||||
|
|
|
@ -189,7 +189,7 @@ Another option to specify `create` is to use the following uri:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT twitter/_doc/1/_create
|
||||
PUT twitter/_create/1
|
||||
{
|
||||
"user" : "kimchy",
|
||||
"post_date" : "2009-11-15T14:12:12",
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-delete-lifecycle]]
|
||||
=== Delete Lifecycle Policy API
|
||||
=== Delete lifecycle policy API
|
||||
++++
|
||||
<titleabbrev>Delete Policy</titleabbrev>
|
||||
<titleabbrev>Delete policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-explain-lifecycle]]
|
||||
=== Explain Lifecycle API
|
||||
=== Explain lifecycle API
|
||||
++++
|
||||
<titleabbrev>Explain Lifecycle</titleabbrev>
|
||||
<titleabbrev>Explain lifecycle</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
@ -170,7 +170,7 @@ entered this phase
|
|||
<3> The date the loaded policy was last modified
|
||||
<4> The epoch time when the loaded policy was last modified
|
||||
|
||||
If {ILM} is waiting for a step to complete, the response includes status
|
||||
If {ilm-init} is waiting for a step to complete, the response includes status
|
||||
information for the step that's being performed on the index.
|
||||
|
||||
[source,js]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-get-lifecycle]]
|
||||
=== Get Lifecycle Policy API
|
||||
=== Get lifecycle policy API
|
||||
++++
|
||||
<titleabbrev>Get Policy</titleabbrev>
|
||||
<titleabbrev>Get policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-get-status]]
|
||||
=== Get {ILM} Status API
|
||||
=== Get {ilm} status API
|
||||
++++
|
||||
<titleabbrev>Get {ILM} Status</titleabbrev>
|
||||
<titleabbrev>Get {ilm} status</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
||||
Retrieves the current {ilm} status.
|
||||
Retrieves the current {ilm} ({ilm-init}) status.
|
||||
|
||||
==== Request
|
||||
|
||||
|
@ -16,9 +16,9 @@ Retrieves the current {ilm} status.
|
|||
|
||||
==== Description
|
||||
|
||||
Returns the status of the {ILM} plugin. The `operation_mode` field in the
|
||||
Returns the status of the {ilm-init} plugin. The `operation_mode` field in the
|
||||
response shows one of three states: `STARTED`, `STOPPING`,
|
||||
or `STOPPED`. You can change the status of the {ILM} plugin with the
|
||||
or `STOPPED`. You can change the status of the {ilm-init} plugin with the
|
||||
<<ilm-start, Start ILM>> and <<ilm-stop, Stop ILM>> APIs.
|
||||
|
||||
==== Request Parameters
|
||||
|
@ -32,7 +32,7 @@ For more information, see {stack-ov}/security-privileges.html[Security Privilege
|
|||
|
||||
==== Examples
|
||||
|
||||
The following example gets the {ILM} plugin status.
|
||||
The following example gets the {ilm-init} plugin status.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[[index-lifecycle-management-api]]
|
||||
== Index Lifecycle Management API
|
||||
== {ilm-cap} API
|
||||
|
||||
beta[]
|
||||
|
||||
|
@ -7,24 +7,24 @@ You can use the following APIs to manage policies on indices.
|
|||
|
||||
[float]
|
||||
[[ilm-api-policy-endpoint]]
|
||||
=== Policy Management APIs
|
||||
=== Policy management APIs
|
||||
|
||||
* <<ilm-put-lifecycle,Create Lifecycle Policy>>
|
||||
* <<ilm-get-lifecycle,Get Lifecycle Policy>>
|
||||
* <<ilm-delete-lifecycle,Delete Lifecycle Policy>>
|
||||
* <<ilm-put-lifecycle,Create lifecycle policy>>
|
||||
* <<ilm-get-lifecycle,Get lifecycle policy>>
|
||||
* <<ilm-delete-lifecycle,Delete lifecycle policy>>
|
||||
|
||||
[float]
|
||||
[[ilm-api-index-endpoint]]
|
||||
=== Index Management APIs
|
||||
=== Index management APIs
|
||||
|
||||
* <<ilm-move-to-step,Move Index To Step>>
|
||||
* <<ilm-retry-policy,Retry Policy On Indices>>
|
||||
* <<ilm-move-to-step,Move index to step>>
|
||||
* <<ilm-retry-policy,Retry policy on indices>>
|
||||
|
||||
[float]
|
||||
[[ilm-api-management-endpoint]]
|
||||
=== Operation Management APIs
|
||||
=== Operation management APIs
|
||||
|
||||
* <<ilm-get-status,Get ILM Operation Mode>>
|
||||
* <<ilm-get-status,Get ILM operation mode>>
|
||||
* <<ilm-start,Start ILM>>
|
||||
* <<ilm-stop,Stop ILM>>
|
||||
* <<ilm-explain-lifecycle,Explain API>>
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-move-to-step]]
|
||||
=== Move to Lifecycle Step API
|
||||
=== Move to lifecycle step API
|
||||
++++
|
||||
<titleabbrev>Move to Step</titleabbrev>
|
||||
<titleabbrev>Move to step</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-put-lifecycle]]
|
||||
=== Create Lifecycle Policy API
|
||||
=== Create lifecycle policy API
|
||||
++++
|
||||
<titleabbrev>Create Policy</titleabbrev>
|
||||
<titleabbrev>Create policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
@ -35,7 +35,7 @@ include::{docdir}/rest-api/timeoutparms.asciidoc[]
|
|||
|
||||
You must have the `manage_ilm` cluster privilege to use this API. You must
|
||||
also have the `manage` index privilege on all indices being managed by `policy`.
|
||||
All operations executed by {Ilm} for a policy are executed as the user that
|
||||
All operations executed by {ilm} for a policy are executed as the user that
|
||||
put the latest version of a policy.
|
||||
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-remove-policy]]
|
||||
=== Remove Policy from Index API
|
||||
=== Remove policy from index API
|
||||
++++
|
||||
<titleabbrev>Remove Policy</titleabbrev>
|
||||
<titleabbrev>Remove policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-retry-policy]]
|
||||
=== Retry Policy Execution API
|
||||
=== Retry policy execution API
|
||||
++++
|
||||
<titleabbrev>Retry Policy</titleabbrev>
|
||||
<titleabbrev>Retry policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-start]]
|
||||
=== Start {ILM} API
|
||||
=== Start {ilm} API
|
||||
++++
|
||||
<titleabbrev>Start {ILM}</titleabbrev>
|
||||
<titleabbrev>Start {ilm}</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
||||
Start the {ILM} plugin.
|
||||
Start the {ilm} ({ilm-init}) plugin.
|
||||
|
||||
==== Request
|
||||
|
||||
|
@ -16,9 +16,9 @@ Start the {ILM} plugin.
|
|||
|
||||
==== Description
|
||||
|
||||
Starts the {ILM} plugin if it is currently stopped. {ILM} is started
|
||||
automatically when the cluster is formed. Restarting {ILM} is only
|
||||
necessary if it has been stopped using the <<ilm-stop, Stop {ILM} API>>.
|
||||
Starts the {ilm-init} plugin if it is currently stopped. {ilm-init} is started
|
||||
automatically when the cluster is formed. Restarting {ilm-init} is only
|
||||
necessary if it has been stopped using the <<ilm-stop, Stop {ilm-init} API>>.
|
||||
|
||||
==== Request Parameters
|
||||
|
||||
|
@ -27,7 +27,7 @@ include::{docdir}/rest-api/timeoutparms.asciidoc[]
|
|||
==== Authorization
|
||||
|
||||
You must have the `manage_ilm` cluster privilege to use this API.
|
||||
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
|
||||
For more information, see {stack-ov}/security-privileges.html[Security privileges].
|
||||
|
||||
==== Examples
|
||||
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-stop]]
|
||||
=== Stop {ILM} API
|
||||
=== Stop {ilm} API
|
||||
++++
|
||||
<titleabbrev>Stop {ILM}</titleabbrev>
|
||||
<titleabbrev>Stop {ilm}</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
||||
Stop the {ILM} plugin.
|
||||
Stop the {ilm} ({ilm-init}) plugin.
|
||||
|
||||
==== Request
|
||||
|
||||
|
@ -16,14 +16,14 @@ Stop the {ILM} plugin.
|
|||
|
||||
==== Description
|
||||
|
||||
Halts all lifecycle management operations and stops the {ILM} plugin. This is
|
||||
useful when you are performing maintenance on the cluster and need to prevent
|
||||
{ILM} from performing any actions on your indices.
|
||||
Halts all lifecycle management operations and stops the {ilm-init} plugin. This
|
||||
is useful when you are performing maintenance on the cluster and need to prevent
|
||||
{ilm-init} from performing any actions on your indices.
|
||||
|
||||
The API returns as soon as the stop request has been acknowledged, but the
|
||||
plugin might continue to run until in-progress operations complete and the plugin
|
||||
can be safely stopped. Use the <<ilm-get-status, Get ILM Status>> API to see
|
||||
if {ILM} is running.
|
||||
if {ilm-init} is running.
|
||||
|
||||
==== Request Parameters
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[index-lifecycle-error-handling]]
|
||||
== Index Lifecycle Error Handling
|
||||
== Index lifecycle error handling
|
||||
|
||||
beta[]
|
||||
|
||||
|
|
|
@ -5,8 +5,8 @@
|
|||
|
||||
beta[]
|
||||
|
||||
Let's jump into {ILM} by working through a hands-on scenario.
|
||||
This section will leverage many new concepts unique to {ILM} that
|
||||
Let's jump into {ilm} ({ilm-init}) by working through a hands-on scenario.
|
||||
This section will leverage many new concepts unique to {ilm-init} that
|
||||
you may not be familiar with. The following sections will explore
|
||||
these in more details.
|
||||
|
||||
|
@ -21,7 +21,7 @@ after 90 days.
|
|||
|
||||
beta[]
|
||||
|
||||
There are many new features introduced by {ILM}, but we will only focus on
|
||||
There are many new features introduced by {ilm-init}, but we will only focus on
|
||||
a few that are needed for our example. For starters, we will use the
|
||||
<<ilm-put-lifecycle,Put Policy>> API to define our first policy. Lifecycle
|
||||
policies are defined in JSON and include specific
|
||||
|
@ -99,7 +99,7 @@ PUT _template/datastream_template
|
|||
<3> alias to use for the rollover action, required since a rollover action is
|
||||
defined in the policy.
|
||||
|
||||
The above index template introduces a few new settings specific to {ILM}.
|
||||
The above index template introduces a few new settings specific to {ilm-init}.
|
||||
The first being `index.lifecycle.name`. This setting will configure
|
||||
the "datastream_policy" to the index applying this template. This means
|
||||
that all newly created indices prefixed "datastream-" will be managed by
|
||||
|
@ -148,7 +148,7 @@ beta[]
|
|||
Now that we have an index managed by our policy, how do we tell what is going
|
||||
on? Which phase are we in? Is something broken? This section will go over a
|
||||
few APIs and their responses to help us inspect our indices with respect
|
||||
to {ILM}.
|
||||
to {ilm-init}.
|
||||
|
||||
With the help of the <<ilm-explain-lifecycle,Explain API>>, we can know
|
||||
things like which phase we're in and when we entered that phase. The API
|
||||
|
@ -162,7 +162,7 @@ GET datastream-*/_ilm/explain
|
|||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
The above request will retrieve {ILM} execution information for all our
|
||||
The above request will retrieve {ilm-init} execution information for all our
|
||||
managed indices.
|
||||
|
||||
|
||||
|
@ -214,7 +214,7 @@ You can read about the full details of this response in the
|
|||
<<ilm-explain-lifecycle, explain API docs>>. For now, let's focus on how
|
||||
the response details which phase, action, and step we're in. We are in the
|
||||
"hot" phase, and "rollover" action. Rollover will continue to be called
|
||||
by {ILM} until its conditions are met and it rolls over the index.
|
||||
by {ilm-init} until its conditions are met and it rolls over the index.
|
||||
Afterwards, the original index will stay in the hot phase until 90 more
|
||||
days pass and it is deleted in the delete phase.
|
||||
As time goes on, new indices will be created and deleted.
|
||||
|
@ -226,7 +226,7 @@ that same alias.
|
|||
|
||||
|
||||
|
||||
That's it! We have our first use-case managed by {ILM}.
|
||||
That's it! We have our first use-case managed by {ilm-init}.
|
||||
|
||||
To learn more about all our APIs,
|
||||
check out <<index-lifecycle-management-api,ILM APIs>>.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[index-lifecycle-and-snapshots]]
|
||||
== Restoring Snapshots of Managed Indices
|
||||
== Restoring snapshots of managed indices
|
||||
|
||||
beta[]
|
||||
|
||||
|
|
|
@ -3,17 +3,14 @@
|
|||
[[index-lifecycle-management]]
|
||||
= Managing the index lifecycle
|
||||
|
||||
:ilm: index lifecycle management
|
||||
:Ilm: Index lifecycle management
|
||||
:ILM: ILM
|
||||
[partintro]
|
||||
--
|
||||
beta[]
|
||||
|
||||
The <<index-lifecycle-management-api, {ilm} (ILM) APIs>> enable you to automate how you
|
||||
want to manage your indices over time. Rather than simply performing management
|
||||
actions on your indices on a set schedule, you can base actions on other factors
|
||||
such as shard size and performance requirements.
|
||||
The <<index-lifecycle-management-api,{ilm} ({ilm-init}) APIs>> enable you to
|
||||
automate how you want to manage your indices over time. Rather than simply
|
||||
performing management actions on your indices on a set schedule, you can base
|
||||
actions on other factors such as shard size and performance requirements.
|
||||
|
||||
You control how indices are handled as they age by attaching a
|
||||
lifecycle policy to the index template used to create them. You can update
|
||||
|
|
|
@ -2,7 +2,7 @@ beta[]
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ilm-policy-definition]]
|
||||
== Policy Phases and Actions
|
||||
== Policy phases and actions
|
||||
|
||||
beta[]
|
||||
|
||||
|
@ -71,8 +71,8 @@ index is rolled over, then `min_age` is the time elapsed from the time the index
|
|||
is rolled over. The intention here is to execute following phases and actions
|
||||
relative to when data was written last to a rolled over index.
|
||||
|
||||
The previous phase's actions must complete before {ILM} will check `min_age` and
|
||||
transition into the next phase.
|
||||
The previous phase's actions must complete before {ilm} will check `min_age`
|
||||
and transition into the next phase.
|
||||
|
||||
=== Phase Execution
|
||||
|
||||
|
@ -80,8 +80,8 @@ beta[]
|
|||
|
||||
The current phase definition, of an index's policy being executed, is stored
|
||||
in the index's metadata. The phase and its actions are compiled into a series
|
||||
of discrete steps that are executed sequentially. Since some {ILM} actions are
|
||||
more complex and involve multiple operations against an index, each of these
|
||||
of discrete steps that are executed sequentially. Since some {ilm-init} actions
|
||||
are more complex and involve multiple operations against an index, each of these
|
||||
operations are done in isolation in a unit called a "step". The
|
||||
<<ilm-explain-lifecycle,Explain Lifecycle API>> exposes this information to us
|
||||
to see which step our index is either to execute next, or is currently
|
||||
|
|
|
@ -6,9 +6,9 @@
|
|||
beta[]
|
||||
|
||||
In order for an index to use an {ilm} policy to manage its lifecycle we must
|
||||
first define a lifecycle policy for it to use. The following request creates
|
||||
a policy called `my_policy` in Elasticsearch which we can later use to manage
|
||||
our indexes.
|
||||
first define a lifecycle policy for it to use. The following request creates a
|
||||
policy called `my_policy` in Elasticsearch which we can later use to manage our
|
||||
indexes.
|
||||
|
||||
[source,js]
|
||||
------------------------
|
||||
|
@ -39,7 +39,7 @@ PUT _ilm/policy/my_policy
|
|||
|
||||
{ilm} will manage an index using the policy defined in the
|
||||
`index.lifecycle.name` index setting. If this setting does not exist in the
|
||||
settings for a particular index {ilm} will not manage that index.
|
||||
settings for a particular index, {ilm} will not manage that index.
|
||||
|
||||
To set the policy for an index there are two options:
|
||||
|
||||
|
@ -90,7 +90,7 @@ PUT test-000001
|
|||
<1> Set this initial index to be the write index for this alias.
|
||||
|
||||
We can now write data to the `test-alias` alias. Because we have a rollover
|
||||
action defined in our policy when the index grows larger than 25GB {ilm} will
|
||||
action defined in our policy, when the index grows larger than 25GB {ilm} will
|
||||
create a new index and roll the alias over to use the new index automatically.
|
||||
|
||||
=== Apply a policy to a create index request
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[start-stop-ilm]]
|
||||
== Start And Stop {ilm}
|
||||
== Start and stop {ilm}
|
||||
|
||||
beta[]
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[update-lifecycle-policy]]
|
||||
== Update Lifecycle Policy
|
||||
== Update lifecycle policy
|
||||
++++
|
||||
<titleabbrev>Update Policy</titleabbrev>
|
||||
<titleabbrev>Update policy</titleabbrev>
|
||||
++++
|
||||
|
||||
beta[]
|
||||
|
|
|
@ -129,19 +129,19 @@ the new index, enabling indexing to continue uninterrupted.
|
|||
|
||||
beta[]
|
||||
|
||||
After an index has been rolled over by {ilm}, the
|
||||
`index.lifecycle.indexing_complete` setting will be set to `true` on the index.
|
||||
This indicates to {ilm} that this index has already been rolled over, and does
|
||||
not need to be rolled over again. If you <<ilm-remove-policy,remove the policy>>
|
||||
from an index and set it to use another policy, this setting indicates that the
|
||||
new policy should skip execution of the Rollover action.
|
||||
|
||||
You can also set this setting to `true` manually if you want to indicate that
|
||||
{ilm} should not roll over a particular index. This is useful if you need to
|
||||
make an exception to your normal Lifecycle Policy and switching the alias to a
|
||||
The `index.lifecycle.indexing_complete` setting indicates to {ilm} whether this
|
||||
index has already been rolled over. If it is set to `true`, that indicates that
|
||||
this index has already been rolled over and does not need to be rolled over
|
||||
again. Therefore, {ilm} will skip any Rollover Action configured in the
|
||||
associated lifecycle policy for this index. This is useful if you need to make
|
||||
an exception to your normal Lifecycle Policy and switch the alias to a
|
||||
different index by hand, but do not want to remove the index from {ilm}
|
||||
completely.
|
||||
|
||||
This setting is set to `true` automatically by ILM upon the successful
|
||||
completion of a Rollover Action. However, it will be removed if
|
||||
<<ilm-remove-policy,the policy is removed>> from the index.
|
||||
|
||||
IMPORTANT: If `index.lifecycle.indexing_complete` is set to `true` on an index,
|
||||
it will not be rolled over by {ilm}, but {ilm} will verify that this index is no
|
||||
longer the write index for the alias specified by
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[freeze-index-api]]
|
||||
== Freeze Index API
|
||||
== Freeze index API
|
||||
++++
|
||||
<titleabbrev>Freeze Index</titleabbrev>
|
||||
<titleabbrev>Freeze index</titleabbrev>
|
||||
++++
|
||||
|
||||
Freezes an index.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[unfreeze-index-api]]
|
||||
== Unfreeze Index API
|
||||
== Unfreeze index API
|
||||
++++
|
||||
<titleabbrev>Unfreeze Index</titleabbrev>
|
||||
<titleabbrev>Unfreeze index</titleabbrev>
|
||||
++++
|
||||
|
||||
Unfreezes an index.
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,27 @@
|
|||
[[append-processor]]
|
||||
=== Append Processor
|
||||
Appends one or more values to an existing array if the field already exists and it is an array.
|
||||
Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar.
|
||||
Creates an array containing the provided values if the field doesn't exist.
|
||||
Accepts a single value or an array of values.
|
||||
|
||||
[[append-options]]
|
||||
.Append Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be appended to. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `value` | yes | - | The value to be appended. Supports <<accessing-template-fields,template snippets>>.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"append": {
|
||||
"field": "tags",
|
||||
"value": ["production", "{{app}}", "{{owner}}"]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,27 @@
|
|||
[[bytes-processor]]
|
||||
=== Bytes Processor
|
||||
Converts a human readable byte value (e.g. 1kb) to its value in bytes (e.g. 1024).
|
||||
|
||||
Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. An error will occur if
|
||||
the field is not a supported format or resultant value exceeds 2^63.
|
||||
|
||||
[[bytes-options]]
|
||||
.Bytes Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to convert
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"bytes": {
|
||||
"field": "file.size"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,45 @@
|
|||
[[convert-processor]]
|
||||
=== Convert Processor
|
||||
Converts a field in the currently ingested document to a different type, such as converting a string to an integer.
|
||||
If the field value is an array, all members will be converted.
|
||||
|
||||
The supported types include: `integer`, `long`, `float`, `double`, `string`, `boolean`, and `auto`.
|
||||
|
||||
Specifying `boolean` will set the field to true if its string value is equal to `true` (ignore case), to
|
||||
false if its string value is equal to `false` (ignore case), or it will throw an exception otherwise.
|
||||
|
||||
Specifying `auto` will attempt to convert the string-valued `field` into the closest non-string type.
|
||||
For example, a field whose value is `"true"` will be converted to its respective boolean type: `true`. Do note
|
||||
that float takes precedence over double in `auto`. A value of `"242.15"` will "automatically" be converted to
|
||||
`242.15` of type `float`. If a provided field cannot be appropriately converted, the Convert Processor will
|
||||
still process successfully and leave the field value as-is. In such a case, `target_field` will
|
||||
still be updated with the unconverted field value.
|
||||
|
||||
[[convert-options]]
|
||||
.Convert Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field whose value is to be converted
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `type` | yes | - | The type to convert the existing value to
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/my-pipeline-id
|
||||
{
|
||||
"description": "converts the content of the id field to an integer",
|
||||
"processors" : [
|
||||
{
|
||||
"convert" : {
|
||||
"field" : "id",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,145 @@
|
|||
[[date-index-name-processor]]
|
||||
=== Date Index Name Processor
|
||||
|
||||
The purpose of this processor is to point documents to the right time based index based
|
||||
on a date or timestamp field in a document by using the <<date-math-index-names, date math index name support>>.
|
||||
|
||||
The processor sets the `_index` meta field with a date math index name expression based on the provided index name
|
||||
prefix, a date or timestamp field in the documents being processed and the provided date rounding.
|
||||
|
||||
First, this processor fetches the date or timestamp from a field in the document being processed. Optionally,
|
||||
date formatting can be configured on how the field's value should be parsed into a date. Then this date,
|
||||
the provided index name prefix and the provided date rounding get formatted into a date math index name expression.
|
||||
Also here optionally date formatting can be specified on how the date should be formatted into a date math index name
|
||||
expression.
|
||||
|
||||
An example pipeline that points documents to a monthly index that starts with a `myindex-` prefix based on a
|
||||
date in the `date1` field:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/monthlyindex
|
||||
{
|
||||
"description": "monthly date-time index naming",
|
||||
"processors" : [
|
||||
{
|
||||
"date_index_name" : {
|
||||
"field" : "date1",
|
||||
"index_name_prefix" : "myindex-",
|
||||
"date_rounding" : "M"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
|
||||
Using that pipeline for an index request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /myindex/_doc/1?pipeline=monthlyindex
|
||||
{
|
||||
"date1" : "2016-04-25T12:02:01.789Z"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"_index" : "myindex-2016-04-01",
|
||||
"_type" : "_doc",
|
||||
"_id" : "1",
|
||||
"_version" : 1,
|
||||
"result" : "created",
|
||||
"_shards" : {
|
||||
"total" : 2,
|
||||
"successful" : 1,
|
||||
"failed" : 0
|
||||
},
|
||||
"_seq_no" : 55,
|
||||
"_primary_term" : 1
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||
|
||||
|
||||
The above request will not index this document into the `myindex` index, but into the `myindex-2016-04-01` index because
|
||||
it was rounded by month. This is because the date-index-name-processor overrides the `_index` property of the document.
|
||||
|
||||
To see the date-math value of the index supplied in the actual index request which resulted in the above document being
|
||||
indexed into `myindex-2016-04-01` we can inspect the effects of the processor using a simulate request.
|
||||
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _ingest/pipeline/_simulate
|
||||
{
|
||||
"pipeline" :
|
||||
{
|
||||
"description": "monthly date-time index naming",
|
||||
"processors" : [
|
||||
{
|
||||
"date_index_name" : {
|
||||
"field" : "date1",
|
||||
"index_name_prefix" : "myindex-",
|
||||
"date_rounding" : "M"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"docs": [
|
||||
{
|
||||
"_source": {
|
||||
"date1": "2016-04-25T12:02:01.789Z"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
and the result:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"docs" : [
|
||||
{
|
||||
"doc" : {
|
||||
"_id" : "_id",
|
||||
"_index" : "<myindex-{2016-04-25||/M{yyyy-MM-dd|UTC}}>",
|
||||
"_type" : "_type",
|
||||
"_source" : {
|
||||
"date1" : "2016-04-25T12:02:01.789Z"
|
||||
},
|
||||
"_ingest" : {
|
||||
"timestamp" : "2016-11-08T19:43:03.850+0000"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/]
|
||||
|
||||
The above example shows that `_index` was set to `<myindex-{2016-04-25||/M{yyyy-MM-dd|UTC}}>`. Elasticsearch
|
||||
understands this to mean `2016-04-01` as is explained in the <<date-math-index-names, date math index name documentation>>
|
||||
|
||||
[[date-index-name-options]]
|
||||
.Date index name options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to get the date or timestamp from.
|
||||
| `index_name_prefix` | no | - | A prefix of the index name to be prepended before the printed date. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `date_rounding` | yes | - | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). Supports <<accessing-template-fields,template snippets>>.
|
||||
| `date_formats` | no | yyyy-MM-dd'T'HH:mm:ss.SSSZ | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
|
||||
| `timezone` | no | UTC | The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names.
|
||||
| `locale` | no | ENGLISH | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.
|
||||
| `index_name_format`  | no       | yyyy-MM-dd                 | The format to be used when printing the parsed date into the index name. A valid Joda pattern is expected here. Supports <<accessing-template-fields,template snippets>>.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
|
@ -0,0 +1,65 @@
|
|||
[[date-processor]]
|
||||
=== Date Processor
|
||||
|
||||
Parses dates from fields, and then uses the date or timestamp as the timestamp for the document.
|
||||
By default, the date processor adds the parsed date as a new field called `@timestamp`. You can specify a
|
||||
different field by setting the `target_field` configuration parameter. Multiple date formats are supported
|
||||
as part of the same date processor definition. They will be used sequentially to attempt parsing the date field,
|
||||
in the same order they were defined as part of the processor definition.
|
||||
|
||||
[[date-options]]
|
||||
.Date options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to get the date from.
|
||||
| `target_field` | no | @timestamp | The field that will hold the parsed date.
|
||||
| `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
|
||||
| `timezone` | no | UTC | The timezone to use when parsing the date. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <<accessing-template-fields,template snippets>>.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"description" : "...",
|
||||
"processors" : [
|
||||
{
|
||||
"date" : {
|
||||
"field" : "initial_date",
|
||||
"target_field" : "timestamp",
|
||||
"formats" : ["dd/MM/yyyy hh:mm:ss"],
|
||||
"timezone" : "Europe/Amsterdam"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The `timezone` and `locale` processor parameters are templated. This means that their values can be
|
||||
extracted from fields within documents. The example below shows how to extract the locale/timezone
|
||||
details from existing fields, `my_timezone` and `my_locale`, in the ingested document that contain
|
||||
the timezone and locale values.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"description" : "...",
|
||||
"processors" : [
|
||||
{
|
||||
"date" : {
|
||||
"field" : "initial_date",
|
||||
"target_field" : "timestamp",
|
||||
"formats" : ["ISO8601"],
|
||||
"timezone" : "{{my_timezone}}",
|
||||
"locale" : "{{my_locale}}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,191 @@
|
|||
[[dissect-processor]]
|
||||
=== Dissect Processor
|
||||
|
||||
Similar to the <<grok-processor,Grok Processor>>, dissect also extracts structured fields out of a single text field
|
||||
within a document. However unlike the <<grok-processor,Grok Processor>>, dissect does not use
|
||||
https://en.wikipedia.org/wiki/Regular_expression[Regular Expressions]. This allows dissect's syntax to be simple and for
|
||||
some cases faster than the <<grok-processor,Grok Processor>>.
|
||||
|
||||
Dissect matches a single text field against a defined pattern.
|
||||
|
||||
For example the following pattern:
|
||||
[source,txt]
|
||||
--------------------------------------------------
|
||||
%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}
|
||||
--------------------------------------------------
|
||||
will match a log line of this format:
|
||||
[source,txt]
|
||||
--------------------------------------------------
|
||||
1.2.3.4 - - [30/Apr/1998:22:00:52 +0000] \"GET /english/venues/cities/images/montpellier/18.gif HTTP/1.0\" 200 3171
|
||||
--------------------------------------------------
|
||||
and result in a document with the following fields:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
"doc": {
|
||||
"_index": "_index",
|
||||
"_type": "_type",
|
||||
"_id": "_id",
|
||||
"_source": {
|
||||
"request": "/english/venues/cities/images/montpellier/18.gif",
|
||||
"auth": "-",
|
||||
"ident": "-",
|
||||
"verb": "GET",
|
||||
"@timestamp": "30/Apr/1998:22:00:52 +0000",
|
||||
"size": "3171",
|
||||
"clientip": "1.2.3.4",
|
||||
"httpversion": "1.0",
|
||||
"status": "200"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
A dissect pattern is defined by the parts of the string that will be discarded. In the example above the first part
|
||||
to be discarded is a single space. Dissect finds this space, then sets the value of `clientip` to everything up
|
||||
until that space.
|
||||
Later dissect matches the `[` and then `]` and then assigns `@timestamp` to everything in-between `[` and `]`.
|
||||
Paying special attention the parts of the string to discard will help build successful dissect patterns.
|
||||
|
||||
Successful matches require all keys in a pattern to have a value. If any of the `%{keyname}` defined in the pattern do
|
||||
not have a value, then an exception is thrown and may be handled by the <<handling-failure-in-pipelines,on_failure>> directive.
|
||||
An empty key `%{}` or a <<dissect-modifier-named-skip-key, named skip key>> can be used to match values, but exclude the value from
|
||||
the final document. All matched values are represented as string data types. The <<convert-processor, convert processor>>
|
||||
may be used to convert to expected data type.
|
||||
|
||||
Dissect also supports <<dissect-key-modifiers,key modifiers>> that can change dissect's default
|
||||
behavior. For example you can instruct dissect to ignore certain fields, append fields, skip over padding, etc.
|
||||
See <<dissect-key-modifiers, below>> for more information.
|
||||
|
||||
[[dissect-options]]
|
||||
.Dissect Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to dissect
|
||||
| `pattern` | yes | - | The pattern to apply to the field
|
||||
| `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields.
|
||||
| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"dissect": {
|
||||
"field": "message",
|
||||
"pattern" : "%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
[[dissect-key-modifiers]]
|
||||
==== Dissect key modifiers
|
||||
Key modifiers can change the default behavior for dissection. Key modifiers may be found on the left or right
|
||||
of the `%{keyname}` always inside the `%{` and `}`. For example `%{+keyname ->}` has the append and right padding
|
||||
modifiers.
|
||||
|
||||
.Dissect Key Modifiers
|
||||
[options="header"]
|
||||
|======
|
||||
| Modifier | Name | Position | Example | Description | Details
|
||||
| `->` | Skip right padding | (far) right | `%{keyname1->}` | Skips any repeated characters to the right | <<dissect-modifier-skip-right-padding,link>>
|
||||
| `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <<dissect-modifier-append-key,link>>
|
||||
| `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <<dissect-modifier-append-key-with-order,link>>
|
||||
| `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <<dissect-modifier-named-skip-key,link>>
|
||||
| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <<dissect-modifier-reference-keys,link>>
|
||||
|======
|
||||
|
||||
[[dissect-modifier-skip-right-padding]]
|
||||
===== Right padding modifier (`->`)
|
||||
|
||||
The algorithm that performs the dissection is very strict in that it requires all characters in the pattern to match
|
||||
the source string. For example, the pattern `%{fookey} %{barkey}` (1 space), will match the string "foo{nbsp}bar"
|
||||
(1 space), but will not match the string "foo{nbsp}{nbsp}bar" (2 spaces) since the pattern has only 1 space and the
|
||||
source string has 2 spaces.
|
||||
|
||||
The right padding modifier helps with this case. Adding the right padding modifier to the pattern `%{fookey->} %{barkey}`,
|
||||
it will now match "foo{nbsp}bar" (1 space) and "foo{nbsp}{nbsp}bar" (2 spaces)
|
||||
and even "foo{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}bar" (10 spaces).
|
||||
|
||||
Use the right padding modifier to allow for repetition of the characters after a `%{keyname->}`.
|
||||
|
||||
The right padding modifier may be placed on any key with any other modifiers. It should always be the furthest right
|
||||
modifier. For example: `%{+keyname/1->}` and `%{->}`
|
||||
|
||||
Right padding modifier example
|
||||
|======
|
||||
| *Pattern* | `%{ts->} %{level}`
|
||||
| *Input* | 1998-08-10T17:15:42,466{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}WARN
|
||||
| *Result* a|
|
||||
* ts = 1998-08-10T17:15:42,466
|
||||
* level = WARN
|
||||
|======
|
||||
|
||||
The right padding modifier may be used with an empty key to help skip unwanted data. For example, the same input string, but wrapped with brackets requires the use of an empty right padded key to achieve the same result.
|
||||
|
||||
Right padding modifier with empty key example
|
||||
|======
|
||||
| *Pattern* | `[%{ts}]%{->}[%{level}]`
|
||||
| *Input* | [1998-08-10T17:15:42,466]{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}{nbsp}[WARN]
|
||||
| *Result* a|
|
||||
* ts = 1998-08-10T17:15:42,466
|
||||
* level = WARN
|
||||
|======
|
||||
|
||||
===== Append modifier (`+`)
|
||||
[[dissect-modifier-append-key]]
|
||||
Dissect supports appending two or more results together for the output.
|
||||
Values are appended left to right. An append separator can be specified.
|
||||
In this example the append_separator is defined as a space.
|
||||
|
||||
Append modifier example
|
||||
|======
|
||||
| *Pattern* | `%{+name} %{+name} %{+name} %{+name}`
|
||||
| *Input* | john jacob jingleheimer schmidt
|
||||
| *Result* a|
|
||||
* name = john jacob jingleheimer schmidt
|
||||
|======
|
||||
|
||||
===== Append with order modifier (`+` and `/n`)
|
||||
[[dissect-modifier-append-key-with-order]]
|
||||
Dissect supports appending two or more results together for the output.
|
||||
Values are appended based on the order defined (`/n`). An append separator can be specified.
|
||||
In this example the append_separator is defined as a comma.
|
||||
|
||||
Append with order modifier example
|
||||
|======
|
||||
| *Pattern* | `%{+name/2} %{+name/4} %{+name/3} %{+name/1}`
|
||||
| *Input* | john jacob jingleheimer schmidt
|
||||
| *Result* a|
|
||||
* name = schmidt,john,jingleheimer,jacob
|
||||
|======
|
||||
|
||||
===== Named skip key (`?`)
|
||||
[[dissect-modifier-named-skip-key]]
|
||||
Dissect supports ignoring matches in the final result. This can be done with an empty key `%{}`, but for readability
|
||||
it may be desired to give that empty key a name.
|
||||
|
||||
Named skip key modifier example
|
||||
|======
|
||||
| *Pattern* | `%{clientip} %{?ident} %{?auth} [%{@timestamp}]`
|
||||
| *Input* | 1.2.3.4 - - [30/Apr/1998:22:00:52 +0000]
|
||||
| *Result* a|
|
||||
* ip = 1.2.3.4
|
||||
* @timestamp = 30/Apr/1998:22:00:52 +0000
|
||||
|======
|
||||
|
||||
===== Reference keys (`*` and `&`)
|
||||
[[dissect-modifier-reference-keys]]
|
||||
Dissect support using parsed values as the key/value pairings for the structured content. Imagine a system that
|
||||
partially logs in key/value pairs. Reference keys allow you to maintain that key/value relationship.
|
||||
|
||||
Reference key modifier example
|
||||
|======
|
||||
| *Pattern* | `[%{ts}] [%{level}] %{*p1}:%{&p1} %{*p2}:%{&p2}`
|
||||
| *Input* | [2018-08-10T17:15:42,466] [ERR] ip:1.2.3.4 error:REFUSED
|
||||
| *Result* a|
|
||||
* ts = 1998-08-10T17:15:42,466
|
||||
* level = ERR
|
||||
* ip = 1.2.3.4
|
||||
* error = REFUSED
|
||||
|======
|
|
@ -0,0 +1,119 @@
|
|||
[[dot-expand-processor]]
|
||||
=== Dot Expander Processor
|
||||
|
||||
Expands a field with dots into an object field. This processor allows fields
|
||||
with dots in the name to be accessible by other processors in the pipeline.
|
||||
Otherwise these <<accessing-data-in-pipelines,fields>> can't be accessed by any processor.
|
||||
|
||||
[[dot-expender-options]]
|
||||
.Dot Expand Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to expand into an object field
|
||||
| `path`  | no       | -       | The field that contains the field to expand. Only required if the field to expand is part of another object field, because the `field` option can only understand leaf fields.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"dot_expander": {
|
||||
"field": "foo.bar"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
For example the dot expand processor would turn this document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foo.bar" : "value"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
into:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foo" : {
|
||||
"bar" : "value"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
If there is already a `bar` field nested under `foo` then
|
||||
this processor merges the `foo.bar` field into it. If the field is
|
||||
a scalar value then it will turn that field into an array field.
|
||||
|
||||
For example, the following document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foo.bar" : "value2",
|
||||
"foo" : {
|
||||
"bar" : "value1"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
is transformed by the `dot_expander` processor into:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foo" : {
|
||||
"bar" : ["value1", "value2"]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
If any field outside of the leaf field conflicts with a pre-existing field of the same name,
|
||||
then that field needs to be renamed first.
|
||||
|
||||
Consider the following document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foo": "value1",
|
||||
"foo.bar": "value2"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
Then the `foo` needs to be renamed first before the `dot_expander`
|
||||
processor is applied. So in order for the `foo.bar` field to properly
|
||||
be expanded into the `bar` field under the `foo` field the following
|
||||
pipeline should be used:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"processors" : [
|
||||
{
|
||||
"rename" : {
|
||||
"field" : "foo",
|
||||
"target_field" : "foo.bar""
|
||||
}
|
||||
},
|
||||
{
|
||||
"dot_expander": {
|
||||
"field": "foo.bar"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The reason for this is that Ingest doesn't know how to automatically cast
|
||||
a scalar field to an object field.
|
|
@ -0,0 +1,22 @@
|
|||
[[drop-processor]]
|
||||
=== Drop Processor
|
||||
Drops the document without raising any errors. This is useful to prevent the document from
|
||||
getting indexed based on some condition.
|
||||
|
||||
[[drop-options]]
|
||||
.Drop Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"drop": {
|
||||
"if" : "ctx.network_name == 'Guest'"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,25 @@
|
|||
[[fail-processor]]
|
||||
=== Fail Processor
|
||||
Raises an exception. This is useful for when
|
||||
you expect a pipeline to fail and want to relay a specific message
|
||||
to the requester.
|
||||
|
||||
[[fail-options]]
|
||||
.Fail Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `message` | yes | - | The error message thrown by the processor. Supports <<accessing-template-fields,template snippets>>.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"fail": {
|
||||
"if" : "ctx.tags.contains('production') != true",
|
||||
"message": "The production tag is not present, found tags: {{tags}}"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,160 @@
|
|||
[[foreach-processor]]
|
||||
=== Foreach Processor
|
||||
|
||||
Processes elements in an array of unknown length.
|
||||
|
||||
All processors can operate on elements inside an array, but if all elements of an array need to
|
||||
be processed in the same way, defining a processor for each element becomes cumbersome and tricky
|
||||
because it is likely that the number of elements in an array is unknown. For this reason the `foreach`
|
||||
processor exists. By specifying the field holding array elements and a processor that
|
||||
defines what should happen to each element, array fields can easily be preprocessed.
|
||||
|
||||
A processor inside the foreach processor works in the array element context and puts that in the ingest metadata
|
||||
under the `_ingest._value` key. If the array element is a json object it holds all immediate fields of that json object.
|
||||
and if the array element is a simple value, `_ingest._value` just holds that value. Note that if a processor prior to the
|
||||
`foreach` processor used `_ingest._value` key then the specified value will not be available to the processor inside
|
||||
the `foreach` processor. The `foreach` processor does restore the original value, so that value is available to processors
|
||||
after the `foreach` processor.
|
||||
|
||||
Note that any other field from the document are accessible and modifiable like with all other processors. This processor
|
||||
just puts the current array element being read into `_ingest._value` ingest metadata attribute, so that it may be
|
||||
pre-processed.
|
||||
|
||||
If the `foreach` processor fails to process an element inside the array, and no `on_failure` processor has been specified,
|
||||
then it aborts the execution and leaves the array unmodified.
|
||||
|
||||
[[foreach-options]]
|
||||
.Foreach Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The array field
|
||||
| `processor` | yes | - | The processor to execute against each field
|
||||
| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
Assume the following document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"values" : ["foo", "bar", "baz"]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
When this `foreach` processor operates on this sample document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foreach" : {
|
||||
"field" : "values",
|
||||
"processor" : {
|
||||
"uppercase" : {
|
||||
"field" : "_ingest._value"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
Then the document will look like this after preprocessing:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"values" : ["FOO", "BAR", "BAZ"]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
Let's take a look at another example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"persons" : [
|
||||
{
|
||||
"id" : "1",
|
||||
"name" : "John Doe"
|
||||
},
|
||||
{
|
||||
"id" : "2",
|
||||
"name" : "Jane Doe"
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
In this case, the `id` field needs to be removed,
|
||||
so the following `foreach` processor is used:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foreach" : {
|
||||
"field" : "persons",
|
||||
"processor" : {
|
||||
"remove" : {
|
||||
"field" : "_ingest._value.id"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
After preprocessing the result is:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"persons" : [
|
||||
{
|
||||
"name" : "John Doe"
|
||||
},
|
||||
{
|
||||
"name" : "Jane Doe"
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The wrapped processor can have a `on_failure` definition.
|
||||
For example, the `id` field may not exist on all person objects.
|
||||
Instead of failing the index request, you can use an `on_failure`
|
||||
block to send the document to the 'failure_index' index for later inspection:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"foreach" : {
|
||||
"field" : "persons",
|
||||
"processor" : {
|
||||
"remove" : {
|
||||
"field" : "_value.id",
|
||||
"on_failure" : [
|
||||
{
|
||||
"set" : {
|
||||
"field", "_index",
|
||||
"value", "failure_index"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
In this example, if the `remove` processor does fail, then
|
||||
the array elements that have been processed thus far will
|
||||
be updated.
|
||||
|
||||
Another advanced example can be found in the {plugins}/ingest-attachment-with-arrays.html[attachment processor documentation].
|
|
@ -0,0 +1,323 @@
|
|||
[[grok-processor]]
|
||||
=== Grok Processor
|
||||
|
||||
Extracts structured fields out of a single text field within a document. You choose which field to
|
||||
extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular
|
||||
expression that supports aliased expressions that can be reused.
|
||||
|
||||
This tool is perfect for syslog logs, apache and other webserver logs, mysql logs, and in general, any log format
|
||||
that is generally written for humans and not computer consumption.
|
||||
This processor comes packaged with many
|
||||
https://github.com/elastic/elasticsearch/blob/{branch}/libs/grok/src/main/resources/patterns[reusable patterns].
|
||||
|
||||
If you need help building patterns to match your logs, you will find the {kibana-ref}/xpack-grokdebugger.html[Grok Debugger] tool quite useful! The Grok Debugger is an {xpack} feature under the Basic License and is therefore *free to use*. The Grok Constructor at <http://grokconstructor.appspot.com/> is also a useful tool.
|
||||
|
||||
[[grok-basics]]
|
||||
==== Grok Basics
|
||||
|
||||
Grok sits on top of regular expressions, so any regular expressions are valid in grok as well.
|
||||
The regular expression library is Oniguruma, and you can see the full supported regexp syntax
|
||||
https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma site].
|
||||
|
||||
Grok works by leveraging this regular expression language to allow naming existing patterns and combining them into more
|
||||
complex patterns that match your fields.
|
||||
|
||||
The syntax for reusing a grok pattern comes in three forms: `%{SYNTAX:SEMANTIC}`, `%{SYNTAX}`, `%{SYNTAX:SEMANTIC:TYPE}`.
|
||||
|
||||
The `SYNTAX` is the name of the pattern that will match your text. For example, `3.44` will be matched by the `NUMBER`
|
||||
pattern and `55.3.244.1` will be matched by the `IP` pattern. The syntax is how you match. `NUMBER` and `IP` are both
|
||||
patterns that are provided within the default patterns set.
|
||||
|
||||
The `SEMANTIC` is the identifier you give to the piece of text being matched. For example, `3.44` could be the
|
||||
duration of an event, so you could call it simply `duration`. Further, a string `55.3.244.1` might identify
|
||||
the `client` making a request.
|
||||
|
||||
The `TYPE` is the type to which you wish to cast your named field. `int`, `long`, `double`, `float` and `boolean` are supported types for coercion.
|
||||
|
||||
For example, you might want to match the following text:
|
||||
|
||||
[source,txt]
|
||||
--------------------------------------------------
|
||||
3.44 55.3.244.1
|
||||
--------------------------------------------------
|
||||
|
||||
You may know that the message in the example is a number followed by an IP address. You can match this text by using the following
|
||||
Grok expression.
|
||||
|
||||
[source,txt]
|
||||
--------------------------------------------------
|
||||
%{NUMBER:duration} %{IP:client}
|
||||
--------------------------------------------------
|
||||
|
||||
[[using-grok]]
|
||||
==== Using the Grok Processor in a Pipeline
|
||||
|
||||
[[grok-options]]
|
||||
.Grok Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to use for grok expression parsing
|
||||
| `patterns` | yes | - | An ordered list of grok expression to match and extract named captures with. Returns on the first expression in the list that matches.
|
||||
| `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition.
|
||||
| `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched.
|
||||
| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
Here is an example of using the provided patterns to extract out and name structured fields from a string field in
|
||||
a document.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"message": "55.3.244.1 GET /index.html 15824 0.043"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The pattern for this could be:
|
||||
|
||||
[source,txt]
|
||||
--------------------------------------------------
|
||||
%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
|
||||
--------------------------------------------------
|
||||
|
||||
Here is an example pipeline for processing the above document by using Grok:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"description" : "...",
|
||||
"processors": [
|
||||
{
|
||||
"grok": {
|
||||
"field": "message",
|
||||
"patterns": ["%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}"]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
This pipeline will insert these named captures as new fields within the document, like so:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"message": "55.3.244.1 GET /index.html 15824 0.043",
|
||||
"client": "55.3.244.1",
|
||||
"method": "GET",
|
||||
"request": "/index.html",
|
||||
"bytes": 15824,
|
||||
"duration": "0.043"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
[[custom-patterns]]
|
||||
==== Custom Patterns
|
||||
|
||||
The Grok processor comes pre-packaged with a base set of patterns. These patterns may not always have
|
||||
what you are looking for. Patterns have a very basic format. Each entry has a name and the pattern itself.
|
||||
|
||||
You can add your own patterns to a processor definition under the `pattern_definitions` option.
|
||||
Here is an example of a pipeline specifying custom pattern definitions:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"description" : "...",
|
||||
"processors": [
|
||||
{
|
||||
"grok": {
|
||||
"field": "message",
|
||||
"patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"],
|
||||
"pattern_definitions" : {
|
||||
"FAVORITE_DOG" : "beagle",
|
||||
"RGB" : "RED|GREEN|BLUE"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
[[trace-match]]
|
||||
==== Providing Multiple Match Patterns
|
||||
|
||||
Sometimes one pattern is not enough to capture the potential structure of a field. Let's assume we
|
||||
want to match all messages that contain your favorite pet breeds of either cats or dogs. One way to accomplish
|
||||
this is to provide two distinct patterns that can be matched, instead of one really complicated expression capturing
|
||||
the same `or` behavior.
|
||||
|
||||
Here is an example of such a configuration executed against the simulate API:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _ingest/pipeline/_simulate
|
||||
{
|
||||
"pipeline": {
|
||||
"description" : "parse multiple patterns",
|
||||
"processors": [
|
||||
{
|
||||
"grok": {
|
||||
"field": "message",
|
||||
"patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"],
|
||||
"pattern_definitions" : {
|
||||
"FAVORITE_DOG" : "beagle",
|
||||
"FAVORITE_CAT" : "burmese"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"docs":[
|
||||
{
|
||||
"_source": {
|
||||
"message": "I love burmese cats!"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
response:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"docs": [
|
||||
{
|
||||
"doc": {
|
||||
"_type": "_type",
|
||||
"_index": "_index",
|
||||
"_id": "_id",
|
||||
"_source": {
|
||||
"message": "I love burmese cats!",
|
||||
"pet": "burmese"
|
||||
},
|
||||
"_ingest": {
|
||||
"timestamp": "2016-11-08T19:43:03.850+0000"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/]
|
||||
|
||||
Both patterns will set the field `pet` with the appropriate match, but what if we want to trace which of our
|
||||
patterns matched and populated our fields? We can do this with the `trace_match` parameter. Here is the output of
|
||||
that same pipeline, but with `"trace_match": true` configured:
|
||||
|
||||
////
|
||||
Hidden setup for example:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _ingest/pipeline/_simulate
|
||||
{
|
||||
"pipeline": {
|
||||
"description" : "parse multiple patterns",
|
||||
"processors": [
|
||||
{
|
||||
"grok": {
|
||||
"field": "message",
|
||||
"patterns": ["%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}"],
|
||||
"trace_match": true,
|
||||
"pattern_definitions" : {
|
||||
"FAVORITE_DOG" : "beagle",
|
||||
"FAVORITE_CAT" : "burmese"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"docs":[
|
||||
{
|
||||
"_source": {
|
||||
"message": "I love burmese cats!"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"docs": [
|
||||
{
|
||||
"doc": {
|
||||
"_type": "_type",
|
||||
"_index": "_index",
|
||||
"_id": "_id",
|
||||
"_source": {
|
||||
"message": "I love burmese cats!",
|
||||
"pet": "burmese"
|
||||
},
|
||||
"_ingest": {
|
||||
"_grok_match_index": "1",
|
||||
"timestamp": "2016-11-08T19:43:03.850+0000"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/2016-11-08T19:43:03.850\+0000/$body.docs.0.doc._ingest.timestamp/]
|
||||
|
||||
In the above response, you can see that the index of the pattern that matched was `"1"`. This is to say that it was the
|
||||
second (index starts at zero) pattern in `patterns` to match.
|
||||
|
||||
This trace metadata enables debugging which of the patterns matched. This information is stored in the ingest
|
||||
metadata and will not be indexed.
|
||||
|
||||
[[grok-processor-rest-get]]
|
||||
==== Retrieving patterns from REST endpoint
|
||||
|
||||
The Grok Processor comes packaged with its own REST endpoint for retrieving which patterns the processor is packaged with.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET _ingest/processor/grok
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
The above request will return a response body containing a key-value representation of the built-in patterns dictionary.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"patterns" : {
|
||||
"BACULA_CAPACITY" : "%{INT}{1,3}(,%{INT}{3})*",
|
||||
"PATH" : "(?:%{UNIXPATH}|%{WINPATH})",
|
||||
...
|
||||
  }
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
This can be useful to reference as the built-in patterns change across versions.
|
||||
|
||||
[[grok-watchdog]]
|
||||
==== Grok watchdog
|
||||
|
||||
Grok expressions that take too long to execute are interrupted and
|
||||
the grok processor then fails with an exception. The grok
|
||||
processor has a watchdog thread that determines when evaluation of
|
||||
a grok expression takes too long and is controlled by the following
|
||||
settings:
|
||||
|
||||
[[grok-watchdog-options]]
|
||||
.Grok watchdog settings
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Default | Description
|
||||
| `ingest.grok.watchdog.interval` | 1s | How often to check whether there are grok evaluations that take longer than the maximum allowed execution time.
|
||||
| `ingest.grok.watchdog.max_execution_time` | 1s | The maximum allowed execution of a grok expression evaluation.
|
||||
|======
|
|
@ -0,0 +1,29 @@
|
|||
[[gsub-processor]]
|
||||
=== Gsub Processor
|
||||
Converts a string field by applying a regular expression and a replacement.
|
||||
If the field is not a string, the processor will throw an exception.
|
||||
|
||||
[[gsub-options]]
|
||||
.Gsub Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to apply the replacement to
|
||||
| `pattern` | yes | - | The pattern to be replaced
|
||||
| `replacement` | yes | - | The string to replace the matching patterns with
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"gsub": {
|
||||
"field": "field1",
|
||||
"pattern": "\.",
|
||||
"replacement": "-"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,26 @@
|
|||
[[join-processor]]
|
||||
=== Join Processor
|
||||
Joins each element of an array into a single string using a separator character between each element.
|
||||
Throws an error when the field is not an array.
|
||||
|
||||
[[join-options]]
|
||||
.Join Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be separated
|
||||
| `separator` | yes | - | The separator character
|
||||
| `target_field` | no | `field` | The field to assign the joined value to, by default `field` is updated in-place
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"join": {
|
||||
"field": "joined_array_field",
|
||||
"separator": "-"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,88 @@
|
|||
[[json-processor]]
|
||||
=== JSON Processor
|
||||
Converts a JSON string into a structured JSON object.
|
||||
|
||||
[[json-options]]
|
||||
.Json Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be parsed
|
||||
| `target_field` | no | `field` | The field to insert the converted structured object into
|
||||
| `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
All JSON-supported types will be parsed (null, boolean, number, array, object, string).
|
||||
|
||||
Suppose you provide this configuration of the `json` processor:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"json" : {
|
||||
"field" : "string_source",
|
||||
"target_field" : "json_target"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
If the following document is processed:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"string_source": "{\"foo\": 2000}"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
after the `json` processor operates on it, it will look like:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"string_source": "{\"foo\": 2000}",
|
||||
"json_target": {
|
||||
"foo": 2000
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
If the following configuration is provided, omitting the optional `target_field` setting:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"json" : {
|
||||
"field" : "source_and_target"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
then after the `json` processor operates on this document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"source_and_target": "{\"foo\": 2000}"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
it will look like:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"source_and_target": {
|
||||
"foo": 2000
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
This illustrates that, unless it is explicitly named in the processor configuration, the `target_field`
|
||||
is the same field provided in the required `field` configuration.
|
|
@ -0,0 +1,37 @@
|
|||
[[kv-processor]]
|
||||
=== KV Processor
|
||||
This processor helps automatically parse messages (or specific event fields) which are of the foo=bar variety.
|
||||
|
||||
For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED`, you can parse those automatically by configuring:
|
||||
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"kv": {
|
||||
"field": "message",
|
||||
"field_split": " ",
|
||||
"value_split": "="
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
[[kv-options]]
|
||||
.Kv Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be parsed
|
||||
| `field_split` | yes | - | Regex pattern to use for splitting key-value pairs
|
||||
| `value_split` | yes | - | Regex pattern to use for splitting the key from the value within a key-value pair
|
||||
| `target_field` | no | `null` | The field to insert the extracted keys into. Defaults to the root of the document
|
||||
| `include_keys` | no | `null` | List of keys to filter and insert into document. Defaults to including all keys
|
||||
| `exclude_keys` | no | `null` | List of keys to exclude from document
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
| `prefix` | no | `null` | Prefix to be added to extracted keys
|
||||
| `trim_key` | no | `null` | String of characters to trim from extracted keys
|
||||
| `trim_value` | no | `null` | String of characters to trim from extracted values
|
||||
| `strip_brackets` | no | `false` | If `true` strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
|
@ -0,0 +1,24 @@
|
|||
[[lowercase-processor]]
|
||||
=== Lowercase Processor
|
||||
Converts a string to its lowercase equivalent.
|
||||
|
||||
[[lowercase-options]]
|
||||
.Lowercase Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to make lowercase
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"lowercase": {
|
||||
"field": "foo"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,114 @@
|
|||
[[pipeline-processor]]
|
||||
=== Pipeline Processor
|
||||
Executes another pipeline.
|
||||
|
||||
[[pipeline-options]]
|
||||
.Pipeline Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `name` | yes | - | The name of the pipeline to execute
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "inner-pipeline"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
An example of using this processor for nesting pipelines would be:
|
||||
|
||||
Define an inner pipeline:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/pipelineA
|
||||
{
|
||||
"description" : "inner pipeline",
|
||||
"processors" : [
|
||||
{
|
||||
"set" : {
|
||||
"field": "inner_pipeline_set",
|
||||
"value": "inner"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
Define another pipeline that uses the previously defined inner pipeline:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/pipelineB
|
||||
{
|
||||
"description" : "outer pipeline",
|
||||
"processors" : [
|
||||
{
|
||||
"pipeline" : {
|
||||
"name": "pipelineA"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set" : {
|
||||
"field": "outer_pipeline_set",
|
||||
"value": "outer"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
Now indexing a document while applying the outer pipeline will see the inner pipeline executed
|
||||
from the outer pipeline:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /myindex/_doc/1?pipeline=pipelineB
|
||||
{
|
||||
"field": "value"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
Response from the index request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"_index": "myindex",
|
||||
"_type": "_doc",
|
||||
"_id": "1",
|
||||
"_version": 1,
|
||||
"result": "created",
|
||||
"_shards": {
|
||||
"total": 2,
|
||||
"successful": 1,
|
||||
"failed": 0
|
||||
},
|
||||
"_seq_no": 66,
|
||||
"_primary_term": 1,
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||
|
||||
Indexed document:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"field": "value",
|
||||
"inner_pipeline_set": "inner",
|
||||
"outer_pipeline_set": "outer"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,37 @@
|
|||
[[remove-processor]]
|
||||
=== Remove Processor
|
||||
Removes existing fields. If one field doesn't exist, an exception will be thrown.
|
||||
|
||||
[[remove-options]]
|
||||
.Remove Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | Fields to be removed. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
Here is an example to remove a single field:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"remove": {
|
||||
"field": "user_agent"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
To remove multiple fields, you can use the following query:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"remove": {
|
||||
"field": ["user_agent", "url"]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,25 @@
|
|||
[[rename-processor]]
|
||||
=== Rename Processor
|
||||
Renames an existing field. If the field doesn't exist or the new name is already used, an exception will be thrown.
|
||||
|
||||
[[rename-options]]
|
||||
.Rename Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be renamed. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `target_field` | yes | - | The new name of the field. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"rename": {
|
||||
"field": "provider",
|
||||
"target_field": "cloud.provider"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,108 @@
|
|||
[[script-processor]]
|
||||
=== Script Processor
|
||||
|
||||
Allows inline and stored scripts to be executed within ingest pipelines.
|
||||
|
||||
See <<modules-scripting-using, How to use scripts>> to learn more about writing scripts. The Script Processor
|
||||
leverages caching of compiled scripts for improved performance. Since the
|
||||
script specified within the processor is potentially re-compiled per document, it is important
|
||||
to understand how script caching works. To learn more about
|
||||
caching see <<modules-scripting-using-caching, Script Caching>>.
|
||||
|
||||
[[script-options]]
|
||||
.Script Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `lang` | no | "painless" | The scripting language
|
||||
| `id` | no | - | The stored script id to refer to
|
||||
| `source` | no | - | An inline script to be executed
|
||||
| `params` | no | - | Script Parameters
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
One of `id` or `source` options must be provided in order to properly reference a script to execute.
|
||||
|
||||
You can access the current ingest document from within the script context by using the `ctx` variable.
|
||||
|
||||
The following example sets a new field called `field_a_plus_b_times_c` to be the sum of two existing
|
||||
numeric fields `field_a` and `field_b` multiplied by the parameter `param_c`:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"source": "ctx.field_a_plus_b_times_c = (ctx.field_a + ctx.field_b) * params.param_c",
|
||||
"params": {
|
||||
"param_c": 10
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
It is possible to use the Script Processor to manipulate document metadata like `_index` and `_type` during
|
||||
ingestion. Here is an example of an Ingest Pipeline that sets the index to `my_index` and the type to `_doc` no matter what
|
||||
was provided in the original index request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _ingest/pipeline/my_index
|
||||
{
|
||||
"description": "use index:my_index and type:_doc",
|
||||
"processors": [
|
||||
{
|
||||
"script": {
|
||||
"source": """
|
||||
ctx._index = 'my_index';
|
||||
ctx._type = '_doc';
|
||||
"""
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
Using the above pipeline, we can attempt to index a document into the `any_index` index.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT any_index/_doc/1?pipeline=my_index
|
||||
{
|
||||
"message": "text"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
The response from the above index request:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"_index": "my_index",
|
||||
"_type": "_doc",
|
||||
"_id": "1",
|
||||
"_version": 1,
|
||||
"result": "created",
|
||||
"_shards": {
|
||||
"total": 2,
|
||||
"successful": 1,
|
||||
"failed": 0
|
||||
},
|
||||
"_seq_no": 89,
|
||||
"_primary_term": 1
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
|
||||
|
||||
In the above response, you can see that our document was actually indexed into `my_index` instead of
|
||||
`any_index`. This type of manipulation is often convenient in pipelines that have various branches of transformation,
|
||||
and depending on the progress made, indexed into different indices.
|
||||
|
||||
[[set-processor]]
|
||||
=== Set Processor
|
||||
Sets one field and associates it with the specified value. If the field already exists,
|
||||
its value will be replaced with the provided one.
|
|
@ -0,0 +1,34 @@
|
|||
[[ingest-node-set-security-user-processor]]
|
||||
=== Set Security User Processor
|
||||
Sets user-related details (such as `username`, `roles`, `email`, `full_name`
|
||||
and `metadata`) from the current
|
||||
authenticated user to the current document by pre-processing the ingest.
|
||||
|
||||
IMPORTANT: Requires an authenticated user for the index request.
|
||||
|
||||
[[set-security-user-options]]
|
||||
.Set Security User Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to store the user information into.
|
||||
| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
The following example adds all user details for the current authenticated user
|
||||
to the `user` field for all documents that are processed by this pipeline:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"processors" : [
|
||||
{
|
||||
"set_security_user": {
|
||||
"field": "user"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,21 @@
|
|||
[[set-options]]
|
||||
.Set Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to insert, upsert, or update. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `value` | yes | - | The value to be set for the field. Supports <<accessing-template-fields,template snippets>>.
|
||||
| `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched.
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"set": {
|
||||
"field": "host.os.name",
|
||||
"value": "{{os}}"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,27 @@
|
|||
[[sort-processor]]
|
||||
=== Sort Processor
|
||||
Sorts the elements of an array ascending or descending. Homogeneous arrays of numbers will be sorted
|
||||
numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically.
|
||||
Throws an error when the field is not an array.
|
||||
|
||||
[[sort-options]]
|
||||
.Sort Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to be sorted
|
||||
| `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`.
|
||||
| `target_field` | no | `field` | The field to assign the sorted value to, by default `field` is updated in-place
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"sort": {
|
||||
"field": "array_field_to_sort",
|
||||
"order": "desc"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,27 @@
|
|||
[[split-processor]]
|
||||
=== Split Processor
|
||||
Splits a field into an array using a separator character. Only works on string fields.
|
||||
|
||||
[[split-options]]
|
||||
.Split Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to split
|
||||
| `separator` | yes | - | A regex which matches the separator, e.g. `,` or `\s+`
|
||||
| `target_field` | no | `field` | The field to assign the split value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"split": {
|
||||
"field": "my_field",
|
||||
"separator": "\\s+" <1>
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
<1> Treat all consecutive whitespace characters as a single separator
|
|
@ -0,0 +1,26 @@
|
|||
[[trim-processor]]
|
||||
=== Trim Processor
|
||||
Trims whitespace from field.
|
||||
|
||||
NOTE: This only works on leading and trailing whitespace.
|
||||
|
||||
[[trim-options]]
|
||||
.Trim Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The string-valued field to trim whitespace from
|
||||
| `target_field` | no | `field` | The field to assign the trimmed value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"trim": {
|
||||
"field": "foo"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,24 @@
|
|||
[[uppercase-processor]]
|
||||
=== Uppercase Processor
|
||||
Converts a string to its uppercase equivalent.
|
||||
|
||||
[[uppercase-options]]
|
||||
.Uppercase Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to make uppercase
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"uppercase": {
|
||||
"field": "foo"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -0,0 +1,24 @@
|
|||
[[urldecode-processor]]
|
||||
=== URL Decode Processor
|
||||
URL-decodes a string.
|
||||
|
||||
[[urldecode-options]]
|
||||
.URL Decode Options
|
||||
[options="header"]
|
||||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `field` | yes | - | The field to decode
|
||||
| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place
|
||||
| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|
||||
include::common-options.asciidoc[]
|
||||
|======
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"urldecode": {
|
||||
"field": "my_url_to_decode"
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[delete-license]]
|
||||
=== Delete License API
|
||||
=== Delete license API
|
||||
++++
|
||||
<titleabbrev>Delete license</titleabbrev>
|
||||
++++
|
||||
|
||||
This API enables you to delete licensing information.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[get-basic-status]]
|
||||
=== Get Basic Status API
|
||||
=== Get basic status API
|
||||
++++
|
||||
<titleabbrev>Get basic status</titleabbrev>
|
||||
++++
|
||||
|
||||
This API enables you to check the status of your basic license.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[get-license]]
|
||||
=== Get License API
|
||||
=== Get license API
|
||||
++++
|
||||
<titleabbrev>Get license</titleabbrev>
|
||||
++++
|
||||
|
||||
This API enables you to retrieve licensing information.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[get-trial-status]]
|
||||
=== Get Trial Status API
|
||||
=== Get trial status API
|
||||
++++
|
||||
<titleabbrev>Get trial status</titleabbrev>
|
||||
++++
|
||||
|
||||
This API enables you to check the status of your trial license.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[start-basic]]
|
||||
=== Start Basic API
|
||||
=== Start basic API
|
||||
++++
|
||||
<titleabbrev>Start basic</titleabbrev>
|
||||
++++
|
||||
|
||||
This API starts an indefinite basic license.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[start-trial]]
|
||||
=== Start Trial API
|
||||
=== Start trial API
|
||||
++++
|
||||
<titleabbrev>Start trial</titleabbrev>
|
||||
++++
|
||||
|
||||
This API starts a 30-day trial license.
|
||||
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[update-license]]
|
||||
=== Update License API
|
||||
=== Update license API
|
||||
++++
|
||||
<titleabbrev>Update license</titleabbrev>
|
||||
++++
|
||||
|
||||
This API enables you to update your license.
|
||||
|
||||
|
@ -40,12 +43,12 @@ https://www.elastic.co/subscriptions.
|
|||
[float]
|
||||
==== Authorization
|
||||
|
||||
If {security} is enabled, you need `manage` cluster privileges to install the
|
||||
license.
|
||||
If {es} {security-features} are enabled, you need `manage` cluster privileges to
|
||||
install the license.
|
||||
|
||||
If {security} is enabled and you are installing a gold or platinum license, you
|
||||
must enable TLS on the transport networking layer before you install the license.
|
||||
See <<configuring-tls>>.
|
||||
If {es} {security-features} are enabled and you are installing a gold or platinum
|
||||
license, you must enable TLS on the transport networking layer before you
|
||||
install the license. See <<configuring-tls>>.
|
||||
|
||||
[float]
|
||||
==== Examples
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[migration-api-assistance]]
|
||||
=== Migration Assistance API
|
||||
=== Migration assistance API
|
||||
++++
|
||||
<titleabbrev>Migration assistance</titleabbrev>
|
||||
++++
|
||||
|
||||
The Migration Assistance API analyzes existing indices in the cluster and
|
||||
returns the information about indices that require some changes before the
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[migration-api-deprecation]]
|
||||
=== Deprecation Info APIs
|
||||
=== Deprecation info APIs
|
||||
++++
|
||||
<titleabbrev>Deprecation info</titleabbrev>
|
||||
++++
|
||||
|
||||
The deprecation API is to be used to retrieve information about different
|
||||
cluster, node, and index level settings that use deprecated features that will
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[migration-api-upgrade]]
|
||||
=== Migration Upgrade API
|
||||
=== Migration upgrade API
|
||||
++++
|
||||
<titleabbrev>Migration upgrade</titleabbrev>
|
||||
++++
|
||||
|
||||
The Migration Upgrade API performs the upgrade of internal indices to make them
|
||||
compatible with the next major version.
|
||||
|
|
|
@ -11,6 +11,7 @@ See also <<release-highlights>> and <<es-release-notes>>.
|
|||
|
||||
* <<breaking_70_aggregations_changes>>
|
||||
* <<breaking_70_cluster_changes>>
|
||||
* <<breaking_70_discovery_changes>>
|
||||
* <<breaking_70_indices_changes>>
|
||||
* <<breaking_70_mappings_changes>>
|
||||
* <<breaking_70_search_changes>>
|
||||
|
@ -44,6 +45,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
|
|||
include::migrate_7_0/aggregations.asciidoc[]
|
||||
include::migrate_7_0/analysis.asciidoc[]
|
||||
include::migrate_7_0/cluster.asciidoc[]
|
||||
include::migrate_7_0/discovery.asciidoc[]
|
||||
include::migrate_7_0/indices.asciidoc[]
|
||||
include::migrate_7_0/mappings.asciidoc[]
|
||||
include::migrate_7_0/search.asciidoc[]
|
||||
|
|
|
@ -88,10 +88,10 @@ When putting stored scripts, support for storing them with the deprecated `templ
|
|||
now removed. Scripts must be stored using the `script` context as mentioned in the documentation.
|
||||
|
||||
[float]
|
||||
==== Get Aliases API limitations when {security} is enabled removed
|
||||
==== Removed Get Aliases API limitations when {security-features} are enabled
|
||||
|
||||
The behavior and response codes of the get aliases API no longer vary
|
||||
depending on whether {security} is enabled. Previously a
|
||||
depending on whether {security-features} are enabled. Previously a
|
||||
404 - NOT FOUND (IndexNotFoundException) could be returned in case the
|
||||
current user was not authorized for any alias. An empty response with
|
||||
status 200 - OK is now returned instead at all times.
|
||||
|
|
|
@ -25,12 +25,3 @@ Clusters now have soft limits on the total number of open shards in the cluster
|
|||
based on the number of nodes and the `cluster.max_shards_per_node` cluster
|
||||
setting, to prevent accidental operations that would destabilize the cluster.
|
||||
More information can be found in the <<misc-cluster,documentation for that setting>>.
|
||||
|
||||
[float]
|
||||
==== Discovery configuration is required in production
|
||||
Production deployments of Elasticsearch now require at least one of the following settings
|
||||
to be specified in the `elasticsearch.yml` configuration file:
|
||||
|
||||
- `discovery.zen.ping.unicast.hosts`
|
||||
- `discovery.zen.hosts_provider`
|
||||
- `cluster.initial_master_nodes`
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
[float]
|
||||
[[breaking_70_discovery_changes]]
|
||||
=== Discovery changes
|
||||
|
||||
[float]
|
||||
==== Cluster bootstrapping is required if discovery is configured
|
||||
|
||||
The first time a cluster is started, `cluster.initial_master_nodes` must be set
|
||||
to perform cluster bootstrapping. It should contain the names of the
|
||||
master-eligible nodes in the initial cluster and be defined on every
|
||||
master-eligible node in the cluster. See <<discovery-settings,the discovery
|
||||
settings summary>> for an example, and the
|
||||
<<modules-discovery-bootstrap-cluster,cluster bootstrapping reference
|
||||
documentation>> describes this setting in more detail.
|
||||
|
||||
The `discovery.zen.minimum_master_nodes` setting is required during a rolling
|
||||
upgrade from 6.x, but can be removed in all other circumstances.
|
||||
|
||||
[float]
|
||||
==== Removing master-eligible nodes sometimes requires voting exclusions
|
||||
|
||||
If you wish to remove half or more of the master-eligible nodes from a cluster,
|
||||
you must first exclude the affected nodes from the voting configuration using
|
||||
the <<modules-discovery-adding-removing-nodes,voting config exclusions API>>.
|
||||
If you remove fewer than half of the master-eligible nodes at the same time,
|
||||
voting exclusions are not required. If you remove only master-ineligible nodes
|
||||
such as data-only nodes or coordinating-only nodes, voting exclusions are not
|
||||
required. Likewise, if you add nodes to the cluster, voting exclusions are not
|
||||
required.
|
||||
|
||||
[float]
|
||||
==== Discovery configuration is required in production
|
||||
|
||||
Production deployments of Elasticsearch now require at least one of the
|
||||
following settings to be specified in the `elasticsearch.yml` configuration
|
||||
file:
|
||||
|
||||
- `discovery.zen.ping.unicast.hosts`
|
||||
- `discovery.zen.hosts_provider`
|
||||
- `cluster.initial_master_nodes`
|
|
@ -54,13 +54,17 @@ An error will now be thrown when unknown configuration options are provided
|
|||
to similarities. Such unknown parameters were ignored before.
|
||||
|
||||
[float]
|
||||
==== deprecated `geo_shape` Prefix Tree indexing
|
||||
==== Changed default `geo_shape` indexing strategy
|
||||
|
||||
`geo_shape` types now default to using a vector indexing approach based on Lucene's new
|
||||
`LatLonShape` field type. This indexes shapes as a triangular mesh instead of decomposing
|
||||
them into individual grid cells. To index using legacy prefix trees `recursive` or `term`
|
||||
strategy must be explicitly defined. Note that these strategies are now deprecated and will
|
||||
be removed in a future version.
|
||||
them into individual grid cells. To index using legacy prefix trees the `tree` parameter
|
||||
must be explicitly set to one of `quadtree` or `geohash`. Note that these strategies are
|
||||
now deprecated and will be removed in a future version.
|
||||
|
||||
IMPORTANT NOTE: If using timed index creation from templates, the `geo_shape` mapping
|
||||
should also be changed in the template to explicitly define `tree` to one of `geohash`
|
||||
or `quadtree`. This will ensure compatibility with previously created indexes.
|
||||
|
||||
[float]
|
||||
==== deprecated `geo_shape` parameters
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-calendar-resource]]
|
||||
=== Calendar Resources
|
||||
=== Calendar resources
|
||||
|
||||
A calendar resource has the following properties:
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-close-job]]
|
||||
=== Close Jobs API
|
||||
=== Close jobs API
|
||||
++++
|
||||
<titleabbrev>Close Jobs</titleabbrev>
|
||||
<titleabbrev>Close jobs</titleabbrev>
|
||||
++++
|
||||
|
||||
Closes one or more jobs.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-datafeed-resource]]
|
||||
=== {dfeed-cap} Resources
|
||||
=== {dfeed-cap} resources
|
||||
|
||||
A {dfeed} resource has the following properties:
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-calendar-event]]
|
||||
=== Delete Events from Calendar API
|
||||
=== Delete events from calendar API
|
||||
++++
|
||||
<titleabbrev>Delete Events from Calendar</titleabbrev>
|
||||
<titleabbrev>Delete events from calendar</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes scheduled events from a calendar.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-calendar-job]]
|
||||
=== Delete Jobs from Calendar API
|
||||
=== Delete jobs from calendar API
|
||||
++++
|
||||
<titleabbrev>Delete Jobs from Calendar</titleabbrev>
|
||||
<titleabbrev>Delete jobs from calendar</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes jobs from a calendar.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-calendar]]
|
||||
=== Delete Calendar API
|
||||
=== Delete calendar API
|
||||
++++
|
||||
<titleabbrev>Delete Calendar</titleabbrev>
|
||||
<titleabbrev>Delete calendar</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes a calendar.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-datafeed]]
|
||||
=== Delete {dfeeds-cap} API
|
||||
=== Delete {dfeeds} API
|
||||
++++
|
||||
<titleabbrev>Delete {dfeeds-cap}</titleabbrev>
|
||||
<titleabbrev>Delete {dfeeds}</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes an existing {dfeed}.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-expired-data]]
|
||||
=== Delete Expired Data API
|
||||
=== Delete expired data API
|
||||
++++
|
||||
<titleabbrev>Delete Expired Data</titleabbrev>
|
||||
<titleabbrev>Delete expired data</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes expired and unused machine learning data.
|
||||
|
@ -14,9 +14,9 @@ Deletes expired and unused machine learning data.
|
|||
|
||||
==== Description
|
||||
|
||||
Deletes all job results, model snapshots and forecast data that have exceeded their
|
||||
`retention days` period.
|
||||
Machine Learning state documents that are not associated with any job are also deleted.
|
||||
Deletes all job results, model snapshots and forecast data that have exceeded
|
||||
their `retention days` period. Machine learning state documents that are not
|
||||
associated with any job are also deleted.
|
||||
|
||||
==== Authorization
|
||||
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-filter]]
|
||||
=== Delete Filter API
|
||||
=== Delete filter API
|
||||
++++
|
||||
<titleabbrev>Delete Filter</titleabbrev>
|
||||
<titleabbrev>Delete filter</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes a filter.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-forecast]]
|
||||
=== Delete Forecast API
|
||||
=== Delete forecast API
|
||||
++++
|
||||
<titleabbrev>Delete Forecast</titleabbrev>
|
||||
<titleabbrev>Delete forecast</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes forecasts from a {ml} job.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ml-delete-job]]
|
||||
=== Delete Jobs API
|
||||
=== Delete jobs API
|
||||
++++
|
||||
<titleabbrev>Delete Jobs</titleabbrev>
|
||||
<titleabbrev>Delete jobs</titleabbrev>
|
||||
++++
|
||||
|
||||
Deletes an existing anomaly detection job.
|
||||
|
@ -19,9 +19,9 @@ Deletes an existing anomaly detection job.
|
|||
All job configuration, model state and results are deleted.
|
||||
|
||||
IMPORTANT: Deleting a job must be done via this API only. Do not delete the
|
||||
job directly from the `.ml-*` indices using the Elasticsearch
|
||||
DELETE Document API. When {security} is enabled, make sure no `write`
|
||||
privileges are granted to anyone over the `.ml-*` indices.
|
||||
job directly from the `.ml-*` indices using the Elasticsearch delete document
|
||||
API. When {es} {security-features} are enabled, make sure no `write` privileges
|
||||
are granted to anyone over the `.ml-*` indices.
|
||||
|
||||
Before you can delete a job, you must delete the {dfeeds} that are associated
|
||||
with it. See <<ml-delete-datafeed,Delete {dfeeds-cap}>>. Unless the `force` parameter
|
||||
|
@ -47,8 +47,9 @@ separated list.
|
|||
|
||||
==== Authorization
|
||||
|
||||
You must have `manage_ml`, or `manage` cluster privileges to use this API.
|
||||
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
|
||||
If {es} {security-features} are enabled, you must have `manage_ml`, or `manage`
|
||||
cluster privileges to use this API.
|
||||
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
|
||||
|
||||
|
||||
==== Examples
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue