Merge branch 'master' into ccr
* master:
  [DOCS] Fix formatting error in Slack action
  Painless: Fix documentation links to use existing refs (#32335)
  Painless: Decouple PainlessLookupBuilder and Whitelists (#32346)
  [DOCS] Adds recommendation for xpack.security.enabled (#32345)
  [TEST] Mute ConvertProcessorTests.testConvertIntHexError
  [TEST] Fix failure due to exception message in java11 (#32321)
  [DOCS] Fixes typo in ML aggregations page
  [DOCS] Adds link from bucket_span property to common time units
  [ML][DOCS] Add documentation for detector rules and filters (#32013)
  Add opaque_id to index audit logging (#32260)
  Add 6.5.0 version to master
  fixes broken build for third-party-tests (#32353)
commit caefed4d33
@@ -1,9 +1,6 @@
 [[painless-contexts]]
 == Painless contexts
 
-:es_version: https://www.elastic.co/guide/en/elasticsearch/reference/master
-:xp_version: https://www.elastic.co/guide/en/x-pack/current
-
 A Painless script is evaluated within a context. Each context has values that
 are available as local variables, a whitelist that controls the available
 classes, and the methods and fields within those classes (API), and
@@ -18,41 +15,41 @@ specialized code may define new ways to use a Painless script.
 | Name | Painless Documentation
 | Elasticsearch Documentation
 | Update | <<painless-update-context, Painless Documentation>>
-| {es_version}/docs-update.html[Elasticsearch Documentation]
+| {ref}/docs-update.html[Elasticsearch Documentation]
 | Update by query | <<painless-update-by-query-context, Painless Documentation>>
-| {es_version}/docs-update-by-query.html[Elasticsearch Documentation]
+| {ref}/docs-update-by-query.html[Elasticsearch Documentation]
 | Reindex | <<painless-reindex-context, Painless Documentation>>
-| {es_version}/docs-reindex.html[Elasticsearch Documentation]
+| {ref}/docs-reindex.html[Elasticsearch Documentation]
 | Sort | <<painless-sort-context, Painless Documentation>>
-| {es_version}/search-request-sort.html[Elasticsearch Documentation]
+| {ref}/search-request-sort.html[Elasticsearch Documentation]
 | Similarity | <<painless-similarity-context, Painless Documentation>>
-| {es_version}/index-modules-similarity.html[Elasticsearch Documentation]
-| Weight | <<painless-similarity-context, Painless Documentation>>
-| {es_version}/index-modules-similarity.html[Elasticsearch Documentation]
+| {ref}/index-modules-similarity.html[Elasticsearch Documentation]
+| Weight | <<painless-weight-context, Painless Documentation>>
+| {ref}/index-modules-similarity.html[Elasticsearch Documentation]
 | Score | <<painless-score-context, Painless Documentation>>
-| {es_version}/query-dsl-function-score-query.html[Elasticsearch Documentation]
+| {ref}/query-dsl-function-score-query.html[Elasticsearch Documentation]
 | Field | <<painless-field-context, Painless Documentation>>
-| {es_version}/search-request-script-fields.html[Elasticsearch Documentation]
+| {ref}/search-request-script-fields.html[Elasticsearch Documentation]
 | Filter | <<painless-filter-context, Painless Documentation>>
-| {es_version}/query-dsl-script-query.html[Elasticsearch Documentation]
+| {ref}/query-dsl-script-query.html[Elasticsearch Documentation]
 | Minimum should match | <<painless-min-should-match-context, Painless Documentation>>
-| {es_version}/query-dsl-terms-set-query.html[Elasticsearch Documentation]
+| {ref}/query-dsl-terms-set-query.html[Elasticsearch Documentation]
 | Metric aggregation initialization | <<painless-metric-agg-init-context, Painless Documentation>>
-| {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
+| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
 | Metric aggregation map | <<painless-metric-agg-map-context, Painless Documentation>>
-| {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
+| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
 | Metric aggregation combine | <<painless-metric-agg-combine-context, Painless Documentation>>
-| {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
+| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
 | Metric aggregation reduce | <<painless-metric-agg-reduce-context, Painless Documentation>>
-| {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
+| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
 | Bucket aggregation | <<painless-bucket-agg-context, Painless Documentation>>
-| {es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation]
+| {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation]
 | Ingest processor | <<painless-ingest-processor-context, Painless Documentation>>
-| {es_version}/script-processor.html[Elasticsearch Documentation]
+| {ref}/script-processor.html[Elasticsearch Documentation]
 | Watcher condition | <<painless-watcher-condition-context, Painless Documentation>>
-| {xp_version}/condition-script.html[Elasticsearch Documentation]
+| {xpack-ref}/condition-script.html[Elasticsearch Documentation]
 | Watcher transform | <<painless-watcher-transform-context, Painless Documentation>>
-| {xp_version}/transform-script.html[Elasticsearch Documentation]
+| {xpack-ref}/transform-script.html[Elasticsearch Documentation]
 |====
 
 include::painless-contexts/index.asciidoc[]
@@ -2,7 +2,7 @@
 === Bucket aggregation context
 
 Use a Painless script in an
-{es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation]
+{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation]
 to calculate a value as a result in a bucket.
 
 *Variables*
@@ -2,7 +2,7 @@
 === Field context
 
 Use a Painless script to create a
-{es_version}/search-request-script-fields.html[script field] to return
+{ref}/search-request-script-fields.html[script field] to return
 a customized value for each document in the results of a query.
 
 *Variables*
@@ -14,7 +14,7 @@ a customized value for each document in the results of a query.
 Contains the fields of the specified document where each field is a
 `List` of values.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
+{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
 Contains extracted JSON in a `Map` and `List` structure for the fields
 existing in a stored document.
 
@@ -1,7 +1,7 @@
 [[painless-filter-context]]
 === Filter context
 
-Use a Painless script as a {es_version}/query-dsl-script-query.html[filter] in a
+Use a Painless script as a {ref}/query-dsl-script-query.html[filter] in a
 query to include and exclude documents.
 
 
@@ -1,7 +1,7 @@
 [[painless-ingest-processor-context]]
 === Ingest processor context
 
-Use a Painless script in an {es_version}/script-processor.html[ingest processor]
+Use a Painless script in an {ref}/script-processor.html[ingest processor]
 to modify documents upon insertion.
 
 *Variables*
@@ -9,10 +9,10 @@ to modify documents upon insertion.
 `params` (`Map`, read-only)::
 User-defined parameters passed in as part of the query.
 
-{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`)::
+{ref}/mapping-index-field.html[`ctx['_index']`] (`String`)::
 The name of the index.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`)::
+{ref}/mapping-type-field.html[`ctx['_type']`] (`String`)::
 The type of document within an index.
 
 `ctx` (`Map`)::
@@ -21,10 +21,10 @@ to modify documents upon insertion.
 
 *Side Effects*
 
-{es_version}/mapping-index-field.html[`ctx['_index']`]::
+{ref}/mapping-index-field.html[`ctx['_index']`]::
 Modify this to change the destination index for the current document.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`]::
+{ref}/mapping-type-field.html[`ctx['_type']`]::
 Modify this to change the type for the current document.
 
 `ctx` (`Map`, read-only)::
@@ -2,7 +2,7 @@
 === Metric aggregation combine context
 
 Use a Painless script to
-{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[combine]
+{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[combine]
 values for use in a scripted metric aggregation. A combine script is run once
 per shard following a <<painless-metric-agg-map-context, map script>> and is
 optional as part of a full metric aggregation.
@@ -2,7 +2,7 @@
 === Metric aggregation initialization context
 
 Use a Painless script to
-{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[initialize]
+{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[initialize]
 values for use in a scripted metric aggregation. An initialization script is
 run prior to document collection once per shard and is optional as part of the
 full metric aggregation.
@@ -2,7 +2,7 @@
 === Metric aggregation map context
 
 Use a Painless script to
-{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[map]
+{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[map]
 values for use in a scripted metric aggregation. A map script is run once per
 collected document following an optional
 <<painless-metric-agg-init-context, initialization script>> and is required as
@@ -2,7 +2,7 @@
 === Metric aggregation reduce context
 
 Use a Painless script to
-{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[reduce]
+{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[reduce]
 values to produce the result of a scripted metric aggregation. A reduce script
 is run once on the coordinating node following a
 <<painless-metric-agg-combine-context, combine script>> (or a
@@ -2,7 +2,7 @@
 === Minimum should match context
 
 Use a Painless script to specify the
-{es_version}/query-dsl-terms-set-query.html[minimum] number of terms that a
+{ref}/query-dsl-terms-set-query.html[minimum] number of terms that a
 specified field needs to match with for a document to be part of the query
 results.
 
@@ -1,7 +1,7 @@
 [[painless-reindex-context]]
 === Reindex context
 
-Use a Painless script in a {es_version}/docs-reindex.html[reindex] operation to
+Use a Painless script in a {ref}/docs-reindex.html[reindex] operation to
 add, modify, or delete fields within each document in an original index as its
 reindexed into a target index.
 
@@ -13,22 +13,22 @@ reindexed into a target index.
 `ctx['_op']` (`String`)::
 The name of the operation.
 
-{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`)::
+{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`)::
 The value used to select a shard for document storage.
 
-{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`)::
+{ref}/mapping-index-field.html[`ctx['_index']`] (`String`)::
 The name of the index.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`)::
+{ref}/mapping-type-field.html[`ctx['_type']`] (`String`)::
 The type of document within an index.
 
-{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
 The unique document id.
 
 `ctx['_version']` (`int`)::
 The current version of the document.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
+{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
 Contains extracted JSON in a `Map` and `List` structure for the fields
 existing in a stored document.
 
@@ -39,22 +39,22 @@ reindexed into a target index.
 specify no operation or `delete` to delete the current document from
 the index.
 
-{es_version}/mapping-routing-field.html[`ctx['_routing']`]::
+{ref}/mapping-routing-field.html[`ctx['_routing']`]::
 Modify this to change the routing value for the current document.
 
-{es_version}/mapping-index-field.html[`ctx['_index']`]::
+{ref}/mapping-index-field.html[`ctx['_index']`]::
 Modify this to change the destination index for the current document.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`]::
+{ref}/mapping-type-field.html[`ctx['_type']`]::
 Modify this to change the type for the current document.
 
-{es_version}/mapping-id-field.html[`ctx['_id']`]::
+{ref}/mapping-id-field.html[`ctx['_id']`]::
 Modify this to change the id for the current document.
 
 `ctx['_version']` (`int`)::
 Modify this to modify the version for the current document.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`]::
+{ref}/mapping-source-field.html[`ctx['_source']`]::
 Modify the values in the `Map/List` structure to add, modify, or delete
 the fields of a document.
 
@@ -2,7 +2,7 @@
 === Score context
 
 Use a Painless script in a
-{es_version}/query-dsl-function-score-query.html[function score] to apply a new
+{ref}/query-dsl-function-score-query.html[function score] to apply a new
 score to documents returned from a query.
 
 *Variables*
@@ -2,7 +2,7 @@
 === Similarity context
 
 Use a Painless script to create a
-{es_version}/index-modules-similarity.html[similarity] equation for scoring
+{ref}/index-modules-similarity.html[similarity] equation for scoring
 documents in a query.
 
 *Variables*
@@ -2,7 +2,7 @@
 === Sort context
 
 Use a Painless script to
-{es_version}/search-request-sort.html[sort] the documents in a query.
+{ref}/search-request-sort.html[sort] the documents in a query.
 
 *Variables*
 
@@ -2,7 +2,7 @@
 === Update by query context
 
 Use a Painless script in an
-{es_version}/docs-update-by-query.html[update by query] operation to add,
+{ref}/docs-update-by-query.html[update by query] operation to add,
 modify, or delete fields within each of a set of documents collected as the
 result of query.
 
@@ -14,22 +14,22 @@ result of query.
 `ctx['_op']` (`String`)::
 The name of the operation.
 
-{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only)::
+{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only)::
 The value used to select a shard for document storage.
 
-{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
+{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
 The name of the index.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only)::
+{ref}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only)::
 The type of document within an index.
 
-{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
 The unique document id.
 
 `ctx['_version']` (`int`, read-only)::
 The current version of the document.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
+{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
 Contains extracted JSON in a `Map` and `List` structure for the fields
 existing in a stored document.
 
@@ -40,7 +40,7 @@ result of query.
 specify no operation or `delete` to delete the current document from
 the index.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`]::
+{ref}/mapping-source-field.html[`ctx['_source']`]::
 Modify the values in the `Map/List` structure to add, modify, or delete
 the fields of a document.
 
@@ -1,7 +1,7 @@
 [[painless-update-context]]
 === Update context
 
-Use a Painless script in an {es_version}/docs-update.html[update] operation to
+Use a Painless script in an {ref}/docs-update.html[update] operation to
 add, modify, or delete fields within a single document.
 
 *Variables*
@@ -12,16 +12,16 @@ add, modify, or delete fields within a single document.
 `ctx['_op']` (`String`)::
 The name of the operation.
 
-{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only)::
+{ref}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only)::
 The value used to select a shard for document storage.
 
-{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
+{ref}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only)::
 The name of the index.
 
-{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only)::
+{ref}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only)::
 The type of document within an index.
 
-{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
+{ref}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only)::
 The unique document id.
 
 `ctx['_version']` (`int`, read-only)::
@@ -30,7 +30,7 @@ add, modify, or delete fields within a single document.
 `ctx['_now']` (`long`, read-only)::
 The current timestamp in milliseconds.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
+{ref}/mapping-source-field.html[`ctx['_source']`] (`Map`)::
 Contains extracted JSON in a `Map` and `List` structure for the fields
 existing in a stored document.
 
@@ -41,7 +41,7 @@ add, modify, or delete fields within a single document.
 specify no operation or `delete` to delete the current document from
 the index.
 
-{es_version}/mapping-source-field.html[`ctx['_source']`]::
+{ref}/mapping-source-field.html[`ctx['_source']`]::
 Modify the values in the `Map/List` structure to add, modify, or delete
 the fields of a document.
 
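An aside on what these context tables describe: every `ctx['...']` entry above is a key in a plain map the script engine hands to the script. A minimal Java sketch of that shape follows — an illustrative model only, not Elasticsearch code; the field values and the `counter` document are hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    public class UpdateContextSketch {
        public static void main(String[] args) {
            Map<String, Object> source = new HashMap<>();
            source.put("counter", 1);

            Map<String, Object> ctx = new HashMap<>();
            ctx.put("_op", "index");       // "none" skips, "delete" removes the document
            ctx.put("_index", "my-index"); // read-only in the update context
            ctx.put("_source", source);    // extracted JSON as Map/List structures

            // A script line such as `ctx._source.counter += params.count`
            // amounts to a map mutation like this:
            @SuppressWarnings("unchecked")
            Map<String, Object> src = (Map<String, Object>) ctx.get("_source");
            src.put("counter", (Integer) src.get("counter") + 4);

            System.out.println(ctx); // map order varies; _source now holds counter=5
        }
    }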
@@ -1,7 +1,7 @@
 [[painless-watcher-condition-context]]
 === Watcher condition context
 
-Use a Painless script as a {xp_version}/condition-script.html[watcher condition]
+Use a Painless script as a {xpack-ref}/condition-script.html[watcher condition]
 to test if a response is necessary.
 
 *Variables*
@@ -26,7 +26,7 @@ to test if a response is necessary.
 
 `ctx['payload']` (`Map`, read-only)::
 The accessible watch data based upon the
-{xp_version}/input.html[watch input].
+{xpack-ref}/input.html[watch input].
 
 *Return*
 
@@ -1,7 +1,7 @@
 [[painless-watcher-transform-context]]
 === Watcher transform context
 
-Use a Painless script to {xp_version}/transform-script.html[transform] watch
+Use a Painless script to {xpack-ref}/transform-script.html[transform] watch
 data into a new payload for use in a response to a condition.
 
 *Variables*
@@ -26,7 +26,7 @@ data into a new payload for use in a response to a condition.
 
 `ctx['payload']` (`Map`, read-only)::
 The accessible watch data based upon the
-{xp_version}/input.html[watch input].
+{xpack-ref}/input.html[watch input].
 
 
 *Return*
@@ -2,7 +2,7 @@
 === Weight context
 
 Use a Painless script to create a
-{es_version}/index-modules-similarity.html[weight] for use in a
+{ref}/index-modules-similarity.html[weight] for use in a
 <<painless-similarity-context, similarity script>>. Weight is used to prevent
 recalculation of constants that remain the same across documents.
 
@@ -27,11 +27,17 @@ For more information about creating and updating the {es} keystore, see
 `xpack.security.enabled`::
 Set to `true` to enable {security} on the node. +
 +
+--
 If set to `false`, which is the default value for basic and trial licenses,
 {security} is disabled. It also affects all {kib} instances that connect to this
 {es} instance; you do not need to disable {security} in those `kibana.yml` files.
 For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings].
 
+TIP: If you have gold or higher licenses, the default value is `true`; we
+recommend that you explicitly add this setting to avoid confusion.
+
+--
 
 `xpack.security.hide_settings`::
 A comma-separated list of settings that are omitted from the results of the
 <<cluster-nodes-info,cluster nodes info API>>. You can use wildcards to include
@@ -67,6 +67,7 @@ public class ConvertProcessorTests extends ESTestCase {
         assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(10));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertIntHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
         String value = "0x" + randomAlphaOfLengthBetween(1, 10);
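The added annotation is the standard way these tests get muted: `@AwaitsFix` comes from Lucene's test framework (which `ESTestCase` ultimately extends) and makes the runner skip the method while pointing at a tracking issue. A minimal sketch of the pattern — the class name and issue URL below are hypothetical:

    import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
    import org.elasticsearch.test.ESTestCase;

    public class ExampleTests extends ESTestCase {

        // The runner reports this test as skipped with the bug URL,
        // instead of letting a known failure break the build.
        @AwaitsFix(bugUrl = "https://github.com/example/project/issues/1")
        public void testKnownToFail() {
            fail("stays muted until the linked issue is resolved");
        }
    }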
@@ -103,10 +103,10 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr
             ScriptContext<?> context = entry.getKey();
             if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) {
                 contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class,
-                    new PainlessLookupBuilder(entry.getValue()).build()));
+                    PainlessLookupBuilder.buildFromWhitelists(entry.getValue())));
             } else {
                 contextsToCompilers.put(context, new Compiler(context.instanceClazz,
-                    new PainlessLookupBuilder(entry.getValue()).build()));
+                    PainlessLookupBuilder.buildFromWhitelists(entry.getValue())));
             }
         }
 
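Both call sites change the same way: the `List<Whitelist>` moves out of the builder's constructor and into a static factory that performs the whole traversal and returns the finished lookup. A small sketch of the migration, using only the two forms visible in this diff:

    import java.util.List;

    import org.elasticsearch.painless.lookup.PainlessLookup;
    import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
    import org.elasticsearch.painless.spi.Whitelist;

    class LookupMigrationSketch {
        static PainlessLookup build(List<Whitelist> whitelists) {
            // Before this commit: new PainlessLookupBuilder(whitelists).build()
            // After: the builder no longer stores whitelists at all.
            return PainlessLookupBuilder.buildFromWhitelists(whitelists);
        }
    }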
@@ -126,14 +126,55 @@ public class PainlessLookupBuilder {
     private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");
     private static final Pattern FIELD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");
 
-    private final List<Whitelist> whitelists;
+    public static PainlessLookup buildFromWhitelists(List<Whitelist> whitelists) {
+        PainlessLookupBuilder painlessLookupBuilder = new PainlessLookupBuilder();
+        String origin = "internal error";
+
+        try {
+            for (Whitelist whitelist : whitelists) {
+                for (WhitelistClass whitelistClass : whitelist.whitelistStructs) {
+                    origin = whitelistClass.origin;
+                    painlessLookupBuilder.addPainlessClass(
+                            whitelist.javaClassLoader, whitelistClass.javaClassName, whitelistClass.onlyFQNJavaClassName == false);
+                }
+            }
+
+            for (Whitelist whitelist : whitelists) {
+                for (WhitelistClass whitelistClass : whitelist.whitelistStructs) {
+                    String targetCanonicalClassName = whitelistClass.javaClassName.replace('$', '.');
+
+                    for (WhitelistConstructor whitelistConstructor : whitelistClass.whitelistConstructors) {
+                        origin = whitelistConstructor.origin;
+                        painlessLookupBuilder.addPainlessConstructor(
+                                targetCanonicalClassName, whitelistConstructor.painlessParameterTypeNames);
+                    }
+
+                    for (WhitelistMethod whitelistMethod : whitelistClass.whitelistMethods) {
+                        origin = whitelistMethod.origin;
+                        painlessLookupBuilder.addPainlessMethod(
+                                whitelist.javaClassLoader, targetCanonicalClassName, whitelistMethod.javaAugmentedClassName,
+                                whitelistMethod.javaMethodName, whitelistMethod.painlessReturnTypeName,
+                                whitelistMethod.painlessParameterTypeNames);
+                    }
+
+                    for (WhitelistField whitelistField : whitelistClass.whitelistFields) {
+                        origin = whitelistField.origin;
+                        painlessLookupBuilder.addPainlessField(
+                                targetCanonicalClassName, whitelistField.javaFieldName, whitelistField.painlessFieldTypeName);
+                    }
+                }
+            }
+        } catch (Exception exception) {
+            throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception);
+        }
+
+        return painlessLookupBuilder.build();
+    }
 
     private final Map<String, Class<?>> canonicalClassNamesToClasses;
     private final Map<Class<?>, PainlessClassBuilder> classesToPainlessClassBuilders;
 
-    public PainlessLookupBuilder(List<Whitelist> whitelists) {
-        this.whitelists = whitelists;
-
+    public PainlessLookupBuilder() {
         canonicalClassNamesToClasses = new HashMap<>();
         classesToPainlessClassBuilders = new HashMap<>();
 
@@ -666,60 +707,6 @@
     }
 
     public PainlessLookup build() {
-        String origin = "internal error";
-
-        try {
-            // first iteration collects all the Painless type names that
-            // are used for validation during the second iteration
-            for (Whitelist whitelist : whitelists) {
-                for (WhitelistClass whitelistStruct : whitelist.whitelistStructs) {
-                    String painlessTypeName = whitelistStruct.javaClassName.replace('$', '.');
-                    PainlessClassBuilder painlessStruct =
-                            classesToPainlessClassBuilders.get(canonicalClassNamesToClasses.get(painlessTypeName));
-
-                    if (painlessStruct != null && painlessStruct.clazz.getName().equals(whitelistStruct.javaClassName) == false) {
-                        throw new IllegalArgumentException("struct [" + painlessStruct.name + "] cannot represent multiple classes " +
-                                "[" + painlessStruct.clazz.getName() + "] and [" + whitelistStruct.javaClassName + "]");
-                    }
-
-                    origin = whitelistStruct.origin;
-                    addPainlessClass(
-                            whitelist.javaClassLoader, whitelistStruct.javaClassName, whitelistStruct.onlyFQNJavaClassName == false);
-
-                    painlessStruct = classesToPainlessClassBuilders.get(canonicalClassNamesToClasses.get(painlessTypeName));
-                    classesToPainlessClassBuilders.put(painlessStruct.clazz, painlessStruct);
-                }
-            }
-
-            // second iteration adds all the constructors, methods, and fields that will
-            // be available in Painless along with validating they exist and all their types have
-            // been white-listed during the first iteration
-            for (Whitelist whitelist : whitelists) {
-                for (WhitelistClass whitelistStruct : whitelist.whitelistStructs) {
-                    String painlessTypeName = whitelistStruct.javaClassName.replace('$', '.');
-
-                    for (WhitelistConstructor whitelistConstructor : whitelistStruct.whitelistConstructors) {
-                        origin = whitelistConstructor.origin;
-                        addPainlessConstructor(painlessTypeName, whitelistConstructor.painlessParameterTypeNames);
-                    }
-
-                    for (WhitelistMethod whitelistMethod : whitelistStruct.whitelistMethods) {
-                        origin = whitelistMethod.origin;
-                        addPainlessMethod(whitelist.javaClassLoader, painlessTypeName, whitelistMethod.javaAugmentedClassName,
-                                whitelistMethod.javaMethodName, whitelistMethod.painlessReturnTypeName,
-                                whitelistMethod.painlessParameterTypeNames);
-                    }
-
-                    for (WhitelistField whitelistField : whitelistStruct.whitelistFields) {
-                        origin = whitelistField.origin;
-                        addPainlessField(painlessTypeName, whitelistField.javaFieldName, whitelistField.painlessFieldTypeName);
-                    }
-                }
-            }
-        } catch (Exception exception) {
-            throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception);
-        }
-
         copyPainlessClassMembers();
         cacheRuntimeHandles();
         setFunctionalInterfaceMethods();
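Note the shape the factory preserves from the old `build()`: a first pass registers every whitelisted class, and only then does a second pass add constructors, methods, and fields, so a member signature can name any whitelisted type regardless of the order the whitelists arrive in. A stripped-down illustration of why one pass would not suffice — all names here are hypothetical, not Painless code:

    import java.util.HashMap;
    import java.util.Map;

    class TwoPassSketch {
        private final Map<String, Boolean> knownTypes = new HashMap<>();

        void addClass(String name) {
            knownTypes.put(name, true);
        }

        // Resolving a member's type name requires the class pass to have run;
        // in a single pass, a method returning a type whitelisted later would throw.
        void addMethod(String owner, String returnTypeName) {
            if (!knownTypes.containsKey(returnTypeName)) {
                throw new IllegalArgumentException("unknown type [" + returnTypeName + "]");
            }
        }

        static TwoPassSketch buildFrom(String[][] classesWithMethodReturnTypes) {
            TwoPassSketch sketch = new TwoPassSketch();
            for (String[] entry : classesWithMethodReturnTypes) {
                sketch.addClass(entry[0]);            // pass 1: classes only
            }
            for (String[] entry : classesWithMethodReturnTypes) {
                sketch.addMethod(entry[0], entry[1]); // pass 2: members
            }
            return sketch;
        }
    }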
@@ -20,7 +20,6 @@
 package org.elasticsearch.painless;
 
 import org.elasticsearch.painless.lookup.PainlessCast;
-
 import org.elasticsearch.test.ESTestCase;
 
 public class AnalyzerCasterTests extends ESTestCase {
@@ -19,14 +19,14 @@
 
 package org.elasticsearch.painless;
 
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
 import org.elasticsearch.painless.lookup.PainlessLookup;
 import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
 import org.elasticsearch.painless.spi.Whitelist;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.containsString;
@@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.startsWith;
  */
 public class BaseClassTests extends ScriptTestCase {
 
-    private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+    private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
 
     public abstract static class Gets {
 
@@ -2,6 +2,9 @@ package org.elasticsearch.painless;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /*
  * Licensed to Elasticsearch under one or more contributor
@@ -22,10 +25,6 @@ import java.util.Collections;
  * under the License.
  */
 
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
 public class BasicStatementTests extends ScriptTestCase {
 
     public void testIfStatement() {
@@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.not;
 
 public class DebugTests extends ScriptTestCase {
-    private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+    private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
 
     public void testExplain() {
         // Debug.explain can explain an object
@@ -40,7 +40,7 @@ final class Debugger {
         PrintWriter outputWriter = new PrintWriter(output);
         Textifier textifier = new Textifier();
         try {
-            new Compiler(iface, new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build())
+            new Compiler(iface, PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS))
                     .compile("<debugging>", source, settings, textifier);
         } catch (RuntimeException e) {
             textifier.print(outputWriter);
@@ -19,6 +19,11 @@
 
 package org.elasticsearch.painless;
 
+import org.elasticsearch.painless.lookup.PainlessLookup;
+import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
+import org.elasticsearch.painless.spi.Whitelist;
+import org.elasticsearch.test.ESTestCase;
+
 import java.lang.invoke.CallSite;
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodHandles;
@@ -27,13 +32,8 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 
-import org.elasticsearch.painless.lookup.PainlessLookup;
-import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
-import org.elasticsearch.painless.spi.Whitelist;
-import org.elasticsearch.test.ESTestCase;
-
 public class DefBootstrapTests extends ESTestCase {
-    private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+    private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
 
     /** calls toString() on integers, twice */
     public void testOneType() throws Throwable {
@@ -21,7 +21,6 @@ package org.elasticsearch.painless;
 
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 
@@ -52,7 +52,7 @@ import static java.util.stream.Collectors.toList;
  */
 public class PainlessDocGenerator {
 
-    private static final PainlessLookup PAINLESS_LOOKUP = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+    private static final PainlessLookup PAINLESS_LOOKUP = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
     private static final Logger logger = ESLoggerFactory.getLogger(PainlessDocGenerator.class);
     private static final Comparator<PainlessField> FIELD_NAME = comparing(f -> f.name);
     private static final Comparator<PainlessMethod> METHOD_NAME = comparing(m -> m.name);
@@ -92,7 +92,7 @@ public abstract class ScriptTestCase extends ESTestCase {
     public Object exec(String script, Map<String, Object> vars, Map<String,String> compileParams, Scorer scorer, boolean picky) {
         // test for ambiguity errors before running the actual script if picky is true
         if (picky) {
-            PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+            PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
             ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, GenericElasticsearchScript.class);
             CompilerSettings pickySettings = new CompilerSettings();
             pickySettings.setPicky(true);
@@ -22,8 +22,8 @@ package org.elasticsearch.painless;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
 import org.elasticsearch.painless.spi.Whitelist;
-import org.elasticsearch.script.ScriptedMetricAggContexts;
 import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptedMetricAggContexts;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -20,7 +20,6 @@
 package org.elasticsearch.painless;
 
 import junit.framework.AssertionFailedError;
-
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.script.ScriptException;
 
@@ -20,21 +20,21 @@
 package org.elasticsearch.painless.node;
 
 import org.elasticsearch.painless.CompilerSettings;
-import org.elasticsearch.painless.lookup.PainlessLookup;
-import org.elasticsearch.painless.lookup.PainlessCast;
-import org.elasticsearch.painless.lookup.PainlessField;
-import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
-import org.elasticsearch.painless.lookup.PainlessMethod;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.FeatureTest;
 import org.elasticsearch.painless.GenericElasticsearchScript;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.Operation;
 import org.elasticsearch.painless.ScriptClassInfo;
-import org.elasticsearch.painless.spi.Whitelist;
 import org.elasticsearch.painless.antlr.Walker;
+import org.elasticsearch.painless.lookup.PainlessCast;
+import org.elasticsearch.painless.lookup.PainlessClass;
+import org.elasticsearch.painless.lookup.PainlessField;
+import org.elasticsearch.painless.lookup.PainlessLookup;
+import org.elasticsearch.painless.lookup.PainlessLookupBuilder;
+import org.elasticsearch.painless.lookup.PainlessLookupUtility;
+import org.elasticsearch.painless.lookup.PainlessMethod;
+import org.elasticsearch.painless.spi.Whitelist;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.Arrays;
@@ -49,7 +49,7 @@ import static org.elasticsearch.painless.node.SSource.MainMethodReserved;
 * Tests {@link Object#toString} implementations on all extensions of {@link ANode}.
 */
 public class NodeToStringTests extends ESTestCase {
-    private final PainlessLookup painlessLookup = new PainlessLookupBuilder(Whitelist.BASE_WHITELISTS).build();
+    private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS);
 
     public void testEAssignment() {
         assertToString(
@@ -115,7 +115,7 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3P
   useFixture = true
 
 } else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) {
-  throw new IllegalArgumentException("not all options specified to run against external S3 service")
+  throw new IllegalArgumentException("not all options specified to run against external S3 service as permanent credentials are present")
 }
 
 if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) {
@@ -126,7 +126,7 @@ if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3T
   s3TemporarySessionToken = 's3_integration_test_temporary_session_token'
 
 } else if (!s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) {
-  throw new IllegalArgumentException("not all options specified to run against external S3 service")
+  throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present")
 }
 
 final String minioVersion = 'RELEASE.2018-06-22T23-48-46Z'
@@ -381,7 +381,7 @@ integTestCluster {
 
 integTestRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*'
 
-///
+if (useFixture) {
   RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) {
     description = "Runs tests using the ECS repository."
   }
@@ -405,7 +405,7 @@ project.afterEvaluate {
     ].join(",")
   }
   project.check.dependsOn(integTestECS)
-///
+}
 
 thirdPartyAudit.excludes = [
   // classes are missing
@@ -178,6 +178,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
    public static final Version V_6_3_3 = new Version(V_6_3_3_ID, org.apache.lucene.util.Version.LUCENE_7_3_1);
    public static final int V_6_4_0_ID = 6040099;
    public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
+   public static final int V_6_5_0_ID = 6050099;
+   public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0);
    public static final int V_7_0_0_alpha1_ID = 7000001;
    public static final Version V_7_0_0_alpha1 =
        new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0);
@@ -196,6 +198,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
        switch (id) {
            case V_7_0_0_alpha1_ID:
                return V_7_0_0_alpha1;
+           case V_6_5_0_ID:
+               return V_6_5_0;
            case V_6_4_0_ID:
                return V_6_4_0;
            case V_6_3_3_ID:

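(An aside, inferred from the surrounding constants rather than stated anywhere in the change itself: these version IDs pack major, minor, revision, and build into a single integer as major * 1,000,000 + minor * 10,000 + revision * 100 + build. So 6.5.0 with release build 99 gives 6 * 1,000,000 + 5 * 10,000 + 0 * 100 + 99 = 6050099, matching `V_6_5_0_ID`, while 7.0.0-alpha1 with build 01 gives 7000001, matching `V_7_0_0_alpha1_ID`.)
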
@@ -310,6 +310,16 @@ setups['farequote_datafeed'] = setups['farequote_job'] + '''
      "indexes":"farequote"
  }
'''
+setups['ml_filter_safe_domains'] = '''
+  - do:
+      xpack.ml.put_filter:
+        filter_id: "safe_domains"
+        body: >
+          {
+            "description": "A list of safe domains",
+            "items": ["*.google.com", "wikipedia.org"]
+          }
+'''
setups['server_metrics_index'] = '''
  - do:
      indices.create:

@@ -105,8 +105,8 @@ For all other aggregations, if the aggregation name doesn't match the field name
there are limitations in the drill-down functionality within the {ml} page in
{kib}.

-{dfeeds} support complex nested aggregations, this example uses the `derivative`
-pipeline aggregation to find the 1st order derivative of the counter
+{dfeeds-cap} support complex nested aggregations. This example uses the `derivative`
+pipeline aggregation to find the first order derivative of the counter
`system.network.out.bytes` for each value of the field `beat.name`.

[source,js]

@@ -47,6 +47,15 @@ The main {ml} resources can be accessed with a variety of endpoints:
* {ref}/ml-delete-calendar-job.html[DELETE /calendars/<calendar_id+++>+++/jobs/<job_id+++>+++]: Disassociate a job from a calendar
* {ref}/ml-delete-calendar.html[DELETE /calendars/<calendar_id+++>+++]: Delete a calendar

+[float]
+[[ml-api-filters]]
+=== /filters/
+
+* {ref}/ml-put-filter.html[PUT /filters/<filter_id+++>+++]: Create a filter
+* {ref}/ml-update-filter.html[POST /filters/<filter_id+++>+++/_update]: Update a filter
+* {ref}/ml-get-filter.html[GET /filters/<filter_id+++>+++]: List filters
+* {ref}/ml-delete-filter.html[DELETE /filters/<filter_id+++>+++]: Delete a filter
+
[float]
[[ml-api-datafeeds]]
=== /datafeeds/

@@ -34,6 +34,7 @@ The scenarios in this section describe some best practices for generating useful
* <<ml-configuring-categories>>
* <<ml-configuring-pop>>
* <<ml-configuring-transform>>
+* <<ml-configuring-detector-custom-rules>>

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc
include::customurl.asciidoc[]
@@ -49,3 +50,6 @@ include::populations.asciidoc[]

:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc
include::transforms.asciidoc[]
+
+:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc
+include::detector-custom-rules.asciidoc[]

@@ -0,0 +1,230 @@
[role="xpack"]
[[ml-configuring-detector-custom-rules]]
=== Customizing detectors with rules and filters

<<ml-rules,Rules and filters>> enable you to change the behavior of anomaly
detectors based on domain-specific knowledge.

Rules describe _when_ a detector should take a certain _action_ instead
of following its default behavior. To specify the _when_, a rule uses
a `scope` and `conditions`. You can think of `scope` as the categorical
specification of a rule, while `conditions` are the numerical part.
A rule can have a scope, one or more conditions, or a combination of
scope and conditions.

Let us see how these can be configured through examples.

==== Specifying rule scope

Let us assume we are configuring a job in order to detect DNS data
exfiltration. Our data contain the fields "subdomain" and
"highest_registered_domain". We can use a detector that looks like
`high_info_content(subdomain) over highest_registered_domain`. If we run such
a job, it is possible that we discover a lot of anomalies on frequently used
domains that we have reasons to trust. As security analysts, we are not
interested in such anomalies. Ideally, we could instruct the detector to skip
results for domains that we consider safe. Using a rule with a scope allows
us to achieve this.

First, we need to create a list with our safe domains. Such lists are called
`filters` in {ml}. Filters can be shared across jobs.

We create our filter using the {ref}/ml-put-filter.html[put filter API]:

[source,js]
----------------------------------
PUT _xpack/ml/filters/safe_domains
{
  "description": "Our list of safe domains",
  "items": ["safe.com", "trusted.com"]
}
----------------------------------
// CONSOLE

Now, we can create our job specifying a scope that uses the filter for the
`highest_registered_domain` field:

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule
{
  "analysis_config" : {
    "bucket_span":"5m",
    "detectors" :[{
      "function":"high_info_content",
      "field_name": "subdomain",
      "over_field_name": "highest_registered_domain",
      "custom_rules": [{
        "actions": ["skip_result"],
        "scope": {
          "highest_registered_domain": {
            "filter_id": "safe_domains",
            "filter_type": "include"
          }
        }
      }]
    }]
  },
  "data_description" : {
    "time_field":"timestamp"
  }
}
----------------------------------
// CONSOLE

As time advances and we see more data and more results, we might encounter
new domains that we want to add to the filter. We can do that by using the
{ref}/ml-update-filter.html[update filter API]:

[source,js]
----------------------------------
POST _xpack/ml/filters/safe_domains/_update
{
  "add_items": ["another-safe.com"]
}
----------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]

Note that we can provide scope for any of the partition/over/by fields.
In the following example we scope multiple fields:

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields
{
  "analysis_config" : {
    "bucket_span":"5m",
    "detectors" :[{
      "function":"count",
      "partition_field_name": "my_partition",
      "over_field_name": "my_over",
      "by_field_name": "my_by",
      "custom_rules": [{
        "actions": ["skip_result"],
        "scope": {
          "my_partition": {
            "filter_id": "filter_1"
          },
          "my_over": {
            "filter_id": "filter_2"
          },
          "my_by": {
            "filter_id": "filter_3"
          }
        }
      }]
    }]
  },
  "data_description" : {
    "time_field":"timestamp"
  }
}
----------------------------------
// CONSOLE

Such a detector skips results when the values of all three scoped fields
are included in the referenced filters.

==== Specifying rule conditions

Imagine a detector that looks for anomalies in CPU utilization.
Given a machine that is idle for long enough, small movement in CPU could
result in anomalous results where the `actual` value is quite small, for
example, 0.02. Given our knowledge about how CPU utilization behaves, we
might determine that anomalies with such small actual values are not
interesting for investigation.

Let us now configure a job with a rule that skips results where CPU
utilization is less than 0.20.

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/cpu_with_rule
{
  "analysis_config" : {
    "bucket_span":"5m",
    "detectors" :[{
      "function":"high_mean",
      "field_name": "cpu_utilization",
      "custom_rules": [{
        "actions": ["skip_result"],
        "conditions": [
          {
            "applies_to": "actual",
            "operator": "lt",
            "value": 0.20
          }
        ]
      }]
    }]
  },
  "data_description" : {
    "time_field":"timestamp"
  }
}
----------------------------------
// CONSOLE

When there are multiple conditions, they are combined with a logical `and`.
This is useful when we want the rule to apply to a range. We simply create
a rule with two conditions, one for each end of the desired range.

Here is an example where a count detector skips results when the count
is greater than 30 and less than 50:

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/rule_with_range
{
  "analysis_config" : {
    "bucket_span":"5m",
    "detectors" :[{
      "function":"count",
      "custom_rules": [{
        "actions": ["skip_result"],
        "conditions": [
          {
            "applies_to": "actual",
            "operator": "gt",
            "value": 30
          },
          {
            "applies_to": "actual",
            "operator": "lt",
            "value": 50
          }
        ]
      }]
    }]
  },
  "data_description" : {
    "time_field":"timestamp"
  }
}
----------------------------------
// CONSOLE

==== Rules in the life-cycle of a job

Rules only affect results created after the rules were applied.
Let us imagine that we have configured a job and it has been running for some
time. After observing its results, we decide that we can employ rules to get
rid of some uninteresting results. We can use the update job API to do so.
However, the rule we add will only be in effect for results created from the
moment we add it onwards. Past results remain unaffected.

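To make the mechanics concrete, here is a sketch of such an update. It is not
a snippet from the original page: it hypothetically attaches the rule from the
CPU example above to the job's first detector, using the detector update
objects described in the update job API:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/cpu_with_rule/_update
{
  "detectors": [{
    "detector_index": 0,
    "custom_rules": [{
      "actions": ["skip_result"],
      "conditions": [
        {
          "applies_to": "actual",
          "operator": "lt",
          "value": 0.20
        }
      ]
    }]
  }]
}
----------------------------------
// NOTCONSOLE
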
==== Using rules vs. filtering data

It might appear like using rules is just another way of filtering the data
that feeds into a job. For example, a rule that skips results when the
partition field value is in a filter sounds equivalent to having a query
that filters out such documents. But it is not. There is a fundamental
difference. When the data is filtered before reaching a job, it is as if it
never existed for the job. With rules, the data still reaches the job and
affects its behavior (depending on the rule actions).

For example, a rule with the `skip_result` action means all data will still
be modeled. On the other hand, a rule with the `skip_model_update` action means
results will still be created even though the model will not be updated by
data matched by a rule.

@@ -8,6 +8,8 @@ input data.
The {xpackml} features include the following geographic function: `lat_long`.

NOTE: You cannot create forecasts for jobs that contain geographic functions.
+You also cannot add rules with conditions to detectors that use geographic
+functions.

[float]
[[ml-lat-long]]

@@ -15,6 +15,9 @@ The {xpackml} features include the following metric functions:
* <<ml-metric-metric,`metric`>>
* xref:ml-metric-varp[`varp`, `high_varp`, `low_varp`]

+NOTE: You cannot add rules with conditions to detectors that use the `metric`
+function.
+
[float]
[[ml-metric-min]]
==== Min
@@ -221,7 +224,6 @@ mean `responsetime` for each application over time. It detects when the mean
The `metric` function combines `min`, `max`, and `mean` functions. You can use
it as a shorthand for a combined analysis. If you do not specify a function in
a detector, this is the default function.
-//TBD: Is that default behavior still true?

High- and low-sided functions are not applicable. You cannot use this function
when a `summary_count_field_name` is specified.

@@ -15,6 +15,8 @@ number of times (frequency) rare values occur.
`exclude_frequent`.
* You cannot create forecasts for jobs that contain `rare` or `freq_rare`
functions.
+* You cannot add rules with conditions to detectors that use `rare` or
+`freq_rare` functions.
* Shorter bucket spans (less than 1 hour, for example) are recommended when
looking for rare events. The functions model whether something happens in a
bucket at least once. With longer bucket spans, it is more likely that

@@ -8,6 +8,7 @@ job configuration options.
* <<ml-calendar-resource,Calendars>>
* <<ml-datafeed-resource,{dfeeds-cap}>>
* <<ml-datafeed-counts,{dfeed-cap} counts>>
+* <<ml-filter-resource,Filters>>
* <<ml-job-resource,Jobs>>
* <<ml-jobstats,Job statistics>>
* <<ml-snapshot-resource,Model snapshots>>
@@ -19,6 +20,8 @@ include::ml/calendarresource.asciidoc[]
[role="xpack"]
include::ml/datafeedresource.asciidoc[]
[role="xpack"]
+include::ml/filterresource.asciidoc[]
+[role="xpack"]
include::ml/jobresource.asciidoc[]
[role="xpack"]
include::ml/jobcounts.asciidoc[]

@@ -15,6 +15,14 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-post-calendar-event,Add scheduled events to calendar>>, <<ml-delete-calendar-event,Delete scheduled events from calendar>>
* <<ml-get-calendar,Get calendars>>, <<ml-get-calendar-event,Get scheduled events>>

+[float]
+[[ml-api-filter-endpoint]]
+=== Filters
+
+* <<ml-put-filter,Create filter>>, <<ml-delete-filter,Delete filter>>
+* <<ml-update-filter,Update filters>>
+* <<ml-get-filter,Get filters>>
+
[float]
[[ml-api-datafeed-endpoint]]
=== {dfeeds-cap}
@@ -69,11 +77,13 @@ include::ml/close-job.asciidoc[]
//CREATE
include::ml/put-calendar.asciidoc[]
include::ml/put-datafeed.asciidoc[]
+include::ml/put-filter.asciidoc[]
include::ml/put-job.asciidoc[]
//DELETE
include::ml/delete-calendar.asciidoc[]
include::ml/delete-datafeed.asciidoc[]
include::ml/delete-calendar-event.asciidoc[]
+include::ml/delete-filter.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/delete-calendar-job.asciidoc[]
include::ml/delete-snapshot.asciidoc[]
@@ -93,6 +103,7 @@ include::ml/get-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-snapshot.asciidoc[]
include::ml/get-calendar-event.asciidoc[]
+include::ml/get-filter.asciidoc[]
include::ml/get-record.asciidoc[]
//OPEN
include::ml/open-job.asciidoc[]
@@ -107,6 +118,7 @@ include::ml/start-datafeed.asciidoc[]
include::ml/stop-datafeed.asciidoc[]
//UPDATE
include::ml/update-datafeed.asciidoc[]
+include::ml/update-filter.asciidoc[]
include::ml/update-job.asciidoc[]
include::ml/update-snapshot.asciidoc[]
//VALIDATE

@@ -0,0 +1,53 @@
[role="xpack"]
[[ml-delete-filter]]
=== Delete Filter API
++++
<titleabbrev>Delete Filter</titleabbrev>
++++

Deletes a filter.


==== Request

`DELETE _xpack/ml/filters/<filter_id>`


==== Description

This API deletes a {stack-ov}/ml-rules.html[filter].
If a {ml} job references the filter, you cannot delete the filter. You must
update or delete the job before you can delete the filter.


==== Path Parameters

`filter_id` (required)::
  (string) Identifier for the filter.


==== Authorization

You must have `manage_ml` or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].


==== Examples

The following example deletes the `safe_domains` filter:

[source,js]
--------------------------------------------------
DELETE _xpack/ml/filters/safe_domains
--------------------------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]

When the filter is deleted, you receive the following results:

[source,js]
----
{
  "acknowledged": true
}
----
//TESTRESPONSE

@@ -0,0 +1,16 @@
[role="xpack"]
[[ml-filter-resource]]
=== Filter Resources

A filter resource has the following properties:

`filter_id`::
  (string) A string that uniquely identifies the filter.

`description`::
  (string) A description of the filter.

`items`::
  (array of strings) An array of strings that make up the filter item list.

For more information, see {stack-ov}/ml-rules.html[Machine learning rules and filters].

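For example (a sketch assembled from the `safe_domains` examples used on the
related API pages, not an excerpt from this file), a filter resource looks
like:

[source,js]
----
{
  "filter_id": "safe_domains",
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
----
// NOTCONSOLE
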
@@ -0,0 +1,84 @@
[role="xpack"]
[[ml-get-filter]]
=== Get Filters API
++++
<titleabbrev>Get Filters</titleabbrev>
++++

Retrieves filters.


==== Request

`GET _xpack/ml/filters/<filter_id>` +

`GET _xpack/ml/filters/`


==== Description

You can get a single filter or all filters. For more information, see
{stack-ov}/ml-rules.html[Machine learning rules and filters].


==== Path Parameters

`filter_id`::
  (string) Identifier for the filter.


==== Request Body

`from`:::
  (integer) Skips the specified number of filters.

`size`:::
  (integer) Specifies the maximum number of filters to obtain.


==== Results

The API returns the following information:

`filters`::
  (array) An array of filter resources.
  For more information, see <<ml-filter-resource>>.


==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].


==== Examples

The following example gets configuration information for the `safe_domains`
filter:

[source,js]
--------------------------------------------------
GET _xpack/ml/filters/safe_domains
--------------------------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]

The API returns the following results:

[source,js]
----
{
  "count": 1,
  "filters": [
    {
      "filter_id": "safe_domains",
      "description": "A list of safe domains",
      "items": [
        "*.google.com",
        "wikipedia.org"
      ]
    }
  ]
}
----
//TESTRESPONSE

@@ -106,7 +106,8 @@ An analysis configuration object has the following properties:

`bucket_span`::
  (time units) The size of the interval that the analysis is aggregated into,
- typically between `5m` and `1h`. The default value is `5m`.
+ typically between `5m` and `1h`. The default value is `5m`. For more
+ information about time units, see <<time-units,Common options>>.

`categorization_field_name`::
  (string) If this property is specified, the values of the specified field will
@@ -160,8 +161,7 @@ no analysis can occur and an error is returned.
  (time units) The size of the window in which to expect data that is out of
  time order. The default value is 0 (no latency). If you specify a non-zero
  value, it must be greater than or equal to one second. For more information
- about time units, see
- {ref}/common-options.html#time-units[Time Units].
+ about time units, see <<time-units,Common options>>.
  +
  --
  NOTE: Latency is only applicable when you send data by using
@@ -262,7 +262,12 @@ NOTE: The `field_name` cannot contain double quotes or backslashes.

`use_null`::
  (boolean) Defines whether a new series is used as the null series
- when there is no value for the by or partition fields. The default value is `false`. +
+ when there is no value for the by or partition fields. The default value is `false`.
+
+`custom_rules`::
+ (array) An array of rule objects, which enable customizing how the detector works.
+ For example, a rule may dictate to the detector conditions under which results should be skipped.
+ For more information, see <<ml-detector-custom-rule,detector custom rule objects>>. +
  +
  --
  IMPORTANT: Field names are case sensitive, for example a field named 'Bytes'
@@ -270,9 +275,9 @@ is different from one named 'bytes'.

--

-After you create a job, the only property you can change in the detector
-configuration object is the `detector_description`; all other properties are
-informational.
+After you create a job, the only properties you can change in the detector
+configuration object are the `detector_description` and the `custom_rules`;
+all other properties are informational.

[float]
[[ml-datadescription]]
@@ -408,6 +413,64 @@ the categorization field value came from.
For more information, see
{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].

+[float]
+[[ml-detector-custom-rule]]
+==== Detector Custom Rule
+
+{stack-ov}/ml-rules.html[Custom rules] enable you to customize the way detectors
+operate.
+
+A rule has the following properties:
+
+`actions`::
+  (array) The set of actions to be triggered when the rule applies.
+  If more than one action is specified, the effects of all actions are combined.
+  The available actions include: +
+  `skip_result`::: The result will not be created. This is the default value.
+  Unless you also specify `skip_model_update`, the model will be updated as
+  usual with the corresponding series value.
+  `skip_model_update`::: The value for that series will not be used to update
+  the model. Unless you also specify `skip_result`, the results will be created
+  as usual. This action is suitable when certain values are expected to be
+  consistently anomalous and they affect the model in a way that negatively
+  impacts the rest of the results.
+
+`scope`::
+  (object) An optional scope of series where the rule applies. By default the scope
+  includes all series. Scoping is allowed for any of the partition/by/over fields.
+  To add a scope for a field, add the field name as a key in the scope object and
+  set its value to an object with these properties:
+  `filter_id`::
+    (string) The id of the <<ml-filter-resource,filter>> to be used.
+  `filter_type`::
+    (string) Either `include` (the rule applies for values in the filter)
+    or `exclude` (the rule applies for values not in the filter). Defaults
+    to `include`.
+
+`conditions`::
+  (array) An optional array of numeric conditions under which the rule applies.
+  Multiple conditions are combined together with a logical `AND`.
+  +
+  --
+  NOTE: If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare`
+  functions, you cannot specify `conditions` for your rule.
+
+  A condition has the following properties:
+
+  `applies_to`:::
+    (string) Specifies the result property to which the condition applies.
+    The available options are `actual`, `typical`, `diff_from_typical`, and `time`.
+  `operator`:::
+    (string) Specifies the condition operator. The available options are
+    `gt` (greater than), `gte` (greater than or equals), `lt` (less than), and `lte` (less than or equals).
+  `value`:::
+    (double) The value that is compared against the `applies_to` field using the `operator`.
+  --
+
+A rule is required to either have a non-empty scope or at least one condition.
+For more examples, see
+{stack-ov}/ml-configuring-detector-custom-rules.html[Configuring Detector Custom Rules].

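To tie the properties together, here is a sketch of a rule that combines scope
and a condition (all field, filter, and threshold values below are
hypothetical placeholders, not values from the original page):

[source,js]
----
"custom_rules": [{
  "actions": ["skip_result"],
  "scope": {
    "my_partition": {
      "filter_id": "filter_1",
      "filter_type": "exclude"
    }
  },
  "conditions": [
    { "applies_to": "actual", "operator": "gte", "value": 10.0 }
  ]
}]
----
// NOTCONSOLE
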
[float]
[[ml-apilimits]]
==== Analysis Limits
@@ -448,8 +511,7 @@ Specifying a string is recommended for clarity. If you specify a byte size unit
of `b` or `kb` and the number does not equate to a discrete number of megabytes,
it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you
specify a value less than 1 MiB, an error occurs. For more information about
-supported byte size units, see
-{ref}/common-options.html#byte-units[Byte size units].
+supported byte size units, see <<byte-units,Common options>>.

If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit`
setting, an error occurs when you try to create jobs that have

@@ -0,0 +1,68 @@
[role="xpack"]
[[ml-put-filter]]
=== Create Filter API
++++
<titleabbrev>Create Filter</titleabbrev>
++++

Instantiates a filter.

==== Request

`PUT _xpack/ml/filters/<filter_id>`

==== Description

A {stack-ov}/ml-rules.html[filter] contains a list of strings.
It can be used by one or more jobs. Specifically, filters are referenced in
the `custom_rules` property of <<ml-detectorconfig,detector configuration objects>>.

==== Path Parameters

`filter_id` (required)::
  (string) Identifier for the filter.

==== Request Body

`description`::
  (string) A description of the filter.

`items`::
  (array of strings) The items of the filter. A wildcard `*` can be used at
  the beginning or the end of an item. Up to 10,000 items are allowed in each
  filter.

==== Authorization

You must have `manage_ml` or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].

==== Examples

The following example creates the `safe_domains` filter:

[source,js]
--------------------------------------------------
PUT _xpack/ml/filters/safe_domains
{
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
--------------------------------------------------
// CONSOLE

When the filter is created, you receive the following response:

[source,js]
----
{
  "filter_id": "safe_domains",
  "description": "A list of safe domains",
  "items": ["*.google.com", "wikipedia.org"]
}
----
//TESTRESPONSE

@@ -0,0 +1,67 @@
[role="xpack"]
[[ml-update-filter]]
=== Update Filter API
++++
<titleabbrev>Update Filter</titleabbrev>
++++

Updates the description of a filter, adds items, or removes items.

==== Request

`POST _xpack/ml/filters/<filter_id>/_update`

//==== Description

==== Path Parameters

`filter_id` (required)::
  (string) Identifier for the filter.

==== Request Body

`description`::
  (string) A description for the filter. See <<ml-filter-resource>>.

`add_items`::
  (array of strings) The items to add to the filter.

`remove_items`::
  (array of strings) The items to remove from the filter.

==== Authorization

You must have `manage_ml` or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].

==== Examples

You can change the description of the `safe_domains` filter and add or remove
items as follows:

[source,js]
--------------------------------------------------
POST _xpack/ml/filters/safe_domains/_update
{
  "description": "Updated list of domains",
  "add_items": ["*.myorg.com"],
  "remove_items": ["wikipedia.org"]
}
--------------------------------------------------
// CONSOLE
// TEST[setup:ml_filter_safe_domains]

The API returns the following results:

[source,js]
----
{
  "filter_id": "safe_domains",
  "description": "Updated list of domains",
  "items": ["*.google.com", "*.myorg.com"]
}
----
//TESTRESPONSE

@@ -35,6 +35,8 @@ each periodic persistence of the model. See <<ml-job-resource>>. | Yes

|`description` |A description of the job. See <<ml-job-resource>>. | No

+|`detectors` |An array of <<ml-detector-update, detector update objects>>. | No
+
|`groups` |A list of job groups. See <<ml-job-resource>>. | No

|`model_plot_config`: `enabled` |If true, enables calculation and storage of the
@@ -58,12 +60,6 @@ if the job is open when you make the update, you must stop the data feed, close
the job, then restart the data feed and open the job for the changes to take
effect.

-//|`analysis_config`: `detectors`: `detector_index` | A unique identifier of the
-//detector. Matches the order of detectors returned by
-//<<ml-get-job,GET job>>, starting from 0. | No
-//|`analysis_config`: `detectors`: `detector_description` |A description of the
-//detector. See <<ml-analysisconfig>>. | No

[NOTE]
--
* You can update the `analysis_limits` only while the job is closed.
@@ -73,6 +69,21 @@ of `hard_limit`, this means that it was unable to process some data. You might
want to re-run this job with an increased `model_memory_limit`.
--

+[[ml-detector-update]]
+==== Detector Update Objects
+
+A detector update object has the following properties:
+
+`detector_index`::
+  (integer) The identifier of the detector to update.
+
+`description`::
+  (string) The new description for the detector.
+
+`custom_rules`::
+  (array) The new list of <<ml-detector-custom-rule, rules>> for the detector.
+
+No other detector property can be updated.

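As an illustration (not part of the original page; the job name `my_job` and
the detector details are placeholders), an update that rewrites the first
detector's description and rules could look like:

[source,js]
----
POST _xpack/ml/anomaly_detectors/my_job/_update
{
  "detectors": [{
    "detector_index": 0,
    "description": "Low event counts on the login service",
    "custom_rules": [{
      "actions": ["skip_result"],
      "conditions": [
        { "applies_to": "actual", "operator": "lt", "value": 5 }
      ]
    }]
  }]
}
----
// NOTCONSOLE
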
==== Authorization

@@ -169,9 +169,9 @@ aggregation and the Slack action:
more information, see
<<slack-dynamic-attachment, Using Attachments to Format Slack Messages>>.

-| `proxy.host` | no | - | The proxy host to use (only in combination with `proxy.port`)
+| `proxy.host` | no | The proxy host to use (only in combination with `proxy.port`)

-| `proxy.port` | no | - | The proxy port to use (only in combination with `proxy.host`)
+| `proxy.port` | no | The proxy port to use (only in combination with `proxy.host`)
|======

[[configuring-slack]]

@@ -80,6 +80,9 @@
        },
        "rule": {
          "type": "keyword"
+       },
+       "opaque_id": {
+         "type": "keyword"
        }
      }
    }

@@ -508,10 +508,12 @@ public class RollupResponseTranslationTests extends AggregatorTestCase {
        BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
        ScriptService scriptService = mock(ScriptService.class);
        InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(bigArrays, scriptService, true);
-       Exception e = expectThrows(RuntimeException.class,
+       ClassCastException e = expectThrows(ClassCastException.class,
            () -> RollupResponseTranslator.combineResponses(msearch, reduceContext));
-       assertThat(e.getMessage(), equalTo("org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds " +
-           "cannot be cast to org.elasticsearch.search.aggregations.InternalMultiBucketAggregation"));
+       assertThat(e.getMessage(),
+           containsString("org.elasticsearch.search.aggregations.metrics.geobounds.InternalGeoBounds"));
+       assertThat(e.getMessage(),
+           containsString("org.elasticsearch.search.aggregations.InternalMultiBucketAggregation"));
    }

    public void testDateHisto() throws IOException {

@@ -47,6 +47,7 @@ import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportMessage;
import org.elasticsearch.xpack.core.XPackClientPlugin;
@@ -882,6 +883,12 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl
        builder.field(Field.NODE_HOST_ADDRESS, nodeHostAddress);
        builder.field(Field.LAYER, layer);
        builder.field(Field.TYPE, type);
+
+       String opaqueId = threadPool.getThreadContext().getHeader(Task.X_OPAQUE_ID);
+       if (opaqueId != null) {
+           builder.field("opaque_id", opaqueId);
+       }
+
        return builder;
    }

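To show what this change enables (a sketch mirroring the integration test
below, not part of the commit): once a client sends a request carrying an
`X-Opaque-Id` header (the value of `Task.X_OPAQUE_ID`), the resulting audit
entries can be retrieved by matching on the new field:

[source,js]
----
GET .security_audit_log*/_search
{
  "query": {
    "match": { "opaque_id": "foo" }
  }
}
----
// NOTCONSOLE
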
@@ -11,6 +11,8 @@ import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplat
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
@@ -18,8 +20,10 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.TestCluster;
import org.elasticsearch.xpack.core.XPackClientPlugin;
@@ -112,8 +116,53 @@ public class IndexAuditIT extends ESIntegTestCase {
                UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray()))));
        assertThat(response.getStatusLine().getStatusCode(), is(200));
        final AtomicReference<ClusterState> lastClusterState = new AtomicReference<>();
+       final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("principal", USER));
+
+       assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found);
+
+       SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery(
+               QueryBuilders.matchQuery("principal", USER)).get();
+       assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
+       assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER));
+   }
+
+   public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception {
+       // this is already "tested" by the test framework since we wipe the templates before and after,
+       // but lets be explicit about the behavior
+       awaitIndexTemplateCreation();
+
+       // delete the template
+       DeleteIndexTemplateResponse deleteResponse = client().admin().indices()
+               .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet();
+       assertThat(deleteResponse.isAcknowledged(), is(true));
+       awaitIndexTemplateCreation();
+   }
+
+   public void testOpaqueIdWorking() throws Exception {
+       Request request = new Request("GET", "/");
+       RequestOptions.Builder options = request.getOptions().toBuilder();
+       options.addHeader(Task.X_OPAQUE_ID, "foo");
+       options.addHeader(UsernamePasswordToken.BASIC_AUTH_HEADER,
+               UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())));
+       request.setOptions(options);
+       Response response = getRestClient().performRequest(request);
+       assertThat(response.getStatusLine().getStatusCode(), is(200));
+       final AtomicReference<ClusterState> lastClusterState = new AtomicReference<>();
+       final boolean found = awaitSecurityAuditIndex(lastClusterState, QueryBuilders.matchQuery("opaque_id", "foo"));
+
+       assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found);
+
+       SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery(
+               QueryBuilders.matchQuery("opaque_id", "foo")).get();
+       assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
+
+       assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("opaque_id"), is("foo"));
+   }
+
+   private boolean awaitSecurityAuditIndex(AtomicReference<ClusterState> lastClusterState,
+                                           QueryBuilder query) throws InterruptedException {
        final AtomicBoolean indexExists = new AtomicBoolean(false);
-       final boolean found = awaitBusy(() -> {
+       return awaitBusy(() -> {
            if (indexExists.get() == false) {
                ClusterState state = client().admin().cluster().prepareState().get().getState();
                lastClusterState.set(state);
@@ -138,28 +187,9 @@ public class IndexAuditIT extends ESIntegTestCase {
            logger.info("refreshing audit indices");
            client().admin().indices().prepareRefresh(".security_audit_log*").get();
            logger.info("refreshed audit indices");
-           return client().prepareSearch(".security_audit_log*").setQuery(QueryBuilders.matchQuery("principal", USER))
+           return client().prepareSearch(".security_audit_log*").setQuery(query)
                    .get().getHits().getTotalHits() > 0;
        }, 60L, TimeUnit.SECONDS);
-
-       assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found);
-
-       SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery(
-               QueryBuilders.matchQuery("principal", USER)).get();
-       assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
-       assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER));
-   }
-
-   public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception {
-       // this is already "tested" by the test framework since we wipe the templates before and after,
-       // but lets be explicit about the behavior
-       awaitIndexTemplateCreation();
-
-       // delete the template
-       DeleteIndexTemplateResponse deleteResponse = client().admin().indices()
-               .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet();
-       assertThat(deleteResponse.isAcknowledged(), is(true));
-       awaitIndexTemplateCreation();
-   }

    private void awaitIndexTemplateCreation() throws InterruptedException {