Merge branch 'master' into feature/sql
Original commit: elastic/x-pack-elasticsearch@decda51b4f
@@ -94,3 +94,40 @@ gradle clean assemble
-----
gradle clean install
-----

= Building documentation

The source files in this repository can be included in either the X-Pack Reference or the Elasticsearch Reference.

NOTE: In 5.5 and later, the Elasticsearch Reference includes X-Pack-specific content when it is built from this repo.

To build the Elasticsearch Reference on your local machine:

* Use the `index.asciidoc` file in the docs/en directory.
* Specify the location of the `elasticsearch/docs` directory with the `--resource` option when you run `build_docs.pl`.

For example:

[source, txt]
-----
./docs/build_docs.pl --doc elasticsearch-extra/x-pack-elasticsearch/docs/en/index.asciidoc
--resource=elasticsearch/docs --chunk 1
-----

For information about building the X-Pack Reference, see the README in the x-pack repo.

== Adding Images

When you include an image in the documentation, specify the path relative to the location of the asciidoc file. By convention, we put images in an `images` subdirectory.

For example, to insert `watcher-ui-edit-watch.png` in `watcher/limitations.asciidoc`:

. Add an `images` subdirectory to the watcher directory if it doesn't already exist.
. In `limitations.asciidoc` specify:
+
[source, txt]
-----
image::images/watcher-ui-edit-watch.png["Editing a watch"]
-----

Note that image names and anchor IDs must be unique within the book, so do not use generic identifiers.

build.gradle

@@ -44,8 +44,18 @@ subprojects {
    additionalLicense 'ESCON', 'Elasticsearch Confidential', 'ELASTICSEARCH CONFIDENTIAL'
  }
  ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-api:${version}": ':x-pack-elasticsearch:plugin']

  if (wireCompatVersions[-1].snapshot) {
    ext.projectSubstitutions += [
        "org.elasticsearch.plugin:x-pack:${wireCompatVersions[-1]}": ':x-pack-elasticsearch:plugin:bwc-zip']
    /* The last and second to last versions can be snapshots. Rather than use
     * snapshots built by CI we connect these versions to projects that build
     * those versions from the HEAD of the appropriate branch. */
    if (indexCompatVersions[-1].bugfix == 0) {
      ext.projectSubstitutions += [
          "org.elasticsearch.plugin:x-pack:${indexCompatVersions[-1]}": ':x-pack-elasticsearch:plugin:bwc:stable-snapshot',
          "org.elasticsearch.plugin:x-pack:${indexCompatVersions[-2]}": ':x-pack-elasticsearch:plugin:bwc:release-snapshot']
    } else {
      ext.projectSubstitutions += [
          "org.elasticsearch.plugin:x-pack:${indexCompatVersions[-1]}": ':x-pack-elasticsearch:plugin:bwc:release-snapshot']
    }
  }
}

@@ -64,7 +64,6 @@ buildRestTests.expectedUnconvertedCandidates = [
  'en/watcher/troubleshooting.asciidoc',
  'en/ml/api-quickref.asciidoc',
  'en/rest-api/ml/close-job.asciidoc',
  'en/rest-api/ml/datafeedresource.asciidoc',
  'en/rest-api/ml/delete-datafeed.asciidoc',
  'en/rest-api/ml/delete-snapshot.asciidoc',
  'en/rest-api/ml/flush-job.asciidoc',

@@ -158,7 +157,6 @@ buildRestTests.docs = fileTree(projectDir) {
  // That is where the snippets go, not where they come from!
  exclude 'build'
  // These files simply don't pass yet. We should figure out how to fix them.
  exclude 'en/rest-api/watcher/ack-watch.asciidoc'
  exclude 'en/watcher/reference/actions.asciidoc'
  exclude 'en/rest-api/graph/explore.asciidoc'
}

@@ -0,0 +1,41 @@
[[elasticsearch-reference]]
= Elasticsearch Reference

:include-xpack: true

:xes-repo-dir: {docdir}
:es-repo-dir: {docdir}/../../../../elasticsearch/docs
:es-test-dir: {docdir}/../../../../elasticsearch/docs/src/test
:plugins-examples-dir: {docdir}/../../../../elasticsearch/plugins/examples

:security: X-Pack security
:monitoring: X-Pack monitoring
:watcher: Watcher
:reporting: X-Pack reporting
:graph: X-Pack graph
:searchprofiler: X-Pack search profiler
:xpackml: X-Pack machine learning
:ml: machine learning
:dfeed: datafeed
:dfeeds: datafeeds
:dfeed-cap: Datafeed
:dfeeds-cap: Datafeeds

include::{es-repo-dir}/Versions.asciidoc[]
include::{es-repo-dir}/reference/index-shared1.asciidoc[]

ifdef::include-xpack[]
:edit_url!:
include::setup-xes.asciidoc[]
endif::include-xpack[]

:edit_url:
include::{es-repo-dir}/reference/index-shared2.asciidoc[]

ifdef::include-xpack[]
:edit_url!:
include::rest-api/index.asciidoc[]
endif::include-xpack[]

:edit_url:
include::{es-repo-dir}/reference/index-shared3.asciidoc[]

@@ -0,0 +1,175 @@
[role="xpack"]
[[installing-xpack-es]]
== Installing X-Pack

After you install {es}, you can optionally obtain and install {xpack}.
For more information about how to obtain {xpack},
see https://www.elastic.co/products/x-pack.

You must run the version of {xpack} that matches the version of {es} you are running.

IMPORTANT: If you are installing {xpack} for the first time on an existing
cluster, you must perform a full cluster restart. Installing {xpack} enables
security, and security must be enabled on ALL nodes in a cluster for the cluster
to operate correctly. When upgrading, you can usually perform
a {ref}/rolling-upgrades.html[rolling upgrade].

To install {xpack} in {es}:

. Run `bin/elasticsearch-plugin install` from `ES_HOME` on each node in your cluster:
+
--
[source,shell]
----------------------------------------------------------
bin/elasticsearch-plugin install x-pack
----------------------------------------------------------

NOTE: If you are using a <<xpack-package-installation, DEB/RPM distribution>>
of {es}, run the installation with superuser permissions. To
perform an offline installation, <<xpack-installing-offline, download the X-Pack binaries>>.

--

. Confirm that you want to grant {xpack} additional permissions.
+
--
TIP: Specify the `--batch` option when running the install command to
automatically grant these permissions and bypass these install prompts.

--
+
.. {xpack} needs these permissions to set the thread context loader during
install so {watcher} can send email notifications.
+
--
[source,shell]
----------------------------------------------------------
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@     WARNING: plugin requires additional permissions     @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
* java.lang.RuntimePermission accessClassInPackage.com.sun.activation.registries
* java.lang.RuntimePermission getClassLoader
* java.lang.RuntimePermission setContextClassLoader
* java.lang.RuntimePermission setFactory
* java.security.SecurityPermission createPolicy.JavaPolicy
* java.security.SecurityPermission getPolicy
* java.security.SecurityPermission putProviderProperty.BC
* java.security.SecurityPermission setPolicy
* java.util.PropertyPermission * read,write
* java.util.PropertyPermission sun.nio.ch.bugLevel write
* javax.net.ssl.SSLPermission setHostnameVerifier
See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html
for descriptions of what these permissions allow and the associated risks.

Continue with installation? [y/N]y
----------------------------------------------------------
--
.. {xpack} requires permissions to enable {es} to launch the {ml} analytical
engine. The native controller ensures that the launched process is a valid
{ml} component. Once launched, communications between the {ml} processes and
{es} are limited to the operating system user that {es} runs as.
+
--
[source,shell]
----------------------------------------------------------
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@        WARNING: plugin forks a native controller        @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
This plugin launches a native controller that is not subject to
the Java security manager nor to system call filters.

Continue with installation? [y/N]y
----------------------------------------------------------
--

. If you have disabled automatic index creation in {es}, configure
{ref}/docs-index_.html#index-creation[`action.auto_create_index`] in
`elasticsearch.yml` to allow {xpack} to create the following indices:
+
--
[source,yaml]
-----------------------------------------------------------
action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*
-----------------------------------------------------------
--

. Start {es}.
+
--
[source,shell]
----------------------------------------------------------
bin/elasticsearch
----------------------------------------------------------
--

For information about installing {xpack} in {kib} and Logstash, see
{xpack-ref}/installing-xpack.html[Installing {xpack}].

[IMPORTANT]
=============================================================================
SSL/TLS encryption is disabled by default, which means user credentials are
passed in the clear. **Do not deploy to production without enabling encryption!**
For more information, see {xpack-ref}/encrypting-communications.html[Encrypting
Communications].

You must also **change the passwords for the built-in `elastic` user and the
`kibana` user that enables {kib} to communicate with {es} before
deploying to production**. For more information,
see {xpack-ref}/setting-up-authentication.html[Setting Up User Authentication].
=============================================================================

[float]
[[xpack-package-installation]]
=== Installing {xpack} on a DEB/RPM Package Installation

If you use the DEB/RPM packages to install {es}, by default {es} is installed
in `/usr/share/elasticsearch` and the configuration files are stored
in `/etc/elasticsearch`. (For the complete list of default paths, see
{ref}/deb.html#deb-layout[Debian Directory Layout] and
{ref}/rpm.html#rpm-layout[RPM Directory Layout] in the {es} Reference.)

To install {xpack} on a DEB/RPM package installation, run
`bin/elasticsearch-plugin install` from the `/usr/share/elasticsearch` directory
with superuser permissions:

[source,shell]
----------------------------------------------------------
cd /usr/share/elasticsearch
sudo bin/elasticsearch-plugin install x-pack
----------------------------------------------------------

NOTE: If the configuration files are not in `/etc/elasticsearch`, you
need to specify the location of the configuration files by
setting the system property `es.path.conf` to the config path via
`ES_JAVA_OPTS="-Des.path.conf=<path>"` or by setting the
environment variable `CONF_DIR` via `CONF_DIR=<path>`.

[float]
[[xpack-installing-offline]]
=== Installing {xpack} on Offline Machines

The plugin install scripts require direct Internet access to download and
install {xpack}. If your server doesn't have Internet access, you
can manually download and install {xpack}.

To install {xpack} on a machine that doesn't have Internet access:

. Manually download the {xpack} zip file:
https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip[
+https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip+]
(https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip.sha1[sha1])

. Transfer the zip file to a temporary directory on the offline machine. (Do NOT
put the file in the {es} plugins directory.)

. Run `bin/elasticsearch-plugin install` from the {es} install directory
and specify the location of the {xpack} zip file. For example:
+
--
["source","sh",subs="attributes"]
----------------------------------------------------------
bin/elasticsearch-plugin install file:///path/to/file/x-pack-{version}.zip
----------------------------------------------------------

NOTE: You must specify an absolute path to the zip file after the `file://` protocol.

--

@@ -14,72 +14,63 @@ The main {ml} resources can be accessed with a variety of endpoints:
* <<ml-api-datafeeds,+/datafeeds/+>>: Select data from {es} to be analyzed
* <<ml-api-results,+/results/+>>: Access the results of a {ml} job
* <<ml-api-snapshots,+/model_snapshots/+>>: Manage model snapshots
//* <<ml-api-validate,+/validate/+>>: Validate subsections of job configurations

[float]
[[ml-api-jobs]]
=== /anomaly_detectors/

* {ref}/ml-put-job.html[PUT /anomaly_detectors/<job_id+++>+++]: Create a job
* {ref}/ml-open-job.html[POST /anomaly_detectors/<job_id>/_open]: Open a job
* {ref}/ml-post-data.html[POST /anomaly_detectors/<job_id>/_data]: Send data to a job
* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs
* {ref}/ml-get-job.html[GET /anomaly_detectors/<job_id+++>+++]: Get job details
* {ref}/ml-get-job-stats.html[GET /anomaly_detectors/<job_id>/_stats]: Get job statistics
* {ref}/ml-update-job.html[POST /anomaly_detectors/<job_id>/_update]: Update certain properties of the job configuration
* {ref}/ml-flush-job.html[POST /anomaly_detectors/<job_id>/_flush]: Force a job to analyze buffered data
* {ref}/ml-close-job.html[POST /anomaly_detectors/<job_id>/_close]: Close a job
* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/<job_id+++>+++]: Delete a job
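
For example, the following sketch creates and then opens a minimal job. The
job name, bucket span, and field names are illustrative assumptions, not part
of the quick reference itself:

[source,js]
--------------------------------------------------
PUT _xpack/ml/anomaly_detectors/example_job
{
  "analysis_config": {
    "bucket_span": "15m",
    "detectors": [
      { "function": "count" }
    ]
  },
  "data_description": {
    "time_field": "timestamp",
    "time_format": "epoch_ms"
  }
}

POST _xpack/ml/anomaly_detectors/example_job/_open
--------------------------------------------------
//NOTCONSOLE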

[float]
[[ml-api-datafeeds]]
=== /datafeeds/

* {ref}/ml-put-datafeed.html[PUT /datafeeds/<datafeed_id+++>+++]: Create a {dfeed}
* {ref}/ml-start-datafeed.html[POST /datafeeds/<datafeed_id>/_start]: Start a {dfeed}
* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds}
* {ref}/ml-get-datafeed.html[GET /datafeeds/<datafeed_id+++>+++]: Get {dfeed} details
* {ref}/ml-get-datafeed-stats.html[GET /datafeeds/<datafeed_id>/_stats]: Get statistical information for {dfeeds}
* {ref}/ml-preview-datafeed.html[GET /datafeeds/<datafeed_id>/_preview]: Get a preview of a {dfeed}
* {ref}/ml-update-datafeed.html[POST /datafeeds/<datafeed_id>/_update]: Update certain settings for a {dfeed}
* {ref}/ml-stop-datafeed.html[POST /datafeeds/<datafeed_id>/_stop]: Stop a {dfeed}
* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/<datafeed_id+++>+++]: Delete a {dfeed}
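
Continuing the sketch, a {dfeed} could be created for that job and started; the
{dfeed} name, index, and type are illustrative assumptions:

[source,js]
--------------------------------------------------
PUT _xpack/ml/datafeeds/datafeed-example
{
  "job_id": "example_job",
  "indexes": ["server-metrics"],
  "types": ["metric"]
}

POST _xpack/ml/datafeeds/datafeed-example/_start
--------------------------------------------------
//NOTCONSOLE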

[float]
[[ml-api-results]]
=== /results/

* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results
* {ref}/ml-get-bucket.html[GET /results/buckets/<bucket_id+++>+++]: Get bucket details
* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results
* {ref}/ml-get-category.html[GET /results/categories/<category_id+++>+++]: Get category details
* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details
* {ref}/ml-get-record.html[GET /results/records]: Get records from the results
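
For example, a sketch of retrieving buckets for the hypothetical job above;
the anomaly score threshold is illustrative:

[source,js]
--------------------------------------------------
GET _xpack/ml/anomaly_detectors/example_job/results/buckets
{
  "anomaly_score": 75.0
}
--------------------------------------------------
//NOTCONSOLE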

[float]
[[ml-api-snapshots]]
=== /model_snapshots/

* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots
* {ref}/ml-get-snapshot.html[GET /model_snapshots/<snapshot_id+++>+++]: Get model snapshot details
* {ref}/ml-revert-snapshot.html[POST /model_snapshots/<snapshot_id>/_revert]: Revert a model snapshot
* {ref}/ml-update-snapshot.html[POST /model_snapshots/<snapshot_id>/_update]: Update certain settings for a model snapshot
* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/<snapshot_id+++>+++]: Delete a model snapshot

////
[float]
[[ml-api-validate]]
=== /validate/

* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector
* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job
////

@@ -0,0 +1,87 @@
[[ml-configuring-categories]]
=== Categorizing log messages

Application log events are often unstructured and contain variable data. For
example:
//Obtained from it_ops_new_app_logs.json
[source,js]
----------------------------------
{"time":1454516381000,"message":"org.jdbi.v2.exceptions.UnableToExecuteStatementException: com.mysql.jdbc.exceptions.MySQLTimeoutException: Statement cancelled due to timeout or client request [statement:\"SELECT id, customer_id, name, force_disabled, enabled FROM customers\"]","type":"logs"}
----------------------------------
//NOTCONSOLE

You can use {ml} to observe the static parts of the message, cluster similar
messages together, and classify them into message categories. The {ml} model
learns what volume and pattern is normal for each category over time. You can
then detect anomalies and surface rare events or unusual types of messages by
using count or rare functions. For example:

//Obtained from it_ops_new_app_logs.sh
[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/it_ops_new_logs
{
  "description" : "IT Ops Application Logs",
  "analysis_config" : {
    "categorization_field_name": "message", <1>
    "bucket_span":"30m",
    "detectors" :[{
      "function":"count",
      "by_field_name": "mlcategory", <2>
      "detector_description": "Unusual message counts"
    }],
    "categorization_filters":[ "\\[statement:.*\\]"]
  },
  "analysis_limits":{
    "categorization_examples_limit": 5
  },
  "data_description" : {
    "time_field":"time",
    "time_format": "epoch_ms"
  }
}
----------------------------------
//CONSOLE
<1> The `categorization_field_name` property indicates which field will be
categorized.
<2> The resulting categories can be used in a detector by setting `by_field_name`,
`over_field_name`, or `partition_field_name` to the keyword `mlcategory`.

The optional `categorization_examples_limit` property specifies the
maximum number of examples that are stored in memory and in the results data
store for each category. The default value is `4`. Note that this setting does
not affect the categorization; it just affects the list of visible examples. If
you increase this value, more examples are available, but you must have more
storage available. If you set this value to `0`, no examples are stored.

The optional `categorization_filters` property can contain an array of regular
expressions. If a categorization field value matches the regular expression, the
portion of the field that is matched is not taken into consideration when
defining categories. The categorization filters are applied in the order they
are listed in the job configuration, which allows you to disregard multiple
sections of the categorization field value. In this example, we have decided that
we do not want the detailed SQL to be considered in the message categorization.
This particular categorization filter removes the SQL statement from the categorization
algorithm.
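
For instance, a job that also needed to ignore a variable session token could
list two filters, which are applied in this order. The second pattern is a
hypothetical illustration, not part of the example job above:

[source,js]
----------------------------------
"categorization_filters": [
  "\\[statement:.*\\]",
  "\\[session-id:\\w+\\]"
]
----------------------------------
//NOTCONSOLE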

If your data is stored in {es}, you can create an advanced job with these same
properties:

[role="screenshot"]
image::images/ml-category-advanced.jpg["Advanced job configuration options related to categorization"]

NOTE: To add the `categorization_examples_limit` property, you must use the
**Edit JSON** tab and copy the `analysis_limits` object from the API example.

After you open the job and start the {dfeed} or supply data to the job, you can
view the results in {kib}. For example:

[role="screenshot"]
image::images/ml-category-anomalies.jpg["Categorization example in the Anomaly Explorer"]

For this type of job, the **Anomaly Explorer** contains extra information for
each anomaly: the name of the category (for example, `mlcategory 11`) and
examples of the messages in that category. In this case, you can use these
details to investigate occurrences of unusually high message counts for specific
message categories.

@@ -4,7 +4,7 @@
If you want to use {xpackml} features, there must be at least one {ml} node in
your cluster and all master-eligible nodes must have {ml} enabled. By default,
when you install {xpack}, all nodes are {ml} nodes. For more information about
these settings, see <<xpack-settings>>.

To use the {xpackml} features to analyze your data, you must create a job and
send your data to that job.

@@ -14,8 +14,8 @@ send your data to that job.
** You can create a {dfeed}, which retrieves data from {es} for analysis.
** You can use {kib} to expedite the creation of jobs and {dfeeds}.

* If your data is not stored in {es}, you can
{ref}/ml-post-data.html[POST data] from any source directly to an API.

The results of {ml} analysis are stored in {es} and you can use {kib} to help
you visualize and explore the results.

@@ -29,5 +29,11 @@ The scenarios in this section describe some best practices for generating useful
{ml} results and insights from your data.

* <<ml-configuring-aggregation>>
* <<ml-configuring-categories>>
* <<ml-configuring-pop>>
* <<ml-configuring-transform>>

include::aggregations.asciidoc[]
include::categories.asciidoc[]
include::populations.asciidoc[]
include::transforms.asciidoc[]

@@ -1,47 +1,64 @@
[[ml-functions]]
== Function Reference

The {xpackml} features include analysis functions that provide a wide variety of
flexible ways to analyze data for anomalies.

When you create jobs, you specify one or more detectors, which define the type of
analysis that needs to be done. If you are creating your job by using {ml} APIs,
you specify the functions in
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
If you are creating your job in {kib}, you specify the functions differently
depending on whether you are creating single metric, multi-metric, or advanced
jobs. For a demonstration of creating jobs in {kib}, see <<ml-getting-started>>.

//TBD: Determine what these fields are called in Kibana, for people who aren't using APIs
////
TBD: Integrate from prelert docs?:
By default, temporal (time-based) analysis is invoked, unless you also specify an
`over_field_name`, which shifts the analysis to be population- or peer-based.

When you specify `by_field_name` with a function, the analysis considers whether
there is an anomaly for one or more specific values of `by_field_name`.

NOTE: Some functions cannot be used with a `by_field_name` or `over_field_name`.

You can specify a `partition_field_name` with any function. When this is used,
the analysis is replicated for every distinct value of `partition_field_name`.

You can specify a `summary_count_field_name` with any function except metric.
When you use `summary_count_field_name`, the {ml} features expect the input
data to be pre-summarized. The value of the `summary_count_field_name` field
must contain the count of raw events that were summarized.

Some functions can benefit from overlapping buckets. This improves the overall
accuracy of the results but at the cost of a 2 bucket delay in seeing the results.
////

Most functions detect anomalies in both low and high values. In statistical
terminology, they apply a two-sided test. Some functions offer low and high
variations (for example, `count`, `low_count`, and `high_count`). These variations
apply one-sided tests, detecting anomalies only when the values are low or
high, depending on which alternative is used.

//For some functions, you can optionally specify a field name in the
//`by_field_name` property. The analysis then considers whether there is an
//anomaly for one or more specific values of that field. In {kib}, use the
//**Key Fields** field in multi-metric jobs or the **by_field_name** field in
//advanced jobs.
////
TODO: Per Sophie, "This is incorrect... Split Data refers to a partition_field_name. Over fields can only be added in Adv Config...

Can you please remove the explanations for by/over/partition fields from the documentation for analytical functions. It's a complex topic and will be easier to review in a separate exercise."
////

//For some functions, you can also optionally specify a field name in the
//`over_field_name` property. This property shifts the analysis to be population-
//or peer-based and uses the field to split the data. In {kib}, use the
//**Split Data** field in multi-metric jobs or the **over_field_name** field in
//advanced jobs.

//You can specify a `partition_field_name` with any function. The analysis is then
//segmented with completely independent baselines for each value of that field.
//In {kib}, use the **partition_field_name** field in advanced jobs.

You can specify a `summary_count_field_name` with any function except `metric`.
When you use `summary_count_field_name`, the {ml} features expect the input
data to be pre-aggregated. The value of the `summary_count_field_name` field
must contain the count of raw events that were summarized. In {kib}, use the
**summary_count_field_name** in advanced jobs. Analyzing aggregated input data
provides a significant boost in performance. For more information, see
<<ml-configuring-aggregation>>.
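
As a minimal sketch, an `analysis_config` for pre-aggregated input might look
like this; the `events_per_min` field name and `10m` bucket span are
assumptions for illustration:

[source,js]
--------------------------------------------------
"analysis_config": {
  "bucket_span": "10m",
  "summary_count_field_name": "events_per_min",
  "detectors": [
    { "function": "count" }
  ]
}
--------------------------------------------------
//NOTCONSOLE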

If your data is sparse, there may be gaps in the data which means you might have
empty buckets. You might want to treat these as anomalies or you might want these
gaps to be ignored. Your decision depends on your use case and what is important
to you. It also depends on which functions you use. The `sum` and `count`
functions are strongly affected by empty buckets. For this reason, there are
`non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data.
These functions effectively ignore empty buckets.

////
Some functions can benefit from overlapping buckets. This improves the overall
accuracy of the results but at the cost of a 2 bucket delay in seeing the results.

The table below provides a high-level summary of the analytical functions provided by the API. Each of the functions is described in detail over the following pages. Note the examples given in these pages use single Detector Configuration objects.
////

@@ -52,3 +69,11 @@ The table below provides a high-level summary of the analytical functions provid
* <<ml-rare-functions>>
* <<ml-sum-functions>>
* <<ml-time-functions>>

include::functions/count.asciidoc[]
include::functions/geo.asciidoc[]
include::functions/info.asciidoc[]
include::functions/metric.asciidoc[]
include::functions/rare.asciidoc[]
include::functions/sum.asciidoc[]
include::functions/time.asciidoc[]

@@ -1,13 +1,7 @@
[[ml-count-functions]]
=== Count Functions

Count functions detect anomalies when the number of events in a bucket is
anomalous.

Use `non_zero_count` functions if your data is sparse and you want to ignore

@@ -19,111 +13,202 @@ in one field is unusual, as opposed to the total count.
Use high-sided functions if you want to monitor unusually high event rates.
Use low-sided functions if you want to look at drops in event rate.

The {xpackml} features include the following count functions:

* xref:ml-count[`count`, `high_count`, `low_count`]
* xref:ml-nonzero-count[`non_zero_count`, `high_non_zero_count`, `low_non_zero_count`]
* xref:ml-distinct-count[`distinct_count`, `high_distinct_count`, `low_distinct_count`]

[float]
[[ml-count]]
===== Count, High_count, Low_count

The `count` function detects anomalies when the number of events in a bucket is
anomalous.

The `high_count` function detects anomalies when the count of events in a
bucket is unusually high.

The `low_count` function detects anomalies when the count of events in a
bucket is unusually low.

These functions support the following properties:

* `by_field_name` (optional)
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties,
see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing events with the count function
[source,js]
--------------------------------------------------
{ "function" : "count" }
--------------------------------------------------

This example is probably the simplest possible analysis. It identifies
time buckets during which the overall count of events is higher or lower than
usual.

When you use this function in a detector in your job, it models the event rate
and detects when the event rate is unusual compared to its past behavior.

.Example 2: Analyzing errors with the high_count function
[source,js]
--------------------------------------------------
{
  "function" : "high_count",
  "by_field_name" : "error_code",
  "over_field_name": "user"
}
--------------------------------------------------

If you use this `high_count` function in a detector in your job, it
models the event rate for each error code. It detects users that generate an
unusually high count of error codes compared to other users.

.Example 3: Analyzing status codes with the low_count function
[source,js]
--------------------------------------------------
{
  "function" : "low_count",
  "by_field_name" : "status_code"
}
--------------------------------------------------

In this example, the function detects when the count of events for a
status code is lower than usual.

When you use this function in a detector in your job, it models the event rate
for each status code and detects when a status code has an unusually low count
compared to its past behavior.

.Example 4: Analyzing aggregated data with the count function
[source,js]
--------------------------------------------------
{
  "summary_count_field_name" : "events_per_min",
  "detectors" : [
    { "function" : "count" }
  ]
}
--------------------------------------------------

If you are analyzing an aggregated `events_per_min` field, do not use a sum
function (for example, `sum(events_per_min)`). Instead, use the count function
and the `summary_count_field_name` property.
//TO-DO: For more information, see <<aggregations.asciidoc>>.

[float]
[[ml-nonzero-count]]
===== Non_zero_count, High_non_zero_count, Low_non_zero_count

The `non_zero_count` function detects anomalies when the number of events in a
bucket is anomalous, but it ignores cases where the bucket count is zero. Use
this function if you know your data is sparse or has gaps and the gaps are not
important.

The `high_non_zero_count` function detects anomalies when the number of events
in a bucket is unusually high and it ignores cases where the bucket count is
zero.

The `low_non_zero_count` function detects anomalies when the number of events in
a bucket is unusually low and it ignores cases where the bucket count is zero.

These functions support the following properties:

* `by_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties,
see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

For example, if you have the following number of events per bucket:

========================================

1,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,43,31,0,0,0,0,0,0,0,0,0,0,0,0,2,1

========================================

The `non_zero_count` function models only the following data:

========================================

1,22,2,43,31,2,1

========================================

.Example 5: Analyzing signatures with the high_non_zero_count function
[source,js]
--------------------------------------------------
{
  "function" : "high_non_zero_count",
  "by_field_name" : "signaturename"
}
--------------------------------------------------

If you use this `high_non_zero_count` function in a detector in your job, it
models the count of events for the `signaturename` field. It ignores any buckets
where the count is zero and detects when a `signaturename` value has an
unusually high count of events compared to its past behavior.

NOTE: Population analysis (using an `over_field_name` property value) is not
supported for the `non_zero_count`, `high_non_zero_count`, and
`low_non_zero_count` functions. If you want to do population analysis and your
data is sparse, use the `count` functions, which are optimized for that scenario.

[float]
[[ml-distinct-count]]
===== Distinct_count, High_distinct_count, Low_distinct_count

The `distinct_count` function detects anomalies where the number of distinct
values in one field is unusual.

The `high_distinct_count` function detects unusually high numbers of distinct
values in one field.

The `low_distinct_count` function detects unusually low numbers of distinct
values in one field.

These functions support the following properties:

* `field_name` (required)
* `by_field_name` (optional)
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties,
see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 6: Analyzing users with the distinct_count function
[source,js]
--------------------------------------------------
{
  "function" : "distinct_count",
  "field_name" : "user"
}
--------------------------------------------------

This `distinct_count` function detects when a system has an unusual number
of logged in users. When you use this function in a detector in your job, it
models the distinct count of users. It also detects when the distinct number of
users is unusual compared to the past.

.Example 7: Analyzing ports with the high_distinct_count function
[source,js]
--------------------------------------------------
{
  "function" : "high_distinct_count",
  "field_name" : "dst_port",
  "over_field_name": "src_ip"
}
--------------------------------------------------

This example detects instances of port scanning. When you use this function in a
detector in your job, it models the distinct count of ports. It also detects the
`src_ip` values that connect to an unusually high number of different
`dst_port` values compared to other `src_ip` values.

@@ -21,7 +21,7 @@ This function supports the following properties:
* `partition_field_name` (optional)

For more information about those properties,
see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing transactions with the lat_long function
[source,js]

@@ -66,7 +66,7 @@ format. For example, the following Painless script transforms
"script_fields": {
  "lat-lon": {
    "script": {
      "source": "doc['coords'].lat + ',' + doc['coords'].lon",
      "lang": "painless"
    }
  }

@@ -74,4 +74,4 @@ format. For example, the following Painless script transforms
}
--------------------------------------------------

For more information, see <<ml-configuring-transform>>.

@@ -28,8 +28,8 @@ These functions support the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing subdomain strings with the info_content function
[source,js]

@@ -30,8 +30,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing minimum transactions with the min function
[source,js]

@@ -64,8 +64,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 2: Analyzing maximum response times with the max function
[source,js]

@@ -124,8 +124,8 @@ These functions support the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 4: Analyzing response times with the median function
[source,js]

@@ -161,8 +161,8 @@ These functions support the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 5: Analyzing response times with the mean function
[source,js]

@@ -225,8 +225,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 8: Analyzing response times with the metric function
[source,js]

@@ -261,8 +261,8 @@ These functions support the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 9: Analyzing response times with the varp function
[source,js]

@@ -41,8 +41,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing status codes with the rare function
[source,js]

@@ -97,8 +97,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 3: Analyzing URI values in a population with the freq_rare function
[source,js]

@@ -41,8 +41,8 @@ These functions support the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing total expenses with the sum function
[source,js]

@@ -95,8 +95,8 @@ These functions support the following properties:
* `by_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

NOTE: Population analysis (that is to say, use of the `over_field_name` property)
is not applicable for this function.

@@ -48,8 +48,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 1: Analyzing events with the time_of_day function
[source,js]

@@ -78,8 +78,8 @@ This function supports the following properties:
* `over_field_name` (optional)
* `partition_field_name` (optional)

For more information about those properties, see
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].

.Example 2: Analyzing events with the time_of_week function
[source,js]

@@ -59,7 +59,7 @@ When you install {xpack} into {es} and {kib}, the {ml} features are
enabled by default. If you have multiple nodes in your cluster, you can
optionally dedicate nodes to specific purposes. If you want to control which
nodes are _machine learning nodes_ or limit which nodes run resource-intensive
activity related to jobs, see <<xpack-settings>>.

[float]

@@ -315,7 +315,7 @@ analytical task.
--
This tutorial uses {kib} to create jobs and view results, but you can
alternatively use APIs to accomplish most tasks.
For API reference information, see {ref}/ml-apis.html[Machine Learning APIs].

The {xpackml} features in {kib} use pop-ups. You must configure your
web browser so that it does not block pop-up windows or create an

@@ -463,7 +463,7 @@ job is running.
TIP: The `create_single_metric.sh` script creates a similar job and {dfeed} by
using the {ml} APIs. You can download that script by clicking
here: https://download.elastic.co/demos/machine_learning/gettingstarted/create_single_metric.sh[create_single_metric.sh]
For API reference information, see {ref}/ml-apis.html[Machine Learning APIs].

[[ml-gs-job1-manage]]
=== Managing Jobs

@@ -33,3 +33,4 @@ include::configuring.asciidoc[]
// include::ml-scenarios.asciidoc[]
include::api-quickref.asciidoc[]
//include::troubleshooting.asciidoc[] Referenced from x-pack/docs/public/xpack-troubleshooting.asciidoc
include::functions.asciidoc[]

@ -33,13 +33,17 @@ see {ref}/modules-cross-cluster-search.html[Cross Cluster Search].
|
|||
|
||||
[float]
|
||||
=== Anomaly Explorer omissions and limitations
|
||||
//See x-pack-elasticsearch/#844
|
||||
//See x-pack-elasticsearch/#844 and x-pack-kibana/#1461
|
||||
|
||||
In Kibana, Anomaly Explorer charts are not displayed for anomalies
|
||||
In {kib}, Anomaly Explorer charts are not displayed for anomalies
|
||||
that were due to categorization, `time_of_day` functions, or `time_of_week`
|
||||
functions. Those particular results do not display well as time series
|
||||
charts.
|
||||
|
||||
The charts are also not displayed for detectors that use script fields. In that
|
||||
case, the original source data cannot be easily searched because it has been
|
||||
somewhat transformed by the script.
|
||||
|
||||
The Anomaly Explorer charts can also look odd in circumstances where there
|
||||
is very little data to plot. For example, if there is only one data point, it is
|
||||
represented as a single dot. If there are only two data points, they are joined
|
||||
|
@ -62,7 +66,8 @@ of closing and re-opening large jobs when there are pauses in the {dfeed}.
|
|||
The post data API enables you to send data to a job for analysis. The data that
|
||||
you send to the job must use the JSON format.
|
||||
|
||||
For more information about this API, see <<ml-post-data>>.
|
||||
For more information about this API, see
|
||||
{ref}/ml-post-data.html[Post Data to Jobs].
|
||||
|
||||
|
||||
[float]
|
||||
|
@ -79,7 +84,7 @@ Missing fields might be expected due to the structure of the data and therefore
|
|||
do not generate poor results.
|
||||
|
||||
For more information about `missing_field_count`,
|
||||
see <<ml-datacounts,Data Counts Objects>>.
|
||||
see {ref}/ml-jobstats.html#ml-datacounts[Data Counts Objects].
|
||||
|
||||
|
||||
[float]
|
||||
|
@ -112,7 +117,8 @@ this additional model information for every bucket might be problematic. If you
|
|||
are not certain that you need this option or if you experience performance
|
||||
issues, edit your job configuration to disable this option.
|
||||
|
||||
For more information, see <<ml-apimodelplotconfig,Model Plot Config>>.
|
||||
For more information, see
|
||||
{ref}/ml-job-resource.html#ml-apimodelplotconfig[Model Plot Config].
|
||||
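For example, assuming your version supports updating `model_plot_config` through the update job API, a sketch of disabling it on a hypothetical job named `my_job` looks like this:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/my_job/_update
{
  "model_plot_config": {
    "enabled": false
  }
}
----------------------------------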
|
||||
Likewise, when you create a single or multi-metric job in {kib}, in some cases
|
||||
it uses aggregations on the data that it retrieves from {es}. One of the
|
||||
|
@ -127,4 +133,4 @@ in performance that is gained by pre-aggregating the data makes the potentially
|
|||
poorer precision worthwhile. If you want to view or change the aggregations
|
||||
that are used in your job, refer to the `aggregations` property in your {dfeed}.
|
||||
|
||||
For more information, see <<ml-datafeed-resource>>.
|
||||
For more information, see {ref}/ml-datafeed-resource.html[Datafeed Resources].
|
||||
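For illustration, an aggregated {dfeed} typically nests a `date_histogram` whose sub-aggregations include a `max` on the time field plus the metrics the detectors need. In this sketch the job, index, and field names are assumptions:

[source,js]
----------------------------------
PUT _xpack/ml/datafeeds/datafeed-agg
{
  "job_id": "my_job",
  "indices": ["my_index"],
  "types": ["my_type"],
  "aggregations": {
    "buckets": {
      "date_histogram": {
        "field": "@timestamp",
        "interval": "5m"
      },
      "aggregations": {
        "@timestamp": {
          "max": {"field": "@timestamp"}
        },
        "avg_bytes": {
          "avg": {"field": "bytesSent"}
        }
      }
    }
  }
}
----------------------------------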
|
|
|
@ -10,7 +10,7 @@ concepts from the outset will tremendously help ease the learning process.
|
|||
|
||||
Machine learning jobs contain the configuration information and metadata
|
||||
necessary to perform an analytics task. For a list of the properties associated
|
||||
with a job, see <<ml-job-resource, Job Resources>>.
|
||||
with a job, see {ref}/ml-job-resource.html[Job Resources].
|
||||
|
||||
[float]
|
||||
[[ml-dfeeds]]
|
||||
|
@ -18,7 +18,7 @@ with a job, see <<ml-job-resource, Job Resources>>.
|
|||
|
||||
Jobs can analyze a one-off batch of data or run continuously in real time.
|
||||
{dfeeds-cap} retrieve data from {es} for analysis. Alternatively, you can
|
||||
<<ml-post-data,POST data>> from any source directly to an API.
|
||||
{ref}/ml-post-data.html[POST data] from any source directly to an API.
|
||||
|
||||
[float]
|
||||
[[ml-detectors]]
|
||||
|
@ -28,8 +28,8 @@ As part of the configuration information that is associated with a job,
|
|||
detectors define the type of analysis that needs to be done. They also specify
|
||||
which fields to analyze. You can have more than one detector in a job, which
|
||||
is more efficient than running multiple jobs against the same data. For a list
|
||||
of the properties associated with detectors,
|
||||
see <<ml-detectorconfig, Detector Configuration Objects>>.
|
||||
of the properties associated with detectors, see
|
||||
{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
|
||||
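For example, a single job can run two detectors over the same field, as in this hypothetical sketch (the job, field, and time field names are illustrative):

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/two_detectors
{
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      { "function": "mean", "field_name": "bytesSent" },
      { "function": "max", "field_name": "bytesSent" }
    ]
  },
  "data_description": {
    "time_field": "@timestamp"
  }
}
----------------------------------

Both detectors are applied to the same stream of data, which is what makes this more efficient than running two separate jobs.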
|
||||
[float]
|
||||
[[ml-buckets]]
|
||||
|
@ -52,14 +52,10 @@ A {ml} node is a node that has `xpack.ml.enabled` and `node.ml` set to `true`,
|
|||
which is the default behavior. If you set `node.ml` to `false`, the node can
|
||||
service API requests but it cannot run jobs. If you want to use {xpackml}
|
||||
features, there must be at least one {ml} node in your cluster. For more
|
||||
information about this setting, see <<ml-settings>>.
|
||||
information about this setting, see <<xpack-settings>>.
|
||||
|
||||
include::functions.asciidoc[]
|
||||
[float]
|
||||
[[ml-function-overview]]
|
||||
=== Analytical functions
|
||||
|
||||
include::functions/count.asciidoc[]
|
||||
include::functions/geo.asciidoc[]
|
||||
include::functions/info.asciidoc[]
|
||||
include::functions/metric.asciidoc[]
|
||||
include::functions/rare.asciidoc[]
|
||||
include::functions/sum.asciidoc[]
|
||||
include::functions/time.asciidoc[]
|
||||
See <<ml-functions>>.
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
[[ml-configuring-pop]]
|
||||
=== Performing Population Analysis
|
||||
|
||||
Entities or events in your data can be considered anomalous when:
|
||||
|
||||
* Their behavior changes over time, relative to their own previous behavior, or
|
||||
* Their behavior is different than other entities in a specified population.
|
||||
|
||||
The latter method of detecting outliers is known as _population analysis_. The
|
||||
{ml} analytics build a profile of what a "typical" user, machine, or other entity
|
||||
does over a specified time period and then identify when one is behaving
|
||||
abnormally compared to the population.
|
||||
|
||||
This type of analysis is most useful when the behavior of the population as a
|
||||
whole is mostly homogeneous and you want to identify outliers. In general,
|
||||
population analysis is not useful when members of the population inherently
|
||||
have vastly different behavior. You can, however, segment your data into groups
|
||||
that behave similarly and run these as separate jobs. For example, you can use a
|
||||
query filter in the {dfeed} to segment your data or you can use the
|
||||
`partition_field_name` to split the analysis for the different groups.
|
||||
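For example, a detector such as the following hypothetical sketch splits the analysis by a `machine_type` field (the job name and field names are assumptions for illustration):

[source,js]
----------------------------------
PUT _xpack/ml/anomaly_detectors/segmented
{
  "analysis_config": {
    "bucket_span": "10m",
    "detectors": [
      {
        "function": "mean",
        "field_name": "bytesSent",
        "partition_field_name": "machine_type"
      }
    ]
  },
  "data_description": {
    "time_field": "@timestamp"
  }
}
----------------------------------

Each distinct `machine_type` value then gets its own baseline, so dissimilar groups do not distort each other's models.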
|
||||
Population analysis scales well and has a lower resource footprint than
|
||||
individual analysis of each series. For example, you can analyze populations
|
||||
of hundreds of thousands or millions of entities.
|
||||
|
||||
To specify the population, use the `over_field_name` property. For example:
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
PUT _xpack/ml/anomaly_detectors/population
|
||||
{
|
||||
"description" : "Population analysis",
|
||||
"analysis_config" : {
|
||||
"bucket_span":"10m",
|
||||
"influencers": [
|
||||
"username"
|
||||
],
|
||||
"detectors": [
|
||||
{
|
||||
"function": "mean",
|
||||
"field_name": "bytesSent",
|
||||
"over_field_name": "username" <1>
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_description" : {
|
||||
"time_field":"@timestamp",
|
||||
"time_format": "epoch_ms"
|
||||
}
|
||||
}
|
||||
----------------------------------
|
||||
//CONSOLE
|
||||
<1> This `over_field_name` property indicates that the metrics for each user (
|
||||
as identified by their `username` value) are analyzed relative to other users
|
||||
in each bucket.
|
||||
|
||||
//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to
|
||||
//include only workstations as servers and printers would behave differently
|
||||
//from the population
|
||||
|
||||
If your data is stored in {es}, you can create an advanced job with these same
|
||||
properties. In particular, you specify the `over_field_name` property when you
|
||||
add detectors:
|
||||
|
||||
[role="screenshot"]
|
||||
image::images/ml-population-job.jpg["Create a detector for population analysis]
|
||||
|
||||
After you open the job and start the {dfeed} or supply data to the job, you can
|
||||
view the results in {kib}. For example:
|
||||
|
||||
[role="screenshot"]
|
||||
image::images/ml-population-results.jpg["Population analysis results in the Anomaly Explorer"]
|
||||
|
||||
As in this case, the results are often quite sparse. There might be just a few
|
||||
data points for the selected time period. Population analysis is particularly
|
||||
useful when you have many entities and the data for specific entities is sporadic
|
||||
or sparse.
|
||||
|
||||
If you click on a section in the time line or swim lanes, you can see more
|
||||
details about the anomalies:
|
||||
|
||||
[role="screenshot"]
|
||||
image::images/ml-population-anomaly.jpg["Anomaly details for a specific user"]
|
||||
|
||||
In this example, the user identified as `antonette` sent a high volume of bytes
|
||||
on the date and time shown. This event is anomalous because the mean is two times
|
||||
higher than the expected behavior of the population.
|
|
@ -0,0 +1,611 @@
|
|||
[[ml-configuring-transform]]
|
||||
=== Transforming Data With Script Fields
|
||||
|
||||
If you use {dfeeds}, you can add scripts to transform your data before
|
||||
it is analyzed. {dfeeds-cap} contain an optional `script_fields` property, where
|
||||
you can specify scripts that evaluate custom expressions and return script
|
||||
fields.
|
||||
|
||||
If your {dfeed} defines script fields, you can use those fields in your job.
|
||||
For example, you can use the script fields in the analysis functions in one or
|
||||
more detectors.
|
||||
|
||||
* <<ml-configuring-transform1>>
|
||||
* <<ml-configuring-transform2>>
|
||||
* <<ml-configuring-transform3>>
|
||||
* <<ml-configuring-transform4>>
|
||||
* <<ml-configuring-transform5>>
|
||||
* <<ml-configuring-transform6>>
|
||||
* <<ml-configuring-transform7>>
|
||||
* <<ml-configuring-transform8>>
|
||||
* <<ml-configuring-transform9>>
|
||||
|
||||
The following indices APIs create and add content to an index that is used in
|
||||
subsequent examples:
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
PUT /my_index
|
||||
{
|
||||
"mappings":{
|
||||
"my_type":{
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"aborted_count": {
|
||||
"type": "long"
|
||||
},
|
||||
"another_field": {
|
||||
"type": "keyword" <1>
|
||||
},
|
||||
"clientip": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"coords": {
|
||||
"properties": {
|
||||
"lat": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"lon": {
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"error_count": {
|
||||
"type": "long"
|
||||
},
|
||||
"query": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"some_field": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"tokenstring1":{
|
||||
"type":"keyword"
|
||||
},
|
||||
"tokenstring2":{
|
||||
"type":"keyword"
|
||||
},
|
||||
"tokenstring3":{
|
||||
"type":"keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PUT /my_index/my_type/1
|
||||
{
|
||||
"@timestamp":"2017-03-23T13:00:00",
|
||||
"error_count":36320,
|
||||
"aborted_count":4156,
|
||||
"some_field":"JOE",
|
||||
"another_field":"SMITH ",
|
||||
"tokenstring1":"foo-bar-baz",
|
||||
"tokenstring2":"foo bar baz",
|
||||
"tokenstring3":"foo-bar-19",
|
||||
"query":"www.ml.elastic.co",
|
||||
"clientip":"123.456.78.900",
|
||||
"coords": {
|
||||
"lat" : 41.44,
|
||||
"lon":90.5
|
||||
}
|
||||
}
|
||||
----------------------------------
|
||||
// CONSOLE
|
||||
// TESTSETUP
|
||||
<1> In this example, string fields are mapped as `keyword` fields to support
|
||||
aggregation. If you want both a full text (`text`) and a keyword (`keyword`)
|
||||
version of the same field, use multi-fields. For more information, see
|
||||
{ref}/multi-fields.html[fields].
|
||||
|
||||
[[ml-configuring-transform1]]
|
||||
.Example 1: Adding two numerical fields
|
||||
[source,js]
|
||||
----------------------------------
|
||||
PUT _xpack/ml/anomaly_detectors/test1
|
||||
{
|
||||
"analysis_config":{
|
||||
"bucket_span": "10m",
|
||||
"detectors":[
|
||||
{
|
||||
"function":"mean",
|
||||
"field_name": "total_error_count", <1>
|
||||
"detector_description": "Custom script field transformation"
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_description": {
|
||||
"time_field":"@timestamp",
|
||||
"time_format":"epoch_ms"
|
||||
}
|
||||
}
|
||||
|
||||
PUT _xpack/ml/datafeeds/datafeed-test1
|
||||
{
|
||||
"job_id": "test1",
|
||||
"indices": ["my_index"],
|
||||
"types": ["my_type"],
|
||||
"query": {
|
||||
"match_all": {
|
||||
"boost": 1
|
||||
}
|
||||
},
|
||||
"script_fields": {
|
||||
"total_error_count": { <2>
|
||||
"script": {
|
||||
"lang": "expression",
|
||||
"inline": "doc['error_count'].value + doc['aborted_count'].value"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
----------------------------------
|
||||
// CONSOLE
|
||||
// TEST[skip:broken]
|
||||
<1> A script field named `total_error_count` is referenced in the detector
|
||||
within the job.
|
||||
<2> The script field is defined in the {dfeed}.
|
||||
|
||||
This `test1` job contains a detector that uses a script field in a mean analysis
|
||||
function. The `datafeed-test1` {dfeed} defines the script field. It contains a
|
||||
script that adds two fields in the document to produce a "total" error count.
|
||||
|
||||
The syntax for the `script_fields` property is identical to that used by {es}.
|
||||
For more information, see {ref}/search-request-script-fields.html[Script Fields].
|
||||
|
||||
You can preview the contents of the {dfeed} by using the following API:
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
GET _xpack/ml/datafeeds/datafeed-test1/_preview
|
||||
----------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
In this example, the API returns the following results, which contain a sum of
|
||||
the `error_count` and `aborted_count` values:
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"total_error_count": 40476
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
|
||||
NOTE: This example demonstrates how to use script fields, but it contains
|
||||
insufficient data to generate meaningful results. For a full demonstration of
|
||||
how to create jobs with sample data, see <<ml-getting-started>>.
|
||||
|
||||
You can alternatively use {kib} to create an advanced job that uses script
|
||||
fields. To add the `script_fields` property to your {dfeed}, you must use the
|
||||
**Edit JSON** tab. For example:
|
||||
|
||||
[role="screenshot"]
|
||||
image::images/ml-scriptfields.jpg[Adding script fields to a {dfeed} in {kib}]
|
||||
|
||||
[[ml-configuring-transform-examples]]
|
||||
==== Common Script Field Examples
|
||||
|
||||
While the possibilities are limitless, there are a number of common scenarios
|
||||
where you might use script fields in your {dfeeds}.
|
||||
|
||||
[NOTE]
|
||||
===============================
|
||||
Some of these examples use regular expressions. By default, regular
|
||||
expressions are disabled because they circumvent the protection that Painless
|
||||
provides against long-running and memory-hungry scripts. For more information,
|
||||
see {ref}/modules-scripting-painless.html[Painless Scripting Language].
|
||||
|
||||
Machine learning analysis is case sensitive. For example, "John" is considered
|
||||
to be different than "john". This is one reason you might consider using scripts
|
||||
that convert your strings to upper or lowercase letters.
|
||||
===============================
|
||||
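If you do need regular expressions, as in examples 6 and 7 below, they can be enabled cluster-wide. This is a sketch of the relevant `elasticsearch.yml` setting; weigh it against the protection it removes:

[source,yaml]
-------------------
script.painless.regex.enabled: true
-------------------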
|
||||
[[ml-configuring-transform2]]
|
||||
.Example 2: Concatenating strings
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/ml/anomaly_detectors/test2
|
||||
{
|
||||
"analysis_config":{
|
||||
"bucket_span": "10m",
|
||||
"detectors":[
|
||||
{
|
||||
"function":"low_info_content",
|
||||
"field_name":"my_script_field", <1>
|
||||
"detector_description": "Custom script field transformation"
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_description": {
|
||||
"time_field":"@timestamp",
|
||||
"time_format":"epoch_ms"
|
||||
}
|
||||
}
|
||||
|
||||
PUT _xpack/ml/datafeeds/datafeed-test2
|
||||
{
|
||||
"job_id": "test2",
|
||||
"indices": ["my_index"],
|
||||
"types": ["my_type"],
|
||||
"query": {
|
||||
"match_all": {
|
||||
"boost": 1
|
||||
}
|
||||
},
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "doc['some_field'].value + '_' + doc['another_field'].value" <2>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[skip:broken]
|
||||
<1> The script field has a rather generic name in this case, since it will
|
||||
be used for various tests in the subsequent examples.
|
||||
<2> The script field uses the plus (+) operator to concatenate strings.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that "JOE"
|
||||
and "SMITH " have been concatenated and an underscore was added:
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "JOE_SMITH "
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform3]]
|
||||
.Example 3: Trimming strings
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/ml/datafeeds/datafeed-test2/_update
|
||||
{
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "doc['another_field'].value.trim()" <1>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> This script field uses the `trim()` function to trim extra white space from a
|
||||
string.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that "SMITH "
|
||||
has been trimmed to "SMITH":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "SMITH"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform4]]
|
||||
.Example 4: Converting strings to lowercase
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/ml/datafeeds/datafeed-test2/_update
|
||||
{
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "doc['some_field'].value.toLowerCase()" <1>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> This script field uses the `toLowerCase()` function to convert a string to all
|
||||
lowercase letters. Likewise, you can use the `toUpperCase()` function to convert
|
||||
a string to uppercase letters.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that "JOE"
|
||||
has been converted to "joe":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "joe"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform5]]
|
||||
.Example 5: Converting strings to mixed case formats
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/ml/datafeeds/datafeed-test2/_update
|
||||
{
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "doc['some_field'].value.substring(0, 1).toUpperCase() + doc['some_field'].value.substring(1).toLowerCase()" <1>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> This script field is a more complicated example of case manipulation. It uses
|
||||
the `substring()` function to capitalize the first letter of a string and
|
||||
converts the remaining characters to lowercase.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that "JOE"
|
||||
has been converted to "Joe":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "Joe"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform6]]
|
||||
.Example 6: Replacing tokens
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/ml/datafeeds/datafeed-test2/_update
|
||||
{
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "/\\s/.matcher(doc['tokenstring2'].value).replaceAll('_')" <1>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> This script field uses regular expressions to replace white
|
||||
space with underscores.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that
|
||||
"foo bar baz" has been converted to "foo_bar_baz":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "foo_bar_baz"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform7]]
|
||||
.Example 7: Regular expression matching and concatenation
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/ml/datafeeds/datafeed-test2/_update
|
||||
{
|
||||
"script_fields": {
|
||||
"my_script_field": {
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"inline": "def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); return m.find() ? m.group(1) + '_' + m.group(2) : '';" <1>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test2/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> This script field looks for a specific regular expression pattern and emits the
|
||||
matched groups as a concatenated string. If no match is found, it emits an empty
|
||||
string.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that
|
||||
"foo-bar-19" has been converted to "foo_19":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_script_field": "foo_19"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform8]]
|
||||
.Example 8: Splitting strings by domain name
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/ml/anomaly_detectors/test3
|
||||
{
|
||||
"description":"DNS tunneling",
|
||||
"analysis_config":{
|
||||
"bucket_span": "30m",
|
||||
"influencers": ["clientip","hrd"],
|
||||
"detectors":[
|
||||
{
|
||||
"function":"high_info_content",
|
||||
"field_name": "sub",
|
||||
"over_field_name": "hrd",
|
||||
"exclude_frequent":"all"
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_description": {
|
||||
"time_field":"@timestamp",
|
||||
"time_format":"epoch_ms"
|
||||
}
|
||||
}
|
||||
|
||||
PUT _xpack/ml/datafeeds/datafeed-test3
|
||||
{
|
||||
"job_id": "test3",
|
||||
"indices": ["my_index"],
|
||||
"types": ["my_type"],
|
||||
"query": {
|
||||
"match_all": {
|
||||
"boost": 1
|
||||
}
|
||||
},
|
||||
"script_fields":{
|
||||
"sub":{
|
||||
"script":"return domainSplit(doc['query'].value, params).get(0);"
|
||||
},
|
||||
"hrd":{
|
||||
"script":"return domainSplit(doc['query'].value, params).get(1);"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test3/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[skip:broken]
|
||||
|
||||
If you have a single field that contains a well-formed DNS domain name, you can
|
||||
use the `domainSplit()` function to split the string into its highest registered
|
||||
domain and the sub-domain, which is everything to the left of the highest
|
||||
registered domain. For example, the highest registered domain of
|
||||
`www.ml.elastic.co` is `elastic.co` and the sub-domain is `www.ml`. The
|
||||
`domainSplit()` function returns an array of two values: the first value is the
|
||||
subdomain; the second value is the highest registered domain.
|
||||
|
||||
NOTE: The `domainSplit()` function takes two arguments. The first argument is
|
||||
the string you want to split. The second argument is always `params`. This is a
|
||||
technical implementation detail related to how Painless operates internally.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that
|
||||
"www.ml.elastic.co" has been split into "elastic.co" and "www.ml":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"clientip.keyword": "123.456.78.900",
|
||||
"hrd": "elastic.co",
|
||||
"sub": "www.ml"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
[[ml-configuring-transform9]]
|
||||
.Example 9: Transforming geo_point data
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/ml/anomaly_detectors/test4
|
||||
{
|
||||
"analysis_config":{
|
||||
"bucket_span": "10m",
|
||||
"detectors":[
|
||||
{
|
||||
"function":"lat_long",
|
||||
"field_name": "my_coordinates"
|
||||
}
|
||||
]
|
||||
},
|
||||
"data_description": {
|
||||
"time_field":"@timestamp",
|
||||
"time_format":"epoch_ms"
|
||||
}
|
||||
}
|
||||
|
||||
PUT _xpack/ml/datafeeds/datafeed-test4
|
||||
{
|
||||
"job_id": "test4",
|
||||
"indices": ["my_index"],
|
||||
"types": ["my_type"],
|
||||
"query": {
|
||||
"match_all": {
|
||||
"boost": 1
|
||||
}
|
||||
},
|
||||
"script_fields": {
|
||||
"my_coordinates": {
|
||||
"script": {
|
||||
"inline": "doc['coords.lat'].value + ',' + doc['coords.lon'].value",
|
||||
"lang": "painless"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
GET _xpack/ml/datafeeds/datafeed-test4/_preview
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[skip:broken]
|
||||
|
||||
In {es}, location data can be stored in `geo_point` fields but this data type is
|
||||
not supported natively in {xpackml} analytics. This example of a script field
|
||||
transforms the data into an appropriate format. For more information,
|
||||
see <<ml-geo-functions>>.
|
||||
|
||||
The preview {dfeed} API returns the following results, which show that
|
||||
`41.44` and `90.5` have been combined into "41.44,90.5":
|
||||
|
||||
[source,js]
|
||||
----------------------------------
|
||||
[
|
||||
{
|
||||
"@timestamp": 1490274000000,
|
||||
"my_coordinates": "41.44,90.5"
|
||||
}
|
||||
]
|
||||
----------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
////
|
||||
==== Configuring Script Fields in {dfeeds-cap}
|
||||
|
||||
//TO-DO: Add Kibana steps from
|
||||
//https://github.com/elastic/prelert-legacy/wiki/Transforming-data-with-script_fields#transforming-geo_point-data-to-a-workable-string-format
|
||||
////
|
|
@ -0,0 +1,117 @@
|
|||
[float]
|
||||
[[modules-node-xpack]]
|
||||
== [xpack]#X-Pack node settings#
|
||||
|
||||
//This content is referenced from the elastic/elasticsearch/docs/reference/modules/node.asciidoc
|
||||
|
||||
If {xpack} is installed, there is an additional node type:
|
||||
|
||||
<<ml-node,Machine learning node>>::
|
||||
|
||||
A node that has `xpack.ml.enabled` and `node.ml` set to `true`, which is the
|
||||
default behavior when {xpack} is installed. If you want to use {xpackml}
|
||||
features, there must be at least one {ml} node in your cluster. For more
|
||||
information about {xpackml} features,
|
||||
see {xpack-ref}/xpack-ml.html[Machine Learning in the Elastic Stack].
|
||||
|
||||
IMPORTANT: Do not use the `node.ml` setting unless {xpack} is installed.
|
||||
Otherwise, the node fails to start.
|
||||
|
||||
If {xpack} is installed, nodes are master-eligible, data, ingest, and {ml}
|
||||
nodes by default. As the cluster grows and in particular if you have large
|
||||
{ml} jobs, consider separating dedicated master-eligible nodes from dedicated
|
||||
data nodes and dedicated {ml} nodes.
|
||||
|
||||
To create a dedicated master-eligible node when {xpack} is installed, set:
|
||||
|
||||
[source,yaml]
|
||||
-------------------
|
||||
node.master: true <1>
|
||||
node.data: false <2>
|
||||
node.ingest: false <3>
|
||||
node.ml: false <4>
|
||||
xpack.ml.enabled: true <5>
|
||||
-------------------
|
||||
<1> The `node.master` role is enabled by default.
|
||||
<2> Disable the `node.data` role (enabled by default).
|
||||
<3> Disable the `node.ingest` role (enabled by default).
|
||||
<4> Disable the `node.ml` role (enabled by default in {xpack}).
|
||||
<5> The `xpack.ml.enabled` setting is enabled by default in {xpack}.
|
||||
|
||||
To create a dedicated data node when {xpack} is installed, set:
|
||||
|
||||
[source,yaml]
|
||||
-------------------
|
||||
node.master: false <1>
|
||||
node.data: true <2>
|
||||
node.ingest: false <3>
|
||||
node.ml: false <4>
|
||||
-------------------
|
||||
<1> Disable the `node.master` role (enabled by default).
|
||||
<2> The `node.data` role is enabled by default.
|
||||
<3> Disable the `node.ingest` role (enabled by default).
|
||||
<4> Disable the `node.ml` role (enabled by default in {xpack}).
|
||||
|
||||
To create a dedicated ingest node when {xpack} is installed, set:
|
||||
|
||||
[source,yaml]
|
||||
-------------------
|
||||
node.master: false <1>
|
||||
node.data: false <2>
|
||||
node.ingest: true <3>
|
||||
search.remote.connect: false <4>
|
||||
node.ml: false <5>
|
||||
-------------------
|
||||
<1> Disable the `node.master` role (enabled by default).
|
||||
<2> Disable the `node.data` role (enabled by default).
|
||||
<3> The `node.ingest` role is enabled by default.
|
||||
<4> Disable cross-cluster search (enabled by default).
|
||||
<5> Disable the `node.ml` role (enabled by default in {xpack}).
|
||||
|
||||
To create a dedicated coordinating node when {xpack} is installed, set:
|
||||
|
||||
[source,yaml]
|
||||
-------------------
|
||||
node.master: false <1>
|
||||
node.data: false <2>
|
||||
node.ingest: false <3>
|
||||
search.remote.connect: false <4>
|
||||
node.ml: false <5>
|
||||
-------------------
|
||||
<1> Disable the `node.master` role (enabled by default).
|
||||
<2> Disable the `node.data` role (enabled by default).
|
||||
<3> Disable the `node.ingest` role (enabled by default).
|
||||
<4> Disable cross-cluster search (enabled by default).
|
||||
<5> Disable the `node.ml` role (enabled by default in {xpack}).
|
||||
|
||||
[float]
|
||||
[[ml-node]]
|
||||
=== [xpack]#Machine learning node#
|
||||
|
||||
The {xpackml} features provide {ml} nodes, which run jobs and handle {ml} API
|
||||
requests. If `xpack.ml.enabled` is set to `true` and `node.ml` is set to `false`,
|
||||
the node can service API requests but it cannot run jobs.
|
||||
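A sketch of that service-only combination in `elasticsearch.yml`:

[source,yaml]
-------------------
xpack.ml.enabled: true
node.ml: false
-------------------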
|
||||
If you want to use {xpackml} features in your cluster, you must enable {ml}
|
||||
(set `xpack.ml.enabled` to `true`) on all master-eligible nodes. Do not use
|
||||
these settings if you do not have {xpack} installed.
|
||||
|
||||
For more information about these settings, see <<ml-settings>>.
|
||||
|
||||
To create a dedicated {ml} node, set:
|
||||
|
||||
[source,yaml]
|
||||
-------------------
|
||||
node.master: false <1>
|
||||
node.data: false <2>
|
||||
node.ingest: false <3>
|
||||
search.remote.connect: false <4>
|
||||
node.ml: true <5>
|
||||
xpack.ml.enabled: true <6>
|
||||
-------------------
|
||||
<1> Disable the `node.master` role (enabled by default).
|
||||
<2> Disable the `node.data` role (enabled by default).
|
||||
<3> Disable the `node.ingest` role (enabled by default).
|
||||
<4> Disable cross-cluster search (enabled by default).
|
||||
<5> The `node.ml` role is enabled by default in {xpack}.
|
||||
<6> The `xpack.ml.enabled` setting is enabled by default in {xpack}.
|
|
@ -0,0 +1,24 @@
|
|||
[role="xpack"]
|
||||
[[ml-api-definitions]]
|
||||
== Definitions
|
||||
|
||||
These resource definitions are used in {ml} APIs and in {kib} advanced
|
||||
job configuration options.
|
||||
|
||||
* <<ml-datafeed-resource,{dfeeds-cap}>>
|
||||
* <<ml-datafeed-counts,{dfeed-cap} counts>>
|
||||
* <<ml-job-resource,Jobs>>
|
||||
* <<ml-jobstats,Job statistics>>
|
||||
* <<ml-snapshot-resource,Model snapshots>>
|
||||
* <<ml-results-resource,Results>>
|
||||
|
||||
[role="xpack"]
|
||||
include::ml/datafeedresource.asciidoc[]
|
||||
[role="xpack"]
|
||||
include::ml/jobresource.asciidoc[]
|
||||
[role="xpack"]
|
||||
include::ml/jobcounts.asciidoc[]
|
||||
[role="xpack"]
|
||||
include::ml/snapshotresource.asciidoc[]
|
||||
[role="xpack"]
|
||||
include::ml/resultsresource.asciidoc[]
|
|
@ -1,6 +1,12 @@
|
|||
[role="xpack"]
|
||||
[[graph-api]]
|
||||
== Graph APIs
|
||||
|
||||
* <<graph-api-explore>>
|
||||
The Graph "explore" API is accessible via the
|
||||
`/_xpack/graph/_explore` endpoint.
|
||||
See {kibana}/graph-api-explore.html[Explore API].
|
||||
|
||||
include::graph/explore.asciidoc[]
|
||||
//* <<graph-api-explore>>
|
||||
|
||||
//TO-DO: Create a formatted API reference topic for explore:
|
||||
//include::graph/explore.asciidoc[]
|
||||
|
|
|
@ -1,254 +1,128 @@
|
|||
[[graph-api-explore]]
|
||||
[role="xpack"]
|
||||
[[graph-explore]]
|
||||
=== Explore API
|
||||
|
||||
The Graph "explore" API is accessible via the /_xpack/_graph/_explore endpoint.
|
||||
One of the best ways to understand the behaviour of this API is to use the Kibana
|
||||
Graph UI to visually click around connected data and then view the "Last request"
|
||||
panel (accessible from the button with the cog icon). This panel shows the JSON request/response
|
||||
pair of the last user operation.
|
||||
The graph explore API ...
|
||||
|
||||
image::images/spy.jpg["Viewing the last request in the Kibana Graph UI"]
|
||||
==== Request
|
||||
|
||||
- <<basic-search, Basic exploration>>
|
||||
- <<optional-controls, Optional controls>>
|
||||
- <<spider-search, "Spidering" operations>>
|
||||
`POST <index>/_xpack/graph/_explore`
|
||||
|
||||
|
||||
==== Description
|
||||
|
||||
[float]
|
||||
[[basic-search]]
|
||||
=== Basic exploration
|
||||
|
||||
An initial search typically begins with a query to identify strongly related terms.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST clicklogs/_xpack/_graph/_explore
|
||||
{
|
||||
"query": { <1>
|
||||
"match": {
|
||||
"query.raw": "midi"
|
||||
}
|
||||
},
|
||||
"vertices": [ <2>
|
||||
{
|
||||
"field": "product"
|
||||
}
|
||||
],
|
||||
"connections": { <3>
|
||||
"vertices": [
|
||||
{
|
||||
"field": "query.raw"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> A query is used to "seed" the exploration - here we are looking in clicklogs for people who searched for "midi". Any of the
|
||||
usual Elasticsearch query syntax can be used here to identify the documents of interest.
|
||||
<2> A list of fields is provided - here we want to find product codes that are significantly associated with searches for "midi".
|
||||
<3> A list of fields is provided again - here we are looking for other search terms that led people to click on the products found in 2).
|
||||
|
||||
NOTE: Further "connections" can be nested inside the "connections" object to continue exploring out the relationships in the data. Each level of nesting
|
||||
is commonly referred to as a "hop" and proximity in a graph is often thought of in terms of "hop depth".
|
||||
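For illustration, a purely hypothetical two-hop request nests a second `connections` object inside the first (extending the example above):

[source,js]
--------------------------------------------------
POST clicklogs/_xpack/_graph/_explore
{
  "query": { "match": { "query.raw": "midi" } },
  "vertices": [ { "field": "product" } ],
  "connections": {
    "vertices": [ { "field": "query.raw" } ],
    "connections": {
      "vertices": [ { "field": "product" } ]
    }
  }
}
--------------------------------------------------

The innermost `vertices` are discovered at hop depth 2: other products clicked by people who used the related search terms.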
|
||||
|
||||
The response from a graph exploration is as follows:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"took": 0,
|
||||
"timed_out": false,
|
||||
"failures": [],
|
||||
"vertices": [ <1>
|
||||
{
|
||||
"field": "query.raw",
|
||||
"term": "midi cable",
|
||||
"weight": 0.08745858139552132,
|
||||
"depth": 1
|
||||
},
|
||||
{
|
||||
"field": "product",
|
||||
"term": "8567446",
|
||||
"weight": 0.13247784285434397,
|
||||
"depth": 0
|
||||
},
|
||||
{
|
||||
"field": "product",
|
||||
"term": "1112375",
|
||||
"weight": 0.018600718471158982,
|
||||
"depth": 0
|
||||
},
|
||||
{
|
||||
"field": "query.raw",
|
||||
"term": "midi keyboard",
|
||||
"weight": 0.04802242866755111,
|
||||
"depth": 1
|
||||
}
|
||||
],
|
||||
"connections": [ <2>
|
||||
{
|
||||
"source": 0,
|
||||
"target": 1,
|
||||
"weight": 0.04802242866755111,
|
||||
"doc_count": 13
|
||||
},
|
||||
{
|
||||
"source": 2,
|
||||
"target": 3,
|
||||
"weight": 0.08120623870976627,
|
||||
"doc_count": 23
|
||||
}
|
||||
]
|
||||
}
|
||||
--------------------------------------------------
|
||||
<1> An array of all of the vertices that were discovered. A vertex is an indexed term, so the field and term value are supplied. The `weight` attribute denotes a significance score, while `depth` indicates the hop level at which the term was first encountered.
|
||||
<2> The connections between the vertices in the array. The `source` and `target` properties are indexes into the vertices array and indicate which vertex term led to the other as part of exploration.
|
||||
The `doc_count` value indicates how many documents in the sample of documents analyzed contain this pairing of terms (this is not a global count for all documents in the index).
|
||||
|
||||
In the Kibana Graph UI, response data is visualized in a diagram like this:
|
||||
|
||||
|
||||
image::images/midiclicks.jpg["An example visualization of product/search click data using the Kibana Graph UI",width="50%", align="center"]
|
||||
|
||||
|
||||
[float]
|
||||
[[optional-controls]]
|
||||
=== Optional controls
|
||||
|
||||
The previous basic example omitted several parameters that have default values. This fuller example illustrates the additional parameters that can be used in graph explore requests.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST clicklogs/_xpack/_graph/_explore
|
||||
{
|
||||
"query": {<1>
|
||||
"bool": {
|
||||
"must": {
|
||||
"match": {
|
||||
"query.raw": "midi"
|
||||
}
|
||||
},
|
||||
"filter": [
|
||||
{
|
||||
"range": {
|
||||
"query_time": {
|
||||
"gte": "2015-10-01 00:00:00"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"controls": {
|
||||
"use_significance": true,<2>
|
||||
"sample_size": 2000,<3>
|
||||
"timeout": 2000,<4>
|
||||
"sample_diversity": {<5>
|
||||
"field": "category.raw",
|
||||
"max_docs_per_value": 500
|
||||
}
|
||||
},
|
||||
"vertices": [
|
||||
{
|
||||
"field": "product",
|
||||
"size": 5,<6>
|
||||
"min_doc_count": 10,<7>
|
||||
"shard_min_doc_count": 3<8>
|
||||
}
|
||||
],
|
||||
"connections": {
|
||||
"query": {<9>
|
||||
"bool": {
|
||||
"filter": [
|
||||
{
|
||||
"range": {
|
||||
"query_time": {
|
||||
"gte": "2015-10-01 00:00:00"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"vertices": [
|
||||
{
|
||||
"field": "query.raw",
|
||||
"size": 5,
|
||||
"min_doc_count": 10,
|
||||
"shard_min_doc_count": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The seed query in this example is a more complex query for the word "midi", with a date filter added.
|
||||
<2> The `use_significance` flag defaults to true and is used to filter associated terms to only those that are significantly associated with our query.
|
||||
The algorithm used to calculate significance is explained in the documentation for the {ref}/search-aggregations-bucket-significantterms-aggregation.html[significant_terms aggregation].
|
||||
<3> Each "hop" considers a sample of the best-matching documents on each shard (default is 100 documents). Using samples has the dual benefit of keeping exploration focused on meaningfully-connected terms and improving the speed of execution. Very small values (less than 50) may not provide sufficient weight-of-evidence to identify significant connections between terms while very large sample sizes may dilute the quality and be slow.
|
||||
<4> A `timeout` setting (expressed here in milliseconds) after which exploration will be halted and results gathered so far are returned. This is a best-effort approach to termination so
|
||||
may overrun if, for example, a long pause is encountered while FieldData is loaded for a field.
|
||||
<5> To avoid the sample of top-matching documents being dominated by a single source of results, it can sometimes be necessary to request diversity in the sample. This is achieved by
|
||||
selecting a single-value field and a maximum number of documents per value in that field. In this example we are requiring that there are no more than 500 click documents from any one department in the store.
|
||||
This might help us consider products from the electronics, book and video departments whereas without this diversification our results may be entirely dominated by the electronics department.
|
||||
<6> We can control the maximum number of vertex terms returned for each field using the `size` property (default is 5)
|
||||
<7> `min_doc_count` acts as a certainty threshold - just how many documents have to contain a pair of terms before we consider this to be a useful connection? (default is 3)
|
||||
<8> `shard_min_doc_count` is an advanced setting - just how many documents on a shard have to contain a pair of terms before we return this for global consideration? (default is 2)
|
||||
<9> Optionally, a "guiding query" can be used to guide the Graph API as it explores connected terms. In this case we are guiding the hop from products to related queries by only considering documents that are also clicks that have been recorded recently.
|
||||
|
||||
The default settings are configured to remove noisy data and get "the big picture" from your data. For more detailed forensic-type work where every document could be of interest, see the <<graph-troubleshooting,troubleshooting guide>> for tips on tuning the settings for this type of work.
|
||||
|
||||
[float]
|
||||
[[spider-search]]
|
||||
=== "Spidering" operations
|
||||
|
||||
After an initial search users typically want to review the results using a form of graph visualization tool like the one in the Kibana Graph UI.
|
||||
Users will frequently then select one or more vertices of interest and ask to load more vertices that may be connected to their current selection. In graph-speak, this operation is often called "spidering" or "spidering out".
|
||||
After an initial search users typically want to review the results using a form
|
||||
of graph visualization tool like the one in the Kibana Graph UI. Users will
|
||||
frequently then select one or more vertices of interest and ask to load more
|
||||
vertices that may be connected to their current selection. In graph-speak,
|
||||
this operation is often called _spidering_ or _spidering out_.
|
||||
|
||||
In order to spider out it is typically necessary to define two things:
|
||||
|
||||
* The set of vertices from which you would like to spider
|
||||
* The set of vertices you already have in your workspace which you want to avoid seeing again in results
|
||||
|
||||
These two pieces of information, when passed to the Graph API, will ensure you are returned new vertices that can be attached to the existing selection.
|
||||
An example request is as follows:
|
||||
* The set of vertices you already have in your workspace which you want to
|
||||
avoid seeing again in results
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST clicklogs/_xpack/_graph/_explore
|
||||
{
|
||||
"vertices": [
|
||||
{
|
||||
"field": "product",
|
||||
"include": [ "1854873" ] <1>
|
||||
}
|
||||
],
|
||||
"connections": {
|
||||
"vertices": [
|
||||
{
|
||||
"field": "query.raw",
|
||||
"exclude": [ <2>
|
||||
"midi keyboard",
|
||||
"midi",
|
||||
"synth"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> Here we list the mandatory start points from which we want to spider using an `include` array of the terms of interest (in this case a single product code). Note that because
|
||||
we have an `include` clause here, there is no need to define a seed query - we are implicitly querying for documents that contain any of the terms
|
||||
listed in our include clauses. Instead of passing plain strings in this array it is also possible to pass objects with `term` and `boost` values to
|
||||
boost matches on certain terms over others.
|
||||
<2> The `exclude` clause avoids returning specific terms. Here we are asking for more search terms that have led people to click on product 1854873 but explicitly exclude the search terms the client already
|
||||
knows about.
|
||||
These two pieces of information, when passed to the graph explore API, will
|
||||
ensure you are returned new vertices that can be attached to the existing
|
||||
selection.
|
||||
|
||||
The `include` and `exclude` clauses provide the essential features that enable clients to progressively build up a picture of related information in their workspace.
|
||||
The `include` clause is used to define the set of start points from which users wish to spider. Include clauses can also be used to limit the end points users wish to reach, thereby "filling in" some of the missing links between existing vertices in their client-side workspace.
|
||||
The `exclude` clause can be used to avoid the Graph API returning vertices already visible in a client's workspace or perhaps could list undesirable vertices that the client has blacklisted from their workspace and never wants to see returned.
|
||||
The `include` and `exclude` clauses provide the essential features that enable
|
||||
clients to progressively build up a picture of related information in their
|
||||
workspace. The `include` clause is used to define the set of start points from
|
||||
which users wish to spider. Include clauses can also be used to limit the end
|
||||
points users wish to reach, thereby "filling in" some of the missing links
|
||||
between existing vertices in their client-side workspace. The `exclude` clause
|
||||
can be used to avoid the Graph API returning vertices already visible in a
|
||||
client's workspace or perhaps could list undesirable vertices that the client
|
||||
has blacklisted from their workspace and never wants to see returned.
|
||||
|
||||
//==== Path Parameters
|
||||
|
||||
//==== Query Parameters
|
||||
|
||||
==== Request Body
|
||||
|
||||
connections::
|
||||
TBD. A list of fields is provided.
|
||||
query:::
|
||||
TBD. Optionally, a "guiding query" can be used to guide the API as it
|
||||
explores connected terms.
|
||||
vertices:::
|
||||
TBD.
|
||||
|
||||
NOTE: Further "connections" can be nested inside the "connections" object to
|
||||
continue exploring out the relationships in the data. Each level of nesting is
|
||||
commonly referred to as a "hop" and proximity in a graph is often thought of in
|
||||
terms of "hop depth".
|
||||
|
||||
controls::
|
||||
TBD.
|
||||
use_significance:::
|
||||
TBD. The `use_significance` flag defaults to true and is used to filter
|
||||
associated terms to only those that are significantly associated with our
|
||||
query. The algorithm used to calculate significance is explained in the
|
||||
documentation for the
|
||||
{ref}/search-aggregations-bucket-significantterms-aggregation.html[significant_terms aggregation].
|
||||
|
||||
sample_size:::
|
||||
TBD. Each "hop" considers a sample of the best-matching documents on each
|
||||
shard (default is 100 documents). Using samples has the dual benefit of
|
||||
keeping exploration focused on meaningfully-connected terms and improving
|
||||
the speed of execution. Very small values (less than 50) may not provide
|
||||
sufficient weight-of-evidence to identify significant connections between
|
||||
terms while very large sample sizes may dilute the quality and be slow.
|
||||
|
||||
timeout:::
|
||||
TBD. A `timeout` setting (expressed here in milliseconds) after which
|
||||
exploration will be halted and results gathered so far are returned. This is
|
||||
a best-effort approach to termination so may overrun if, for example, a long
|
||||
pause is encountered while FieldData is loaded for a field.
|
||||
|
||||
sample_diversity:::
|
||||
TBD. To avoid the top-matching documents sample being dominated by a single
|
||||
source of results sometimes it can prove necessary to request diversity in
|
||||
the sample. This is achieved by selecting a single-value field and a maximum
|
||||
number of documents per value in that field. In this example we are
|
||||
requiring that there are no more than 500 click documents from any one
|
||||
department in the store. This might help us consider products from the
|
||||
electronics, book and video departments whereas without this diversification
|
||||
our results may be entirely dominated by the electronics department.
|
||||
|
||||
query::
|
||||
TBD. A query is used to "seed" the exploration. Any of the usual {es} query
|
||||
syntax can be used here to identify the documents of interest.
|
||||
|
||||
vertices::
|
||||
TBD. A list of fields is provided.
|
||||
exclude:::
|
||||
TBD. The `exclude` clause avoids returning specific terms.
|
||||
field::: TBD
|
||||
include:::
|
||||
TBD. Lists the start points from which we want to spider using an `include`
|
||||
array of the terms of interest. Note that if you have an `include` clause,
|
||||
there is no need to define a seed query - we are implicitly querying for
|
||||
documents that contain any of the terms listed in our include clauses.
|
||||
Instead of passing plain strings in this array it is also possible to pass
|
||||
objects with `term` and `boost` values to boost matches on certain terms
|
||||
over others.
|
||||
size:::
|
||||
TBD. We can control the maximum number of vertex terms returned for each
|
||||
field using the `size` property. (Default is 5).
|
||||
min_doc_count:::
|
||||
TBD. This property acts as a certainty threshold - just how many documents
|
||||
have to contain a pair of terms before we consider this to be a useful
|
||||
connection? (Default is 3).
|
||||
shard_min_doc_count:::
|
||||
TBD. This is an advanced setting - just how many documents on a shard have
|
||||
to contain a pair of terms before we return this for global consideration?
|
||||
(Default is 2).
|
||||
|
||||
//==== Authorization
|
||||
|
||||
////
|
||||
==== Examples
|
||||
|
||||
TO-DO: Add link to example in Kibana Guide
|
||||
|
||||
////
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
[role="xpack"]
|
||||
[[xpack-api]]
|
||||
= {xpack} APIs
|
||||
|
||||
|
@ -6,12 +7,14 @@
|
|||
{xpack} exposes a wide range of REST APIs to manage and monitor its features.
|
||||
|
||||
* <<info-api, Info API>>
|
||||
* <<security-api, Security APIs>>
|
||||
* <<watcher-api, Watcher APIs>>
|
||||
//* <<security-api, Security APIs>>
|
||||
//* <<watcher-api, Watcher APIs>>
|
||||
* <<graph-api, Graph APIs>>
|
||||
* <<ml-apis, Machine Learning APIs>>
|
||||
* <<ml-api-definitions, Definitions>>
|
||||
--
|
||||
|
||||
[role="xpack"]
|
||||
[[info-api]]
|
||||
== Info API
|
||||
|
||||
|
@ -52,6 +55,11 @@ Example response:
|
|||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"logstash" : {
|
||||
"description" : "Logstash management component for X-Pack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"ml" : {
|
||||
"description" : "Machine Learning for the Elastic Stack",
|
||||
"available" : true,
|
||||
|
@ -107,9 +115,10 @@ GET /_xpack?human=false
|
|||
------------------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
include::security.asciidoc[]
|
||||
//include::security.asciidoc[]
|
||||
|
||||
include::watcher.asciidoc[]
|
||||
//include::watcher.asciidoc[]
|
||||
|
||||
include::graph.asciidoc[]
|
||||
include::ml-api.asciidoc[]
|
||||
include::defs.asciidoc[]
|
||||
|
|
|
@ -1,64 +1,35 @@
|
|||
[role="xpack"]
|
||||
[[ml-apis]]
|
||||
== Machine Learning APIs
|
||||
|
||||
Use machine learning to detect anomalies in time series data.
|
||||
|
||||
* <<ml-api-datafeed-endpoint,{dfeeds-cap}>>
|
||||
* <<ml-api-job-endpoint,Jobs>>
|
||||
* <<ml-api-snapshot-endpoint, Model Snapshots>>
|
||||
* <<ml-api-result-endpoint,Results>>
|
||||
* <<ml-api-definitions, Definitions>>
|
||||
You can use APIs to perform the following {ml} activities:
|
||||
|
||||
[float]
|
||||
[[ml-api-datafeed-endpoint]]
|
||||
=== {dfeeds-cap}
|
||||
|
||||
* <<ml-put-datafeed,Create {dfeed}>>
|
||||
* <<ml-delete-datafeed,Delete {dfeed}>>
|
||||
* <<ml-get-datafeed,Get {dfeed} info>>
|
||||
* <<ml-get-datafeed-stats,Get {dfeed} statistics>>
|
||||
* <<ml-put-datafeed,Create {dfeed}>>, <<ml-delete-datafeed,Delete {dfeed}>>
|
||||
* <<ml-start-datafeed,Start {dfeed}>>, <<ml-stop-datafeed,Stop {dfeed}>>
|
||||
* <<ml-get-datafeed,Get {dfeed} info>>, <<ml-get-datafeed-stats,Get {dfeed} statistics>>
|
||||
* <<ml-preview-datafeed,Preview {dfeed}>>
|
||||
* <<ml-start-datafeed,Start {dfeed}>>
|
||||
* <<ml-stop-datafeed,Stop {dfeed}>>
|
||||
* <<ml-update-datafeed,Update {dfeed}>>
|
||||
|
||||
include::ml/put-datafeed.asciidoc[]
|
||||
include::ml/delete-datafeed.asciidoc[]
|
||||
include::ml/get-datafeed.asciidoc[]
|
||||
include::ml/get-datafeed-stats.asciidoc[]
|
||||
include::ml/preview-datafeed.asciidoc[]
|
||||
include::ml/start-datafeed.asciidoc[]
|
||||
include::ml/stop-datafeed.asciidoc[]
|
||||
include::ml/update-datafeed.asciidoc[]
|
||||
|
||||
|
||||
[float]
|
||||
[[ml-api-job-endpoint]]
|
||||
=== Jobs
|
||||
|
||||
You can use APIs to perform the following activities:
|
||||
|
||||
* <<ml-close-job,Close job>>
|
||||
* <<ml-put-job,Create job>>
|
||||
* <<ml-delete-job,Delete job>>
|
||||
* <<ml-get-job,Get job info>>
|
||||
* <<ml-get-job-stats,Get job statistics>>
|
||||
//* <<ml-valid-detector,Validate detectors>>, <<ml-valid-job,Validate job>>
|
||||
* <<ml-put-job,Create job>>, <<ml-delete-job,Delete job>>
|
||||
* <<ml-open-job,Open job>>, <<ml-close-job,Close job>>
|
||||
* <<ml-get-job,Get job info>>, <<ml-get-job-stats,Get job statistics>>
|
||||
* <<ml-flush-job,Flush job>>
|
||||
* <<ml-open-job,Open job>>
|
||||
* <<ml-post-data,Post data to job>>
|
||||
* <<ml-update-job,Update job>>
|
||||
* <<ml-valid-detector,Validate detectors>>
|
||||
* <<ml-valid-job,Validate job>>
|
||||
|
||||
include::ml/close-job.asciidoc[]
|
||||
include::ml/put-job.asciidoc[]
|
||||
include::ml/delete-job.asciidoc[]
|
||||
include::ml/get-job.asciidoc[]
|
||||
include::ml/get-job-stats.asciidoc[]
|
||||
include::ml/flush-job.asciidoc[]
|
||||
include::ml/open-job.asciidoc[]
|
||||
include::ml/post-data.asciidoc[]
|
||||
include::ml/update-job.asciidoc[]
|
||||
include::ml/validate-detector.asciidoc[]
|
||||
include::ml/validate-job.asciidoc[]
|
||||
|
||||
[float]
|
||||
[[ml-api-snapshot-endpoint]]
|
||||
=== Model Snapshots
|
||||
|
||||
|
@ -67,11 +38,8 @@ include::ml/validate-job.asciidoc[]
|
|||
* <<ml-revert-snapshot,Revert model snapshot>>
|
||||
* <<ml-update-snapshot,Update model snapshot>>
|
||||
|
||||
include::ml/delete-snapshot.asciidoc[]
|
||||
include::ml/get-snapshot.asciidoc[]
|
||||
include::ml/revert-snapshot.asciidoc[]
|
||||
include::ml/update-snapshot.asciidoc[]
|
||||
|
||||
[float]
|
||||
[[ml-api-result-endpoint]]
|
||||
=== Results
|
||||
|
||||
|
@ -80,31 +48,43 @@ include::ml/update-snapshot.asciidoc[]

* <<ml-get-influencer,Get influencers>>
* <<ml-get-record,Get records>>

//CLOSE
include::ml/close-job.asciidoc[]
//CREATE
include::ml/put-datafeed.asciidoc[]
include::ml/put-job.asciidoc[]
//DELETE
include::ml/delete-datafeed.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/delete-snapshot.asciidoc[]
//FLUSH
include::ml/flush-job.asciidoc[]
//GET
include::ml/get-bucket.asciidoc[]
include::ml/get-category.asciidoc[]
include::ml/get-datafeed.asciidoc[]
include::ml/get-datafeed-stats.asciidoc[]
include::ml/get-influencer.asciidoc[]
include::ml/get-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-snapshot.asciidoc[]
include::ml/get-record.asciidoc[]

[[ml-api-definitions]]
=== Definitions

* <<ml-datafeed-resource,{dfeeds-cap}>>
* <<ml-datafeed-counts,{dfeed-cap} counts>>
* <<ml-job-resource,Jobs>>
* <<ml-jobstats,Job statistics>>
* <<ml-snapshot-resource,Model snapshots>>
* <<ml-results-resource,Results>>

include::ml/datafeedresource.asciidoc[]
include::ml/jobresource.asciidoc[]
include::ml/jobcounts.asciidoc[]
include::ml/snapshotresource.asciidoc[]
include::ml/resultsresource.asciidoc[]

//* <<ml-put-job>>
//* <<ml-delete-job>>
//* <<ml-get-job>>
//* <<ml-open-close-job>>
//* <<ml-flush-job>>
//* <<ml-post-data>>
//OPEN
include::ml/open-job.asciidoc[]
//POST
include::ml/post-data.asciidoc[]
//PREVIEW
include::ml/preview-datafeed.asciidoc[]
//REVERT
include::ml/revert-snapshot.asciidoc[]
//START/STOP
include::ml/start-datafeed.asciidoc[]
include::ml/stop-datafeed.asciidoc[]
//UPDATE
include::ml/update-datafeed.asciidoc[]
include::ml/update-job.asciidoc[]
include::ml/update-snapshot.asciidoc[]
//VALIDATE
//include::ml/validate-detector.asciidoc[]
//include::ml/validate-job.asciidoc[]

@ -1,6 +1,6 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-close-job]]
==== Close Jobs
=== Close Jobs

The close job API enables you to close a job.
A job can be opened and closed multiple times throughout its lifecycle.
@ -9,12 +9,12 @@ A closed job cannot receive data or perform analysis
operations, but you can still explore and navigate results.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/<job_id>/_close`

===== Description
==== Description

//A job can be closed once all data has been analyzed.

@ -30,19 +30,21 @@ are no longer required to process data.
When a {dfeed} that has a specified end date stops, it automatically closes
the job.

NOTE: If you use the `force` query parameter, the request returns before the
associated actions such as flushing buffers and persisting the model snapshots
complete. Therefore, do not use that parameter in a script that expects the job
to be in a consistent state after the close job API returns.
NOTE: If you use the `force` query parameter, the request returns without performing
the associated actions such as flushing buffers and persisting the model snapshots.
Therefore, do not use this parameter if you want the job to be in a consistent state
after the close job API returns. The `force` query parameter should only be used in
situations where the job has already failed, or where you are not interested in
results the job might have recently produced or might produce in the future.
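As a hedged sketch of the distinction, a plain close waits for the cleanup
actions while a force close returns immediately; the job name `my_job` below is
a placeholder:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/my_job/_close

POST _xpack/ml/anomaly_detectors/my_job/_close?force=true
----------------------------------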

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Query Parameters
==== Query Parameters

`force`::
  (boolean) Use to close a failed job, or to forcefully close a job which has not
@ -53,13 +55,14 @@ to be in a consistent state after the close job API returns.
The default value is 30 minutes.

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example closes the `event_rate` job:

@ -1,14 +1,15 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-datafeed-resource]]
==== {dfeed-cap} Resources
=== {dfeed-cap} Resources

A {dfeed} resource has the following properties:

`aggregations`::
  (object) If set, the {dfeed} performs aggregation searches.
  Support for aggregations is limited and should only be used with
  low cardinality data. For more information,
  see <<ml-configuring-aggregation>>.
  low cardinality data. For more information, see
  {xpack-ref}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance].
  //<<ml-configuring-aggregation>>.

`chunking_config`::
  (object) Specifies how data searches are split into time chunks.

@ -47,24 +48,8 @@ A {dfeed} resource has the following properties:
  script fields to the {dfeed}.
  The <<ml-detectorconfig,detector configuration objects>> in a job can contain
  functions that use these script fields.
  For more information, see {ref}/search-request-script-fields.html[Script Fields].
  For example:
+
--
[source,js]
----------------------------------
{
  "script_fields": {
    "total_error_count": {
      "script": {
        "lang": "painless",
        "inline": "doc['error_count'].value + doc['aborted_count'].value"
      }
    }
  }
}
----------------------------------
--
  For more information, see
  {xpack-ref}/ml-configuring-transform.html[Transforming Data With Script Fields].

`scroll_size`::
  (unsigned integer) The `size` parameter that is used in {es} searches.

@ -75,7 +60,7 @@ A {dfeed} resource has the following properties:
  For example: `["network","sql","kpi"]`.

[[ml-datafeed-chunking-config]]
===== Chunking Configuration Objects
==== Chunking Configuration Objects

{dfeeds-cap} might be required to search over long time periods, for several months
or years. This search is split into time chunks in order to ensure the load

@ -1,33 +1,41 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-delete-datafeed]]
==== Delete {dfeeds-cap}
=== Delete {dfeeds-cap}

The delete {dfeed} API enables you to delete an existing {dfeed}.

===== Request
==== Request

`DELETE _xpack/ml/datafeeds/<feed_id>`

===== Description
==== Description

NOTE: You must stop the {dfeed} before you can delete it.
NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted.

===== Path Parameters
==== Path Parameters

`feed_id` (required)::
  (string) Identifier for the {dfeed}

===== Query Parameters

`force`::
  (boolean) Use to forcefully delete a started {dfeed}; this method is quicker than
  stopping and deleting the {dfeed}.
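A sketch of the force option in use; `datafeed-example` is a placeholder name:

[source,js]
----------------------------------
DELETE _xpack/ml/datafeeds/datafeed-example?force=true
----------------------------------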

===== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example deletes the `datafeed-it-ops` {dfeed}:

@ -1,16 +1,16 @@
//lcawley: Verified example output 2017-04-11
[role="xpack"]
[[ml-delete-job]]
==== Delete Jobs
=== Delete Jobs

The delete job API enables you to delete an existing anomaly detection job.

===== Request
==== Request

`DELETE _xpack/ml/anomaly_detectors/<job_id>`

===== Description
==== Description

All job configuration, model state and results are deleted.

@ -20,24 +20,32 @@ IMPORTANT: Deleting a job must be done via this API only. Do not delete the
privileges are granted to anyone over the `.ml-*` indices.

Before you can delete a job, you must delete the {dfeeds} that are associated
with it. See <<ml-delete-datafeed,Delete {dfeeds-cap}>>.
with it. See <<ml-delete-datafeed,Delete {dfeeds-cap}>>. Unless the `force` parameter
is used, the job must be closed before it can be deleted.

It is not currently possible to delete multiple jobs using wildcards or a comma
separated list.

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Query Parameters

===== Authorization
`force`::
  (boolean) Use to forcefully delete an opened job; this method is quicker than
  closing and deleting the job.
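Illustratively, with `my_job` standing in for a real job identifier:

[source,js]
----------------------------------
DELETE _xpack/ml/anomaly_detectors/my_job?force=true
----------------------------------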

==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example deletes the `event_rate` job:

@ -1,23 +1,22 @@
[role="xpack"]
[[ml-delete-snapshot]]
==== Delete Model Snapshots
=== Delete Model Snapshots

The delete model snapshot API enables you to delete an existing model snapshot.

===== Request
==== Request

`DELETE _xpack/ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>`

===== Description
==== Description

IMPORTANT: You cannot delete the active model snapshot. To delete that snapshot,
first revert to a different one.
first revert to a different one. To identify the active model snapshot, refer to
the `model_snapshot_id` in the results from the get jobs API.
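A sketch of that lookup, assuming `model_snapshot_id` appears in the job
resource returned by the get jobs API; the job name is a placeholder:

[source,js]
----------------------------------
GET _xpack/ml/anomaly_detectors/my_job
----------------------------------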

//TBD: Where do you see restorePriority? Per old docs, the active model snapshot
//is "...the snapshot with the highest restorePriority".

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

@ -26,13 +25,14 @@ first revert to a different one.
  (string) Identifier for the model snapshot

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example deletes the `1491948163` snapshot:

@ -1,16 +1,16 @@
//lcawley: Verified example output 2017-04-11
[role="xpack"]
[[ml-flush-job]]
==== Flush Jobs
=== Flush Jobs

The flush job API forces any buffered data to be processed by the job.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/<job_id>/_flush`

===== Description
==== Description

The flush job API is only applicable when sending data for analysis using the
<<ml-post-data,post data API>>. Depending on the content of the buffer, it
@ -23,13 +23,13 @@ additionally prunes and persists the model state to disk and the job must be
opened again before analyzing further data.

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Query Parameters
==== Query Parameters

`advance_time`::
  (string) Specifies that no data prior to the date `advance_time` is expected.

@ -47,13 +47,14 @@ opened again before analyzing further data.
  buckets on which to calculate interim results.
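A sketch of a flush that also advances time; the job name and the epoch
timestamp are placeholders:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/my_job/_flush?advance_time=1491948163000
----------------------------------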

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example flushes the `farequote` job:

@ -1,23 +1,23 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-bucket]]
==== Get Buckets
=== Get Buckets

The get bucket API enables you to retrieve job results for one or more buckets.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/<job_id>/results/buckets` +

`GET _xpack/ml/anomaly_detectors/<job_id>/results/buckets/<timestamp>`

===== Description
==== Description

This API presents a chronological view of the records, grouped by bucket.
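For instance, a single-bucket lookup might look like this sketch, where both
the job name and the bucket timestamp are placeholders:

[source,js]
----------------------------------
GET _xpack/ml/anomaly_detectors/my_job/results/buckets/1454943900000
----------------------------------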

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) Identifier for the job

@ -28,7 +28,7 @@ This API presents a chronological view of the records, grouped by bucket.
  about all buckets.

===== Request Body
==== Request Body

`anomaly_score`::
  (double) Returns buckets with anomaly scores higher than this value.

@ -69,16 +69,18 @@ The API returns the following information:
  <<ml-results-buckets,Buckets>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. You also need `read` index privilege on the index
that stores the results. The `machine_learning_admin` and `machine_learning_user`
roles provide these privileges. For more information, see
<<security-privileges>> and <<built-in-roles>>.
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.

===== Examples
==== Examples

The following example gets bucket information for the `it-ops-kpi` job:

@ -1,20 +1,24 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-category]]
==== Get Categories
=== Get Categories

The get categories API enables you to retrieve job results for one or more
categories.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/<job_id>/results/categories` +

`GET _xpack/ml/anomaly_detectors/<job_id>/results/categories/<category_id>`

//===== Description
==== Description

===== Path Parameters
For more information about categories, see
{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].
//<<ml-configuring-categories>>.

==== Path Parameters

`job_id`::
  (string) Identifier for the job.

@ -24,7 +28,7 @@ categories.
  the API returns information about all categories in the job.

===== Request Body
==== Request Body

`page`::
`from`:::

@ -33,25 +37,27 @@ categories.
  (integer) Specifies the maximum number of categories to obtain.
(integer) Specifies the maximum number of categories to obtain.
|
||||
|
||||
|
||||
===== Results
|
||||
==== Results
|
||||
|
||||
The API returns the following information:
|
||||
|
||||
`categories`::
|
||||
(array) An array of category objects. For more information, see
|
||||
<<ml-results-categories,Categories>>.
|
||||
<<ml-results-categories,Categories>>.
|
||||
|
||||
|
||||
===== Authorization
|
||||
==== Authorization
|
||||
|
||||
You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
|
||||
privileges to use this API. You also need `read` index privilege on the index
|
||||
that stores the results. The `machine_learning_admin` and `machine_learning_user`
|
||||
roles provide these privileges. For more information, see
|
||||
<<security-privileges>> and <<built-in-roles>>.
|
||||
{xpack-ref}/security-privileges.html[Security Privileges] and
|
||||
{xpack-ref}/built-in-roles.html[Built-in Roles].
|
||||
//<<security-privileges>> and <<built-in-roles>>.
|
||||
|
||||
|
||||
===== Examples
|
||||
==== Examples
|
||||
|
||||
The following example gets information about one category for the
|
||||
`it_ops_new_logs` job:
|
||||
|
|
|
@ -1,25 +1,25 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-datafeed-stats]]
==== Get {dfeed-cap} Statistics
=== Get {dfeed-cap} Statistics

The get {dfeed} statistics API enables you to retrieve usage information for
{dfeeds}.

===== Request
==== Request

`GET _xpack/ml/datafeeds/_stats` +

`GET _xpack/ml/datafeeds/<feed_id>/_stats`

===== Description
==== Description

If the {dfeed} is stopped, the only information you receive is the
`datafeed_id` and the `state`.

===== Path Parameters
==== Path Parameters

`feed_id`::
  (string) Identifier for the {dfeed}.

@ -27,7 +27,7 @@ If the {dfeed} is stopped, the only information you receive is the
  omit the `feed_id` to get information about all {dfeeds}.

===== Results
==== Results

The API returns the following information:

@ -36,13 +36,15 @@ The API returns the following information:
  For more information, see <<ml-datafeed-counts>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example gets usage information for the
`datafeed-farequote` {dfeed}:

@ -69,7 +71,7 @@ The API returns the following results:
        "ephemeral_id": "KHMWPZoMToOzSsZY9lDDgQ",
        "transport_address": "127.0.0.1:9300",
        "attributes": {
          "max_running_jobs": "10"
          "ml.enabled": "true"
        }
      },
      "assignment_explanation": ""

@ -1,11 +1,11 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-datafeed]]
==== Get {dfeeds-cap}
=== Get {dfeeds-cap}

The get {dfeeds} API enables you to retrieve configuration information for
{dfeeds}.

===== Request
==== Request

`GET _xpack/ml/datafeeds/` +

@ -13,7 +13,7 @@ The get {dfeeds} API enables you to retrieve configuration information for

//===== Description

===== Path Parameters
==== Path Parameters

`feed_id`::
  (string) Identifier for the {dfeed}.

@ -21,7 +21,7 @@ The get {dfeeds} API enables you to retrieve configuration information for
  omit the `feed_id` to get information about all {dfeeds}.

===== Results
==== Results

The API returns the following information:

@ -30,13 +30,15 @@ The API returns the following information:
  For more information, see <<ml-datafeed-resource>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example gets configuration information for the
`datafeed-it-ops-kpi` {dfeed}:

@ -1,22 +1,23 @@
[role="xpack"]
[[ml-get-influencer]]
==== Get Influencers
=== Get Influencers

The get influencers API enables you to retrieve job results for one or more
influencers.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/<job_id>/results/influencers`

//===== Description

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) Identifier for the job.

===== Request Body
==== Request Body

`desc`::
  (boolean) If true, the results are sorted in descending order.

@ -45,7 +46,7 @@ influencers.
  (string) Returns influencers with timestamps after this time.

===== Results
==== Results

The API returns the following information:

@ -54,16 +55,18 @@ The API returns the following information:
  For more information, see <<ml-results-influencers,Influencers>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. You also need `read` index privilege on the index
that stores the results. The `machine_learning_admin` and `machine_learning_user`
roles provide these privileges. For more information, see
<<security-privileges>> and <<built-in-roles>>.
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.

===== Examples
==== Examples

The following example gets influencer information for the `it_ops_new_kpi` job:

@ -1,11 +1,11 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-job-stats]]
==== Get Job Statistics
=== Get Job Statistics

The get jobs API enables you to retrieve usage information for jobs.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/_stats` +

@ -13,7 +13,7 @@ The get jobs API enables you to retrieve usage information for jobs.

//===== Description

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) A required identifier for the job.

@ -21,7 +21,7 @@ The get jobs API enables you to retrieve usage information for jobs.
  the `job_id` to get information about all jobs.

===== Results
==== Results

The API returns the following information:

@ -30,13 +30,15 @@ The API returns the following information:
  For more information, see <<ml-jobstats,Job Statistics>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example gets usage information for the `farequote` job:

@ -1,11 +1,11 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-job]]
==== Get Jobs
=== Get Jobs

The get jobs API enables you to retrieve configuration information for jobs.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/` +

@ -13,7 +13,7 @@ The get jobs API enables you to retrieve configuration information for jobs.

//===== Description

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) Identifier for the job.

@ -21,7 +21,7 @@ The get jobs API enables you to retrieve configuration information for jobs.
  the `job_id` to get information about all jobs.

===== Results
==== Results

The API returns the following information:

@ -30,13 +30,15 @@ The API returns the following information:
  For more information, see <<ml-job-resource,Job Resources>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example gets configuration information for the `farequote` job:

@ -67,7 +69,8 @@ The API returns the following results:
            "function": "mean",
            "field_name": "responsetime",
            "partition_field_name": "airline",
            "detector_rules": []
            "detector_rules": [],
            "detector_index": 0
          }
        ],
        "influencers": [

@ -1,23 +1,23 @@
[role="xpack"]
[[ml-get-record]]
//lcawley Verified example output 2017-04-11
==== Get Records
=== Get Records

The get records API enables you to retrieve anomaly records for a job.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/<job_id>/results/records`

//===== Description

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) Identifier for the job.

===== Request Body
==== Request Body

`desc`::
  (boolean) If true, the results are sorted in descending order.

@ -46,7 +46,7 @@ The get records API enables you to retrieve anomaly records for a job.
  (string) Returns records with timestamps after this time.

===== Results
==== Results

The API returns the following information:

@ -55,16 +55,18 @@ The API returns the following information:
  <<ml-results-records,Records>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. You also need `read` index privilege on the index
that stores the results. The `machine_learning_admin` and `machine_learning_user`
roles provide these privileges. For more information, see
<<security-privileges>> and <<built-in-roles>>.
{xpack-ref}/security-privileges.html[Security Privileges] and
{xpack-ref}/built-in-roles.html[Built-in Roles].
//<<security-privileges>> and <<built-in-roles>>.

===== Examples
==== Examples

The following example gets record information for the `it-ops-kpi` job:

@ -1,11 +1,11 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-get-snapshot]]
==== Get Model Snapshots
=== Get Model Snapshots

The get model snapshots API enables you to retrieve information about model snapshots.

===== Request
==== Request

`GET _xpack/ml/anomaly_detectors/<job_id>/model_snapshots` +

@ -13,7 +13,7 @@ The get model snapshots API enables you to retrieve information about model snap

//===== Description

===== Path Parameters
==== Path Parameters

`job_id`::
  (string) Identifier for the job.

@ -22,7 +22,7 @@ The get model snapshots API enables you to retrieve information about model snap
  (string) Identifier for the model snapshot. If you do not specify this
  optional parameter, the API returns information about all model snapshots.

===== Request Body
==== Request Body

`desc`::
  (boolean) If true, the results are sorted in descending order.

@ -44,7 +44,7 @@ The get model snapshots API enables you to retrieve information about model snap
  (string) Returns snapshots with timestamps after this time.

===== Results
==== Results

The API returns the following information:

@ -53,13 +53,15 @@ The API returns the following information:
  <<ml-snapshot-resource,Model Snapshots>>.

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example gets model snapshot information for the
`it_ops_new_logs` job:

@ -1,6 +1,6 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-jobstats]]
==== Job Statistics
=== Job Statistics

The get job statistics API provides information about the operational
progress of a job.

@ -43,7 +43,7 @@ progress of a job.

[float]
[[ml-datacounts]]
===== Data Counts Objects
==== Data Counts Objects

The `data_counts` object describes the number of records processed
and any related error counts.

@ -128,7 +128,7 @@ necessarily a cause for concern.

[float]
[[ml-modelsizestats]]
===== Model Size Stats Objects
==== Model Size Stats Objects

The `model_size_stats` object has the following properties:

@ -178,7 +178,7 @@ NOTE: The `over` field values are counted separately for each detector and parti

[float]
[[ml-stats-node]]
===== Node Objects
==== Node Objects

The `node` object contains properties for the node that runs the job.
This information is available only for open jobs.

@ -1,6 +1,6 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-job-resource]]
==== Job Resources
=== Job Resources

A job resource has the following properties:

@ -73,7 +73,7 @@ so do not set the `background_persist_interval` value too low.
  are retained.

[[ml-analysisconfig]]
===== Analysis Configuration Objects
==== Analysis Configuration Objects

An analysis configuration object has the following properties:

@ -85,6 +85,9 @@ An analysis configuration object has the following properties:
  (string) If not null, the values of the specified field will be categorized.
  The resulting categories can be used in a detector by setting `by_field_name`,
  `over_field_name`, or `partition_field_name` to the keyword `mlcategory`.
  For more information, see
  {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].
  //<<ml-configuring-categories>>.

`categorization_filters`::
  (array of strings) If `categorization_field_name` is specified,

@ -93,7 +96,9 @@ An analysis configuration object has the following properties:
  off the categorization field values. This functionality is useful to fine tune
  categorization by excluding sequences that should not be taken into
  consideration for defining categories. For example, you can exclude SQL
  statements that appear in your log files.
  statements that appear in your log files. For more information, see
  {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].
  //<<ml-configuring-categories>>.

`detectors`::
  (array) An array of detector configuration objects,

@ -152,7 +157,7 @@ LEAVE UNDOCUMENTED

[float]
[[ml-detectorconfig]]
===== Detector Configuration Objects
==== Detector Configuration Objects

Detector configuration objects specify which data fields a job analyzes.
They also specify which analytical functions are used.

@ -183,12 +188,15 @@ NOTE: The `field_name` cannot contain double quotes or backslashes.
`function`::
  (string) The analysis function that is used.
  For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. For more
  information, see <<ml-functions>>.
  information, see {xpack-ref}/ml-functions.html[Function Reference].
  //<<ml-functions>>.

`over_field_name`::
  (string) The field used to split the data.
  In particular, this property is used for analyzing the splits with respect to the history of all splits.
  It is used for finding unusual values in the population of all splits.
  In particular, this property is used for analyzing the splits with respect to
  the history of all splits. It is used for finding unusual values in the
  population of all splits. For more information, see
  {xpack-ref}/ml-configuring-pop.html[Performing Population Analysis].

`partition_field_name`::
  (string) The field used to segment the analysis.

@ -207,9 +215,13 @@ LEAVE UNDOCUMENTED
  (array) TBD
////

`detector_index`::
  (integer) Unique ID for the detector, used when updating it.
  Based on the order of detectors within the `analysis_config`, starting at zero.
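A sketch of how ordering maps to `detector_index`; the bucket span and field
names are invented for illustration:

[source,js]
----------------------------------
{
  "analysis_config": {
    "bucket_span": "5m",
    "detectors": [
      { "function": "count" },
      { "function": "mean", "field_name": "responsetime" }
    ]
  }
}
----------------------------------

Here the `count` detector would have `detector_index` 0 and the `mean`
detector would have `detector_index` 1.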

[float]
[[ml-datadescription]]
===== Data Description Objects
==== Data Description Objects

The data description defines the format of the input data when you send data to
the job by using the <<ml-post-data,post data>> API. Note that when you configure

@ -243,7 +255,7 @@ job creation fails.

[float]
[[ml-apilimits]]
===== Analysis Limits
==== Analysis Limits

Limits can be applied for the resources required to hold the mathematical models in memory.
These limits are approximate and can be set per job. They do not control the

@ -259,6 +271,10 @@ The `analysis_limits` object has the following properties:
  If you set this value to `0`, no examples are stored. +

NOTE: The `categorization_examples_limit` only applies to analysis that uses categorization.
For more information, see
{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].

//<<ml-configuring-categories>>.

`model_memory_limit`::
  (long) The approximate maximum amount of memory resources that are required

@ -268,7 +284,7 @@ NOTE: The `categorization_examples_limit` only applies to analysis that uses cat

[float]
[[ml-apimodelplotconfig]]
===== Model Plot Config
==== Model Plot Config

This advanced configuration option stores model information along with the
results. It provides a more detailed view into anomaly detection. If you enable

@ -290,5 +306,5 @@ The `model_plot_config` object has the following properties:

`terms`::
  (string) Limits data collection to this comma separated list of _partition_
  or _by_ field names. If terms are not specified or it is an empty string,
  or _by_ field values. If terms are not specified or it is an empty string,
  no filtering is applied. For example, `"CPU,NetworkIn,DiskWrites"`
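A hedged sketch of the object in context; the `enabled` flag is assumed from
the surrounding description of enabling the feature:

[source,js]
----------------------------------
{
  "model_plot_config": {
    "enabled": true,
    "terms": "CPU,NetworkIn,DiskWrites"
  }
}
----------------------------------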

@ -1,17 +1,17 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-open-job]]
==== Open Jobs
=== Open Jobs

A job must be opened in order for it to be ready to receive and analyze data.
A job can be opened and closed multiple times throughout its lifecycle.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/{job_id}/_open`

===== Description
==== Description

A job must be open in order for it to accept and analyze data.

@ -21,13 +21,13 @@ When you open an existing job, the most recent model state is automatically load
The job is ready to resume its analysis from where it left off, once new data is received.

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Request Body
==== Request Body

`open_timeout`::
  (time) Controls the time to wait until a job has opened.

@ -38,13 +38,15 @@ The job is ready to resume its analysis from where it left off, once new data is
last closed is treated as a maintenance window. That is to say, it is not an anomaly

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example opens the `event_rate` job and sets an optional property:

@ -1,16 +1,16 @@
//lcawley: Verified example output 2017-04-11
[role="xpack"]
[[ml-post-data]]
==== Post Data to Jobs
=== Post Data to Jobs

The post data API enables you to send data to an anomaly detection job for analysis.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/<job_id>/_data --data-binary @<data-file.json>`

===== Description
==== Description

The job must have a state of `open` to receive and process the data.

@ -35,13 +35,13 @@ It is not currently possible to post data to multiple jobs using wildcards
or a comma-separated list.

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Request Body
==== Request Body

`reset_start`::
  (string) Specifies the start of the bucket resetting range

@ -50,13 +50,15 @@ or a comma-separated list.
  (string) Specifies the end of the bucket resetting range

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example posts data from the farequote.json file to the `farequote` job:

@ -1,35 +1,37 @@
//lcawley: Verified example output 2017-04-11
[role="xpack"]
[[ml-preview-datafeed]]
==== Preview {dfeeds-cap}
=== Preview {dfeeds-cap}

The preview {dfeed} API enables you to preview a {dfeed}.

===== Request
==== Request

`GET _xpack/ml/datafeeds/<datafeed_id>/_preview`

===== Description
==== Description

The API returns the first "page" of results from the `search` that is created
by using the current {dfeed} settings. This preview shows the structure of
the data that will be passed to the anomaly detection engine.

===== Path Parameters
==== Path Parameters

`datafeed_id` (required)::
  (string) Identifier for the {dfeed}

===== Authorization
==== Authorization

You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see <<privileges-list-cluster>>.
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example obtains a preview of the `datafeed-farequote` {dfeed}:

@ -1,28 +1,28 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-put-datafeed]]
==== Create {dfeeds-cap}
=== Create {dfeeds-cap}

The create {dfeed} API enables you to instantiate a {dfeed}.

===== Request
==== Request

`PUT _xpack/ml/datafeeds/<feed_id>`

===== Description
==== Description

You must create a job before you create a {dfeed}. You can associate only one
{dfeed} with each job.

===== Path Parameters
==== Path Parameters

`feed_id` (required)::
  (string) A numerical character string that uniquely identifies the {dfeed}.

===== Request Body
==== Request Body

`aggregations`::
  (object) If set, the {dfeed} performs aggregation searches.

@ -61,7 +61,7 @@ You must create a job before you create a {dfeed}. You can associate only one
  (object) Specifies scripts that evaluate custom expressions and returns
  script fields to the {dfeed}.
  The <<ml-detectorconfig,detector configuration objects>> in a job can contain
  functions that use these script fields.
  functions that use these script fields.
  For more information,
  see {ref}/search-request-script-fields.html[Script Fields].

@ -77,12 +77,14 @@ For more information about these properties,
see <<ml-datafeed-resource>>.
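As an illustrative sketch only, a minimal request might look like this; the
identifiers, index, and type names are placeholders:

[source,js]
----------------------------------
PUT _xpack/ml/datafeeds/datafeed-example
{
  "job_id": "example-job",
  "indexes": ["example-index"],
  "types": ["example-type"]
}
----------------------------------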

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example creates the `datafeed-it-ops-kpi` {dfeed}:

@ -1,22 +1,22 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-put-job]]
==== Create Jobs
=== Create Jobs

The create job API enables you to instantiate a job.

===== Request
==== Request

`PUT _xpack/ml/anomaly_detectors/<job_id>`

//===== Description

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Request Body
==== Request Body

`analysis_config`::
  (object) The analysis configuration, which specifies how to analyze the data.

@ -43,12 +43,14 @@ The create job API enables you to instantiate a job.
  `.ml-anomalies-shared`.

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example creates the `it-ops-kpi` job:

@ -93,7 +95,8 @@ When the job is created, you receive the following results:
            "detector_description": "low_sum(events_per_min)",
            "function": "low_sum",
            "field_name": "events_per_min",
            "detector_rules": []
            "detector_rules": [],
            "detector_index": 0
          }
        ],
        "influencers": []

|
@ -1,9 +1,9 @@
|
|||
//lcawley Verified example output 2017-04-11
|
||||
[role="xpack"]
|
||||
[[ml-results-resource]]
|
||||
==== Results Resources
|
||||
=== Results Resources
|
||||
|
||||
Several different result types are created for each job. You can query anomaly
|
||||
results for _buckets_, _influencers_ and _records_ by using the results API.
|
||||
results for _buckets_, _influencers_, and _records_ by using the results API.
|
||||
|
||||
Results are written for each `bucket_span`. The timestamp for the results is the
|
||||
start of the bucket time interval.
|
||||
|
@ -31,11 +31,12 @@ indicate that at 16:05 Bob sent 837262434 bytes, when the typical value was
|
|||
entity too, you can drill through to the record results in order to investigate
|
||||
the anomalous behavior.
|
||||
|
||||
//TBD Add links to categorization
|
||||
Categorization results contain the definitions of _categories_ that have been
|
||||
identified. These are only applicable for jobs that are configured to analyze
|
||||
unstructured log data using categorization. These results do not contain a
|
||||
timestamp or any calculated scores.
|
||||
timestamp or any calculated scores. For more information, see
|
||||
{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages].
|
||||
//<<ml-configuring-categories>>.
|
||||
|
||||
* <<ml-results-buckets,Buckets>>
|
||||
* <<ml-results-influencers,Influencers>>
|
||||
|
@ -44,7 +45,7 @@ timestamp or any calculated scores.
|
|||
|
||||
[float]
|
||||
[[ml-results-buckets]]
|
||||
===== Buckets
|
||||
==== Buckets
|
||||
|
||||
Bucket results provide the top level, overall view of the job and are best for
|
||||
alerting.
|
||||
|
@ -109,7 +110,7 @@ the results for the bucket.
|
|||
|
||||
[float]
|
||||
[[ml-results-bucket-influencers]]
|
||||
===== Bucket Influencers
|
||||
==== Bucket Influencers
|
||||
|
||||
Bucket influencer results are available as nested objects contained within
|
||||
bucket results. These results are an aggregation for each type of influencer.
|
||||
|
@ -171,7 +172,7 @@ An bucket influencer object has the following properties:
|
|||
|
||||
[float]
|
||||
[[ml-results-influencers]]
|
||||
===== Influencers
|
||||
==== Influencers
|
||||
|
||||
Influencers are the entities that have contributed to, or are to blame for,
|
||||
the anomalies. Influencer results are available only if an
|
||||
|
@ -249,7 +250,7 @@ filter the anomaly results more easily.
|
|||
|
||||
[float]
|
||||
[[ml-results-records]]
|
||||
===== Records
|
||||
==== Records
|
||||
|
||||
Records contain the detailed analytical results. They describe the anomalous
|
||||
activity that has been identified in the input data based on the detector
|
||||
|
@ -379,7 +380,7 @@ filter the anomaly results more easily.
|
|||
|
||||
[float]
|
||||
[[ml-results-categories]]
|
||||
===== Categories
|
||||
==== Categories
|
||||
|
||||
When `categorization_field_name` is specified in the job configuration, it is
|
||||
possible to view the definitions of the resulting categories. A category
|
||||
|
|
|
@ -1,15 +1,15 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-revert-snapshot]]
==== Revert Model Snapshots
=== Revert Model Snapshots

The revert model snapshot API enables you to revert to a specific snapshot.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>/_revert`

===== Description
==== Description

The {ml} feature in {xpack} reacts quickly to anomalous input, learning new behaviors in data.
Highly anomalous input increases the variance in the models whilst the system learns

@ -49,7 +49,7 @@ Model size (in bytes) is available as part of the Job Resource Model Size Stats.
IMPORTANT: Before you revert to a saved snapshot, you must close the job.

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

@ -57,7 +57,7 @@ IMPORTANT: Before you revert to a saved snapshot, you must close the job.
`snapshot_id` (required)::
  (string) Identifier for the model snapshot

===== Request Body
==== Request Body

`delete_intervening_results`::
  (boolean) If true, deletes the results in the time period between the

@ -69,13 +69,15 @@ the job will not accept input data that is older than the current time.
If you want to resend data, then delete the intervening results.
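Putting that together as a sketch, with placeholder job and snapshot
identifiers:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/my_job/model_snapshots/1234567890/_revert
{
  "delete_intervening_results": true
}
----------------------------------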

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example reverts to the `1491856080` snapshot for the
`it_ops_new_kpi` job:

@ -1,6 +1,6 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-snapshot-resource]]
==== Model Snapshot Resources
=== Model Snapshot Resources

Model snapshots are saved to disk periodically.
By default, this occurs approximately every 3 hours to 4 hours and is

@ -51,7 +51,7 @@ A model snapshot resource has the following properties:

[float]
[[ml-snapshot-stats]]
===== Model Size Statistics
==== Model Size Statistics

The `model_size_stats` object has the following properties:

@ -1,15 +1,15 @@
//lcawley Verified example output 2017-04
[role="xpack"]
[[ml-start-datafeed]]
==== Start {dfeeds-cap}
=== Start {dfeeds-cap}

A {dfeed} must be started in order to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

===== Request
==== Request

`POST _xpack/ml/datafeeds/<feed_id>/_start`

===== Description
==== Description

NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error
occurs.

@ -50,12 +50,12 @@ processed record, the {dfeed} continues from 1 millisecond after the timestamp
of the latest processed record.

===== Path Parameters
==== Path Parameters

`feed_id` (required)::
  (string) Identifier for the {dfeed}

===== Request Body
==== Request Body

`end`::
  (string) The time that the {dfeed} should end. This value is exclusive.

@ -70,13 +70,15 @@ of the latest processed record.
The default value is 20 seconds.
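A sketch of a bounded start, assuming a `start` property complements the
documented `end` property; the {dfeed} name and the times are placeholders:

[source,js]
----------------------------------
POST _xpack/ml/datafeeds/datafeed-example/_start
{
  "start": "2017-04-07T18:22:16Z",
  "end": "2017-04-14T18:22:16Z"
}
----------------------------------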

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example starts the `datafeed-it-ops-kpi` {dfeed}:

@ -1,22 +1,22 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-stop-datafeed]]
==== Stop {dfeeds-cap}
=== Stop {dfeeds-cap}

A {dfeed} that is stopped ceases to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

===== Request
==== Request

`POST _xpack/ml/datafeeds/<feed_id>/_stop`

//===== Description

===== Path Parameters
==== Path Parameters

`feed_id` (required)::
  (string) Identifier for the {dfeed}

===== Request Body
==== Request Body

`force`::
  (boolean) If true, the {dfeed} is stopped forcefully.

@ -26,12 +26,14 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle.
The default value is 20 seconds.
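A force-stop sketch with a placeholder {dfeed} name:

[source,js]
----------------------------------
POST _xpack/ml/datafeeds/datafeed-example/_stop
{
  "force": true
}
----------------------------------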

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example stops the `datafeed-it-ops-kpi` {dfeed}:

@ -1,21 +1,21 @@
//lcawley Verified example output 2017-04
[role="xpack"]
[[ml-update-datafeed]]
==== Update {dfeeds-cap}
=== Update {dfeeds-cap}

The update {dfeed} API enables you to update certain properties of a {dfeed}.

===== Request
==== Request

`POST _xpack/ml/datafeeds/<feed_id>/_update`

//===== Description

===== Path Parameters
==== Path Parameters

`feed_id` (required)::
  (string) Identifier for the {dfeed}

===== Request Body
==== Request Body

The following properties can be updated after the {dfeed} is created:

@ -72,12 +72,14 @@ For more information about these properties,
see <<ml-datafeed-resource>>.

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example updates the query for the `datafeed-it-ops-kpi` {dfeed}
so that only log entries of error level are analyzed:

@ -1,20 +1,20 @@
//lcawley Verified example output 2017-04-11
[role="xpack"]
[[ml-update-job]]
==== Update Jobs
=== Update Jobs

The update job API enables you to update certain properties of a job.

===== Request
==== Request

`POST _xpack/ml/anomaly_detectors/<job_id>/_update`

===== Path Parameters
==== Path Parameters

`job_id` (required)::
  (string) Identifier for the job

===== Request Body
==== Request Body

The following properties can be updated after the job is created:

@ -55,7 +55,7 @@ if the job is open when you make the update, you must stop the data feed, close
the job, then restart the data feed and open the job for the changes to take
effect.
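As a sketch of the call shape only; `description` is assumed to be one of the
updatable properties, and the job name is a placeholder:

[source,js]
----------------------------------
POST _xpack/ml/anomaly_detectors/my_job/_update
{
  "description": "Revised job description"
}
----------------------------------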

//|`analysis_config`: `detectors`: `index` | A unique identifier of the
//|`analysis_config`: `detectors`: `detector_index` | A unique identifier of the
//detector. Matches the order of detectors returned by
//<<ml-get-job,GET job>>, starting from 0. | No
//|`analysis_config`: `detectors`: `detector_description` |A description of the

@ -71,13 +71,15 @@ want to re-run this job with an increased `model_memory_limit`.
--

===== Authorization
==== Authorization

You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see <<privileges-list-cluster>>.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.

===== Examples
==== Examples

The following example updates the `it_ops_new_logs` job:

@ -126,7 +128,8 @@ information, including the updated property values. For example:
            "detector_description": "Unusual message counts",
            "function": "count",
            "by_field_name": "mlcategory",
            "detector_rules": []
            "detector_rules": [],
            "detector_index": 0
          }
        ],
        "influencers": []

@ -1,22 +1,22 @@
|
|||
//lcawley Verified example output 2017-04-11
|
||||
[role="xpack"]
|
||||
[[ml-update-snapshot]]
|
||||
==== Update Model Snapshots
|
||||
=== Update Model Snapshots
|
||||
|
||||
The update model snapshot API enables you to update certain properties of a snapshot.
|
||||
|
||||
===== Request
|
||||
==== Request
|
||||
|
||||
`POST _xpack/ml/anomaly_detectors/<job_id>/model_snapshots/<snapshot_id>/_update`
|
||||
|
||||
|
||||
===== Description
|
||||
==== Description
|
||||
|
||||
//TBD. Is the following still true? - not sure but close/open would be the method
|
||||
Updates to the configuration are only applied after the job has been closed
|
||||
and re-opened.
|
||||
|
||||
|
||||
===== Path Parameters
|
||||
==== Path Parameters
|
||||
|
||||
`job_id` (required)::
|
||||
(string) Identifier for the job
|
||||
|
@ -24,7 +24,7 @@ and re-opened.
|
|||
`snapshot_id` (required)::
|
||||
(string) Identifier for the model snapshot
|
||||
|
||||
===== Request Body
|
||||
==== Request Body
|
||||
|
||||
The following properties can be updated after the model snapshot is created:
|
||||
|
||||
|
@ -39,13 +39,15 @@ The following properties can be updated after the model snapshot is created:
|
|||
The default value is false.
|
||||
|
||||
|
||||
===== Authorization
|
||||
==== Authorization
|
||||
|
||||
You must have the `manage_ml` or `manage` cluster privilege to use this API.
|
||||
For more information, see <<privileges-list-cluster>>.
|
||||
For more information, see
|
||||
{xpack-ref}/security-privileges.html[Security Privileges].
|
||||
//<<privileges-list-cluster>>.
|
||||
|
||||
|
||||
===== Examples
|
||||
==== Examples
|
||||
|
||||
The following example updates the snapshot identified as `1491852978`:
|
||||
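A minimal sketch of such a request; the job ID and the property values are
assumptions for illustration:

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update <1>
{
  "description": "Snapshot taken before the nightly reindex",
  "retain": true
}
--------------------------------------------------
<1> The job ID `it_ops_new_logs` is an assumption for illustration.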
|
||||
|
|
|
@ -1,31 +1,33 @@
|
|||
//lcawley Verified example output 2017-04-11
|
||||
[role="xpack"]
|
||||
[[ml-valid-detector]]
|
||||
==== Validate Detectors
|
||||
=== Validate Detectors
|
||||
|
||||
The validate detectors API validates detector configuration information.
|
||||
|
||||
===== Request
|
||||
==== Request
|
||||
|
||||
`POST _xpack/ml/anomaly_detectors/_validate/detector`
|
||||
|
||||
===== Description
|
||||
==== Description
|
||||
|
||||
This API enables you to validate the detector configuration before you create a job.
|
||||
|
||||
|
||||
===== Request Body
|
||||
==== Request Body
|
||||
|
||||
For a list of the properties that you can specify in the body of this API,
|
||||
see <<ml-detectorconfig,detector configuration objects>>.
|
||||
|
||||
|
||||
===== Authorization
|
||||
==== Authorization
|
||||
|
||||
You must have the `manage_ml` or `manage` cluster privilege to use this API.
|
||||
For more information, see <<privileges-list-cluster>>.
|
||||
For more information, see
|
||||
{xpack-ref}/security-privileges.html[Security Privileges].
|
||||
//<<privileges-list-cluster>>.
|
||||
|
||||
|
||||
===== Examples
|
||||
==== Examples
|
||||
|
||||
The following example validates detector configuration information:
|
||||
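A minimal sketch of such a request, using illustrative field names:

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/_validate/detector
{
  "function": "metric",
  "field_name": "responsetime", <1>
  "by_field_name": "airline"
}
--------------------------------------------------
<1> The field names are illustrative, not the verified example.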
|
||||
|
|
|
@ -1,31 +1,33 @@
|
|||
//lcawley Verified example output 2017-04-11
|
||||
[role="xpack"]
|
||||
[[ml-valid-job]]
|
||||
==== Validate Jobs
|
||||
=== Validate Jobs
|
||||
|
||||
The validate jobs API validates job configuration information.
|
||||
|
||||
===== Request
|
||||
==== Request
|
||||
|
||||
`POST _xpack/ml/anomaly_detectors/_validate`
|
||||
|
||||
===== Description
|
||||
==== Description
|
||||
|
||||
This API enables you to validate the job configuration before you create the job.
|
||||
|
||||
|
||||
===== Request Body
|
||||
==== Request Body
|
||||
|
||||
For a list of the properties that you can specify in the body of this API,
|
||||
see <<ml-job-resource,Job Resources>>.
|
||||
|
||||
|
||||
===== Authorization
|
||||
==== Authorization
|
||||
|
||||
You must have the `manage_ml` or `manage` cluster privilege to use this API.
|
||||
For more information, see <<privileges-list-cluster>>.
|
||||
For more information, see
|
||||
{xpack-ref}/security-privileges.html[Security Privileges].
|
||||
//<<privileges-list-cluster>>.
|
||||
|
||||
|
||||
===== Examples
|
||||
==== Examples
|
||||
|
||||
The following example validates job configuration information:
|
||||
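A minimal sketch of such a request; the analysis configuration shown here is an
illustrative minimum, not the verified example:

[source,js]
--------------------------------------------------
POST _xpack/ml/anomaly_detectors/_validate
{
  "analysis_config": {
    "bucket_span": "300s",
    "detectors": [
      { "function": "count" }
    ]
  },
  "data_description": {
    "time_field": "timestamp" <1>
  }
}
--------------------------------------------------
<1> The time field name is an assumption about the source data.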
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
* <<security-api-clear-cache>>
|
||||
* <<security-api-users>>
|
||||
* <<security-api-roles>>
|
||||
* <<security-api-role-mapping>>
|
||||
* <<security-api-privileges>>
|
||||
* <<security-api-tokens>>
|
||||
|
||||
|
@ -13,5 +14,6 @@ include::security/change-password.asciidoc[]
|
|||
include::security/clear-cache.asciidoc[]
|
||||
include::security/users.asciidoc[]
|
||||
include::security/roles.asciidoc[]
|
||||
include::security/role-mapping.asciidoc[]
|
||||
include::security/privileges.asciidoc[]
|
||||
include::security/tokens.asciidoc[]
|
||||
|
|
|
@ -0,0 +1,128 @@
|
|||
[[security-api-role-mapping]]
|
||||
=== Role Mapping APIs
|
||||
|
||||
The Role Mapping API enables you to add, remove, and retrieve role-mappings.
|
||||
To use this API, you must have at least the `manage_security` cluster privilege.
|
||||
|
||||
NOTE: The API requires that each role-mapping have a distinct name. The name is
|
||||
used solely as an identifier to facilitate interaction via the API, and does
|
||||
not affect the behaviour of the mapping in any way.
|
||||
|
||||
[[security-api-put-role-mapping]]
|
||||
To add a role-mapping, submit a PUT or POST request to the `/_xpack/security/role_mapping/<name>`
|
||||
endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST /_xpack/security/role_mapping/administrators
|
||||
{
|
||||
"roles": [ "user", "admin" ],
|
||||
"enabled": true, <1>
|
||||
"rules": {
|
||||
"field" : { "username" : [ "esadmin01", "esadmin02" ] }
|
||||
},
|
||||
"metadata" : { <2>
|
||||
"version" : 1
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> Mappings that have `enabled` set to `false` will be ignored when role-mapping
|
||||
is performed.
|
||||
<2> Metadata is optional
|
||||
|
||||
The `roles`, `enabled`, and `rules` fields are required at the top level.
|
||||
Within the `metadata` object, keys beginning with `_` are reserved for system
|
||||
usage.
|
||||
|
||||
A successful call returns a JSON structure that shows whether the mapping has
|
||||
been created or updated.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"role_mapping" : {
|
||||
"created" : true <1>
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
<1> When an existing mapping is updated, `created` is set to `false`.
|
||||
|
||||
[[security-api-get-role-mapping]]
|
||||
To retrieve a role-mapping, issue a GET request to the
|
||||
`/_xpack/security/role_mapping/<name>` endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/role_mapping/administrators
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
A successful call returns an object, where the keys are the
|
||||
names of the requested mappings, and the values are
|
||||
the JSON representation of those mappings.
|
||||
If there is no mapping with the requested name, the
|
||||
response will have status code `404`.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"administrators" : {
|
||||
"enabled" : true,
|
||||
"roles" : [
|
||||
"user",
|
||||
"admin"
|
||||
],
|
||||
"rules" : {
|
||||
"field" : {
|
||||
"username" : [
|
||||
"esadmin01",
|
||||
"esadmin02"
|
||||
]
|
||||
}
|
||||
},
|
||||
"metadata" : {
|
||||
"version" : 1
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
You can specify multiple mapping names as a comma-separated list.
|
||||
To retrieve all mappings, omit the name entirely.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
# Retrieve mappings "m1", "m2", and "administrators"
|
||||
GET /_xpack/security/role_mapping/m1,m2,administrators
|
||||
|
||||
# Retrieve all mappings
|
||||
GET /_xpack/security/role_mapping
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[security-api-delete-role-mapping]]
|
||||
To delete a role-mapping, submit a DELETE request to the
|
||||
`/_xpack/security/role_mapping/<name>` endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
DELETE /_xpack/security/role_mapping/administrators
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
If the mapping is successfully deleted, the request returns `{"found": true}`.
|
||||
Otherwise, `found` is set to `false`.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"found" : true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
|
@ -10,10 +10,10 @@ To add a user, submit a PUT or POST request to the `/_xpack/security/user/<usern
|
|||
endpoint.
|
||||
|
||||
[[username-validation]]
|
||||
NOTE: A username must be at least 1 character and no longer than 30 characters.
|
||||
The first character must be a letter (`a-z` or `A-Z`) or an underscore (`_`).
|
||||
Subsequent characters can be letters, underscores (`_`), digits (`0-9`),
|
||||
or any of the following symbols `@`, `-`, `.` or `$`
|
||||
NOTE: Usernames must be at least 1 and no more than 1024 characters. They can
|
||||
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces,
|
||||
punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
|
||||
Leading or trailing whitespace is not allowed.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -84,8 +84,13 @@ check the status:
|
|||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/watcher/watch/my_watch/_execute
|
||||
{
|
||||
"record_execution" : true
|
||||
}
|
||||
|
||||
GET _xpack/watcher/watch/my_watch
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
and the action is now in `ackable` state:
|
||||
|
@ -96,23 +101,36 @@ and the action is now in `ackable` state:
|
|||
"found": true,
|
||||
"_id": "my_watch",
|
||||
"status": {
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"actions": {
|
||||
"test_index": {
|
||||
"ack": {
|
||||
"timestamp": "2015-05-26T18:04:27.723Z",
|
||||
"state": "ackable"
|
||||
},
|
||||
"last_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.723Z",
|
||||
"successful": true
|
||||
},
|
||||
"last_successful_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.723Z",
|
||||
"successful": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"state": ...
|
||||
"state": ...,
|
||||
"last_checked": ...,
|
||||
"last_met_condition": ...
|
||||
},
|
||||
"watch": ...
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"state": \.\.\./"state": "$body.status.state"/]
|
||||
// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/]
|
||||
// TESTRESPONSE[s/"last_checked": \.\.\./"last_checked": "$body.status.last_checked"/]
|
||||
// TESTRESPONSE[s/"last_met_condition": \.\.\./"last_met_condition": "$body.status.last_met_condition"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/]
|
||||
|
||||
Now we can acknowledge it:
|
||||
|
||||
|
@ -122,6 +140,7 @@ PUT _xpack/watcher/watch/my_watch/_ack/test_index
|
|||
GET _xpack/watcher/watch/my_watch
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
@ -129,23 +148,36 @@ GET _xpack/watcher/watch/my_watch
|
|||
"found": true,
|
||||
"_id": "my_watch",
|
||||
"status": {
|
||||
"version": 1,
|
||||
"version": 3,
|
||||
"actions": {
|
||||
"test_index": {
|
||||
"ack": {
|
||||
"timestamp": "2015-05-26T18:04:27.723Z",
|
||||
"state": "acknowledged"
|
||||
"state": "acked"
|
||||
},
|
||||
"last_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.723Z",
|
||||
"successful": true
|
||||
},
|
||||
"last_successful_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.723Z",
|
||||
"successful": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"state": ...
|
||||
"state": ...,
|
||||
"last_checked": ...,
|
||||
"last_met_condition": ...
|
||||
},
|
||||
"watch": ...
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"state": \.\.\./"state": "$body.status.state"/]
|
||||
// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/]
|
||||
// TESTRESPONSE[s/"last_checked": \.\.\./"last_checked": "$body.status.last_checked"/]
|
||||
// TESTRESPONSE[s/"last_met_condition": \.\.\./"last_met_condition": "$body.status.last_met_condition"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/]
|
||||
|
||||
Acknowledging an action throttles further executions of that action until its
|
||||
`ack.state` is reset to `awaits_successful_execution`. This happens when the
|
||||
|
@ -167,58 +199,46 @@ parameter:
|
|||
--------------------------------------------------
|
||||
POST _xpack/watcher/watch/my_watch/_ack
|
||||
--------------------------------------------------
|
||||
// TEST[s/^/POST _xpack\/watcher\/watch\/my_watch\/_execute\n{ "record_execution" : true }\n/]
|
||||
// CONSOLE
|
||||
|
||||
[float]
|
||||
==== Timeouts
|
||||
|
||||
If you acknowledge a watch while it is executing, the request blocks and waits
|
||||
for the watch execution to finish. For some watches, this can take a significant
|
||||
amount of time. By default, the acknowledge action has a timeout of 10 seconds.
|
||||
You can change the timeout setting by specifying the `master_timeout` parameter.
|
||||
|
||||
The following snippet shows how to change the default timeout of the acknowledge
|
||||
action to 30 seconds:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/watcher/watch/my_watch/_ack?master_timeout=30s
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
[float]
|
||||
==== Response format
|
||||
[source,js]
|
||||
|
||||
The response format looks like:
|
||||
The response looks like a get watch response, but only contains the status:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"status": {
|
||||
"last_checked": "2015-05-26T18:21:08.630Z",
|
||||
"last_met_condition": "2015-05-26T18:21:08.630Z",
|
||||
"actions": {
|
||||
"my-action": {
|
||||
"ack_status": {
|
||||
"timestamp": "2015-05-26T18:21:09.982Z",
|
||||
"state": "acked"
|
||||
},
|
||||
"last_execution": {
|
||||
"timestamp": "2015-05-26T18:21:04.106Z",
|
||||
"successful": true
|
||||
},
|
||||
"last_successful_execution": {
|
||||
"timestamp": "2015-05-26T18:21:04.106Z",
|
||||
"successful": true
|
||||
},
|
||||
"last_throttle": {
|
||||
"timestamp": "2015-05-26T18:21:08.630Z",
|
||||
"reason": "throttling interval is set to [5 seconds] but time elapsed since last execution is [4 seconds and 530 milliseconds]"
|
||||
}
|
||||
}
|
||||
"status": {
|
||||
"state": {
|
||||
"active": true,
|
||||
"timestamp": "2015-05-26T18:04:27.723Z"
|
||||
},
|
||||
"last_checked": "2015-05-26T18:04:27.753Z",
|
||||
"last_met_condition": "2015-05-26T18:04:27.763Z",
|
||||
"actions": {
|
||||
"test_index": {
|
||||
"ack" : {
|
||||
"timestamp": "2015-05-26T18:04:27.713Z",
|
||||
"state": "acked"
|
||||
},
|
||||
"last_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.733Z",
|
||||
"successful": true
|
||||
},
|
||||
"last_successful_execution" : {
|
||||
"timestamp": "2015-05-25T18:04:27.773Z",
|
||||
"successful": true
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 2
|
||||
}
|
||||
}
|
||||
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
// TESTRESPONSE[s/"last_checked": "2015-05-26T18:04:27.753Z"/"last_checked": "$body.status.last_checked"/]
|
||||
// TESTRESPONSE[s/"last_met_condition": "2015-05-26T18:04:27.763Z"/"last_met_condition": "$body.status.last_met_condition"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.state.timestamp"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.713Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.733Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/]
|
||||
// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.773Z"/"timestamp": "$body.status.actions.test_index.last_successful_execution.timestamp"/]
|
||||
|
|
|
@ -0,0 +1,120 @@
|
|||
[[xpack-api]]
|
||||
= {xpack} APIs
|
||||
|
||||
[partintro]
|
||||
--
|
||||
{xpack} exposes a wide range of REST APIs to manage and monitor its features.
|
||||
|
||||
* <<info-api, Info API>>
|
||||
* <<security-api, Security APIs>>
|
||||
* <<watcher-api, Watcher APIs>>
|
||||
* <<graph-api, Graph APIs>>
|
||||
* {ref}/ml-apis.html[Machine Learning APIs]
|
||||
* {ref}/ml-api-definitions.html[Definitions]
|
||||
--
|
||||
|
||||
[[info-api]]
|
||||
== Info API
|
||||
|
||||
The info API provides general information on the installed {xpack}. This
|
||||
information includes:
|
||||
|
||||
* Build Information - including the build number and timestamp.
|
||||
* License Information - basic information about the currently installed license.
|
||||
* Features Information - the features that are currently enabled and available
|
||||
under the current license.
|
||||
|
||||
The following example queries the info API:
|
||||
|
||||
[source,js]
|
||||
------------------------------------------------------------
|
||||
GET /_xpack
|
||||
------------------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
Example response:
|
||||
[source,js]
|
||||
------------------------------------------------------------
|
||||
{
|
||||
"build" : {
|
||||
"hash" : "2798b1a3ce779b3611bb53a0082d4d741e4d3168",
|
||||
"date" : "2015-04-07T13:34:42Z"
|
||||
},
|
||||
"license" : {
|
||||
"uid" : "893361dc-9749-4997-93cb-802e3dofh7aa",
|
||||
"type" : "trial",
|
||||
"mode" : "trial",
|
||||
"status" : "active",
|
||||
"expiry_date_in_millis" : 1914278399999
|
||||
},
|
||||
"features" : {
|
||||
"graph" : {
|
||||
"description" : "Graph Data Exploration for the Elastic Stack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"logstash" : {
|
||||
"description" : "Logstash management component for X-Pack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"ml" : {
|
||||
"description" : "Machine Learning for the Elastic Stack",
|
||||
"available" : true,
|
||||
"enabled" : true,
|
||||
"native_code_info" : {
|
||||
"version" : "6.0.0-alpha1-SNAPSHOT",
|
||||
"build_hash" : "d081461967d61a"
|
||||
}
|
||||
},
|
||||
"monitoring" : {
|
||||
"description" : "Monitoring for the Elastic Stack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"security" : {
|
||||
"description" : "Security for the Elastic Stack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
},
|
||||
"watcher" : {
|
||||
"description" : "Alerting, Notification and Automation for the Elastic Stack",
|
||||
"available" : true,
|
||||
"enabled" : true
|
||||
}
|
||||
},
|
||||
"tagline" : "You know, for X"
|
||||
}
|
||||
------------------------------------------------------------
|
||||
// TESTRESPONSE[s/"hash" : "2798b1a3ce779b3611bb53a0082d4d741e4d3168",/"hash" : "$body.build.hash",/]
|
||||
// TESTRESPONSE[s/"date" : "2015-04-07T13:34:42Z"/"date" : "$body.build.date"/]
|
||||
// TESTRESPONSE[s/"uid" : "893361dc-9749-4997-93cb-802e3dofh7aa",/"uid": "$body.license.uid",/]
|
||||
// TESTRESPONSE[s/"expiry_date_in_millis" : 1914278399999/"expiry_date_in_millis" : "$body.license.expiry_date_in_millis"/]
|
||||
// TESTRESPONSE[s/"version" : "6.0.0-alpha1-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/]
|
||||
// TESTRESPONSE[s/"build_hash" : "d081461967d61a"/"build_hash": "$body.features.ml.native_code_info.build_hash"/]
|
||||
// So much s/// but at least we test that the layout is close to matching....
|
||||
|
||||
You can also control what information is returned using the `categories` and
|
||||
`human` parameters.
|
||||
|
||||
The following example only returns the build and features information:
|
||||
|
||||
[source,js]
|
||||
------------------------------------------------------------
|
||||
GET /_xpack?categories=build,features
|
||||
------------------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
The following example removes the descriptions from the response:
|
||||
|
||||
[source,js]
|
||||
------------------------------------------------------------
|
||||
GET /_xpack?human=false
|
||||
------------------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
include::security.asciidoc[]
|
||||
|
||||
include::watcher.asciidoc[]
|
||||
|
||||
include::graph.asciidoc[]
|
|
@ -219,9 +219,12 @@ operation are supported: failover and load balancing
|
|||
`base_dn` is a group object and that it is the only group considered.
|
||||
| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped Active Directory
|
||||
groups should be used as role names and assigned to the user.
|
||||
A group is considered to be _unmapped_ if it is not referenced
|
||||
in any <<mapping-roles-file, role-mapping files>> (API based
|
||||
role-mappings are not considered).
|
||||
Defaults to `false`.
|
||||
| `files.role_mapping` | no | Specifies the path and file name of the
|
||||
<<ad-role-mapping, YAML role mapping configuration file>>.
|
||||
<<ldap-role-mapping, YAML role mapping configuration file>>.
|
||||
Defaults to `CONF_DIR/x-pack/role_mapping.yml`,
|
||||
where `CONF_DIR` is `ES_HOME/config` (zip/tar installations)
|
||||
or `/etc/elasticsearch` (package installations).
|
||||
|
@ -229,6 +232,8 @@ operation are supported: failover and load balancing
|
|||
by the Active Directory server. Referrals are URLs returned by
|
||||
the server that are to be used to continue the LDAP operation
|
||||
(such as `search`). Defaults to `true`.
|
||||
| `metadata` | no | Specifies the list of additional LDAP attributes that should
|
||||
be stored in the `metadata` of an authenticated user.
|
||||
| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the Active Directory
|
||||
server requires client authentication. `ssl.key` and `ssl.keystore.path` may not be used at the
|
||||
same time.
|
||||
|
@ -281,13 +286,55 @@ Active Directory server, the expectation is that their roles are managed there
|
|||
as well. In fact, Active Directory supports the notion of groups, which often
|
||||
represent user roles for different systems in the organization.
|
||||
|
||||
The `active_directory` realm enables you to map Active Directory users and groups
|
||||
to roles in the role mapping file stored on each node. You specify users and
|
||||
groups using their distinguished names (DNs). For example, the following mapping
|
||||
configuration maps the Active Directory `admins` group to both the `monitoring`
|
||||
and `user` roles, maps the `users` group to the `user` role and maps the `John Doe`
|
||||
user to the `user` role.
|
||||
The `active_directory` realm enables you to map Active Directory users to roles
|
||||
via their Active Directory groups, or other metadata. This role mapping can be
|
||||
configured via the <<security-api-role-mapping, role-mapping API>>, or by using
|
||||
a file stored on each node. When a user authenticates against an Active
|
||||
Directory realm, the privileges for that user are the union of all privileges
|
||||
defined by the roles to which the user is mapped.
|
||||
|
||||
Within a mapping definition, you specify groups using their distinguished
|
||||
names. For example, the following mapping configuration maps the Active
|
||||
Directory `admins` group to both the `monitoring` and `user` roles, maps the
|
||||
`users` group to the `user` role and maps the `John Doe` user to the `user`
|
||||
role.
|
||||
|
||||
Configured via the role-mapping API:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/admins
|
||||
{
|
||||
"roles" : [ "monitoring" , "user" ],
|
||||
"rules" : { "field" : {
|
||||
"groups" : "cn=admins,dc=example,dc=com" <1>
|
||||
} },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The Active Directory distinguished name (DN) of the `admins` group.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/basic_users
|
||||
{
|
||||
"roles" : [ "user" ],
|
||||
"rules" : { "any": [
|
||||
{ "field" : {
|
||||
"groups" : "cn=users,dc=example,dc=com" <1>
|
||||
} },
|
||||
{ "field" : {
|
||||
"dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2>
|
||||
} }
|
||||
] },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The Active Directory distinguished name (DN) of the `users` group.
|
||||
<2> The Active Directory distinguished name (DN) of the user `John Doe`.
|
||||
|
||||
Or, alternatively, configured via the role-mapping file:
|
||||
[source, yaml]
|
||||
------------------------------------------------------------
|
||||
monitoring: <1>
|
||||
|
@ -307,7 +354,7 @@ For more information, see <<mapping-roles, Mapping Users and Groups to Roles>>.
|
|||
[[ad-user-metadata]]
|
||||
==== User Metadata in Active Directory Realms
|
||||
When a user is authenticated via an Active Directory realm, the following
|
||||
properties are populated in user's _metadata_. This metadata is returned in the
|
||||
properties are populated in the user's _metadata_. This metadata is returned in the
|
||||
<<security-api-authenticate,authenticate API>>, and can be used with
|
||||
<<templating-role-query, templated queries>> in roles.
|
||||
|
||||
|
@ -319,6 +366,8 @@ properties are populated in user's _metadata_. This metadata is returned in the
|
|||
groups were mapped to a role).
|
||||
|=======================
|
||||
|
||||
Additional metadata can be extracted from the Active Directory server by configuring
|
||||
the `metadata` setting on the Active Directory realm.
|
||||
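A sketch of how that might look in `elasticsearch.yml`, mirroring the LDAP
example later in this document (the realm name and the chosen attribute are
illustrative):

[source,yaml]
--------------------------------------------------
xpack:
  security:
    authc:
      realms:
        ad1:                     # illustrative realm name
          type: active_directory
          metadata: cn           # store the user's common name in metadata
--------------------------------------------------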
|
||||
[[active-directory-ssl]]
|
||||
==== Setting up SSL Between Elasticsearch and Active Directory
|
||||
|
|
|
@ -112,10 +112,10 @@ NOTE: To ensure that Elasticsearch can read the user and role information at
|
|||
bin/x-pack/users useradd <username>
|
||||
----------------------------------------
|
||||
|
||||
A username must be at least 1 character and no longer than 30 characters. The
|
||||
first character must be a letter (`a-z` or `A-Z`) or an underscore (`_`).
|
||||
Subsequent characters can be letters, underscores (`_`), digits (`0-9`), or any
|
||||
of the following symbols `@`, `-`, `.` or `$`.
|
||||
Usernames must be at least 1 and no more than 1024 characters. They can
|
||||
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
|
||||
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
|
||||
Leading or trailing whitespace is not allowed.
|
||||
|
||||
You can specify the user's password at the command-line with the `-p` option.
|
||||
When this option is absent, the command prompts you for the password. Omit the
|
||||
|
|
|
@ -207,6 +207,9 @@ failover and load balancing modes of operation.
|
|||
the user DN is passed to the filter.
|
||||
| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped LDAP groups
|
||||
should be used as role names and assigned to the user.
|
||||
A group is considered to be _unmapped_ if it is not referenced
|
||||
in any <<mapping-roles-file, role-mapping files>> (API based
|
||||
role-mappings are not considered).
|
||||
Defaults to `false`.
|
||||
| `timeout.tcp_connect` | no | Specifies the TCP connect timeout period for establishing an
|
||||
LDAP connection. An `s` at the end indicates seconds, or `ms`
|
||||
|
@ -224,6 +227,8 @@ failover and load balancing modes of operation.
|
|||
returned by the LDAP server. Referrals are URLs returned by
|
||||
the server that are to be used to continue the LDAP operation
|
||||
(e.g. search). Defaults to `true`.
|
||||
| `metadata` | no | Specifies the list of additional LDAP attributes that should
|
||||
be stored in the `metadata` of an authenticated user.
|
||||
| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the LDAP
|
||||
server requires client authentication. `ssl.key` and `ssl.keystore.path`
|
||||
may not be used at the same time.
|
||||
|
@ -330,15 +335,48 @@ the expectation is that their roles are managed there as well. If fact, LDAP
|
|||
supports the notion of groups, which often represent user roles for different
|
||||
systems in the organization.
|
||||
|
||||
The `ldap` realm enables you to map LDAP groups to roles in the role mapping
|
||||
file stored on each node. When a user authenticates with LDAP, the privileges
|
||||
for that user are the union of all privileges defined by the roles assigned to
|
||||
the set of groups that the user belongs to.
|
||||
The `ldap` realm enables you to map LDAP users to roles via their LDAP
|
||||
groups, or other metadata. This role mapping can be configured via the
|
||||
<<security-api-role-mapping, role-mapping API>>, or by using a file stored
|
||||
on each node. When a user authenticates with LDAP, the privileges
|
||||
for that user are the union of all privileges defined by the roles to which
|
||||
the user is mapped.
|
||||
|
||||
You specify groups using their distinguished names. For example, the following
|
||||
mapping configuration maps the LDAP `admins` group to both the `monitoring` and
|
||||
`user` roles, and maps the `users` group to the `user` role.
|
||||
Within a mapping definition, you specify groups using their distinguished
|
||||
names. For example, the following mapping configuration maps the LDAP
|
||||
`admins` group to both the `monitoring` and `user` roles, and maps the
|
||||
`users` group to the `user` role.
|
||||
|
||||
Configured via the role-mapping API:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/admins
|
||||
{
|
||||
"roles" : [ "monitoring" , "user" ],
|
||||
"rules" : { "field" : {
|
||||
"groups" : "cn=admins,dc=example,dc=com" <1>
|
||||
} },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The LDAP distinguished name (DN) of the `admins` group.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/basic_users
|
||||
{
|
||||
"roles" : [ "user" ],
|
||||
"rules" : { "field" : {
|
||||
"groups" : "cn=users,dc=example,dc=com" <1>
|
||||
} },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The LDAP distinguished name (DN) of the `users` group.
|
||||
|
||||
Or, alternatively, configured via the role-mapping file:
|
||||
[source, yaml]
|
||||
------------------------------------------------------------
|
||||
monitoring: <1>
|
||||
|
@ -368,6 +406,24 @@ populated in user's _metadata_. This metadata is returned in the
|
|||
groups were mapped to a role).
|
||||
|=======================
|
||||
|
||||
Additional fields can be included in the user's metadata by configuring
|
||||
the `metadata` setting on the LDAP realm. This metadata is available for use
|
||||
with the <<mapping-roles-api, role mapping API>> or in
|
||||
<<templating-role-query, templated role queries>>.
|
||||
|
||||
The example below includes the user's common name (`cn`) as an additional
|
||||
field in their metadata.
|
||||
[source,yaml]
|
||||
--------------------------------------------------
|
||||
xpack:
|
||||
security:
|
||||
authc:
|
||||
realms:
|
||||
ldap1:
|
||||
type: ldap
|
||||
metadata: cn
|
||||
--------------------------------------------------
|
||||
|
||||
[[ldap-ssl]]
|
||||
==== Setting up SSL Between Elasticsearch and LDAP
|
||||
|
||||
|
|
|
@ -98,10 +98,12 @@ NOTE: To migrate file-based users to the `native` realm, use the
|
|||
===== Adding Users
|
||||
|
||||
To add a user, submit a PUT or POST request to the `/_xpack/security/user/<username>`
|
||||
endpoint. A username must be at least 1 character long and no longer than 30
|
||||
characters. The first character must be a letter (`a-z` or `A-Z`) or an
|
||||
underscore (`_`). Subsequent characters can be letters, underscores (`_`),
|
||||
digits (`0-9`), or any of the following symbols `@`, `-`, `.` or `$`.
|
||||
endpoint.
|
||||
|
||||
Usernames must be at least 1 and no more than 1024 characters. They can
|
||||
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
|
||||
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
|
||||
Leading or trailing whitespace is not allowed.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -117,10 +117,32 @@ xpack:
|
|||
[[assigning-roles-pki]]
|
||||
==== Mapping Roles for PKI Users
|
||||
|
||||
You map roles for PKI users in the role mapping file stored on each node. You
|
||||
identify a user by the distinguished name in their certificate. For example, the
|
||||
following mapping configuration maps `John Doe` to the `user` role:
|
||||
You map roles for PKI users through the
|
||||
<<security-api-role-mapping, role-mapping API>>, or by using a file stored on
|
||||
each node. When a user authenticates against a PKI realm, the privileges for
|
||||
that user are the union of all privileges defined by the roles to which the
|
||||
user is mapped.
|
||||
|
||||
You identify a user by the distinguished name in their certificate.
|
||||
For example, the following mapping configuration maps `John Doe` to the
|
||||
`user` role:
|
||||
|
||||
Using the role-mapping API:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/users
|
||||
{
|
||||
"roles" : [ "user" ],
|
||||
"rules" : { "field" : {
|
||||
"dn" : "cn=John Doe,ou=example,o=com" <1>
|
||||
} },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
<1> The distinguished name (DN) of a PKI user.
|
||||
|
||||
Or, alternatively, configured in a role-mapping file:
|
||||
[source, yaml]
|
||||
------------------------------------------------------------
|
||||
user: <1>
|
||||
|
|
|
@ -59,15 +59,13 @@ themselves.
|
|||
{security} also provides a set of built-in roles you can explicitly assign
|
||||
to users. These roles have a fixed set of privileges and cannot be updated.
|
||||
|
||||
[[built-in-roles-ingest-user]]
|
||||
`ingest_admin` ::
|
||||
[[built-in-roles-ingest-user]] `ingest_admin` ::
|
||||
Grants access to manage *all* index templates and *all* ingest pipeline configurations.
|
||||
+
|
||||
NOTE: This role does *not* provide the ability to create indices; those privileges
|
||||
must be defined in a separate role.
|
||||
|
||||
[[built-in-roles-kibana-system]]
|
||||
`kibana_system` ::
|
||||
[[built-in-roles-kibana-system]] `kibana_system` ::
|
||||
Grants access necessary for the <<kibana, Kibana system user>>
|
||||
to read from and write to the Kibana indices and check the availability of the
|
||||
Elasticsearch cluster.
|
||||
|
@ -75,13 +73,11 @@ Elasticsearch cluster.
|
|||
NOTE: This role should not be assigned to users as the granted permissions may
|
||||
change between releases.
|
||||
|
||||
[[built-in-roles-kibana-user]]
|
||||
`kibana_user`::
|
||||
[[built-in-roles-kibana-user]] `kibana_user`::
|
||||
Grants the minimum privileges required for any user of Kibana. This role grants
|
||||
access to the Kibana indices and grants monitoring privileges for the cluster.
|
||||
|
||||
[[built-in-roles-logstash-system]]
|
||||
`logstash_system` ::
|
||||
[[built-in-roles-logstash-system]] `logstash_system` ::
|
||||
Grants access necessary for the <<ls-monitoring-user, Logstash system user>>
|
||||
to send system-level data (such as monitoring) to Elasticsearch.
|
||||
+
|
||||
|
@ -91,44 +87,37 @@ change between releases.
|
|||
NOTE: This role does not provide access to the logstash indices and is not
|
||||
suitable for use within a Logstash pipeline.
|
||||
|
||||
[[built-in-roles-ml-admin]]
|
||||
`machine_learning_admin`::
|
||||
[[built-in-roles-ml-admin]] `machine_learning_admin`::
|
||||
Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices.
|
||||
|
||||
[[built-in-roles-ml-user]]
|
||||
`machine_learning_user`::
|
||||
[[built-in-roles-ml-user]] `machine_learning_user`::
|
||||
Grants the minimum privileges required to view {xpackml} configuration,
|
||||
status, and results. This role grants `monitor_ml` cluster privileges and
|
||||
read access to the `.ml-notifications` and `.ml-anomalies*` indices,
|
||||
which store {ml} results.
|
||||
|
||||
[[built-in-roles-monitoring-user]]
|
||||
`monitoring_user`::
|
||||
[[built-in-roles-monitoring-user]] `monitoring_user`::
|
||||
Grants the minimum privileges required for any user of Monitoring other than those
|
||||
required to use Kibana. This role grants access to the monitoring indices.
|
||||
Monitoring users should also be assigned the `kibana_user` role.
|
||||
|
||||
[[built-in-roles-remote-monitoring-agent]]
|
||||
`remote_monitoring_agent`::
|
||||
[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`::
|
||||
Grants the minimum privileges required for a remote monitoring agent to write data
|
||||
into this cluster.
|
||||
|
||||
[[built-in-roles-reporting-user]]
|
||||
`reporting_user`::
|
||||
[[built-in-roles-reporting-user]] `reporting_user`::
|
||||
Grants the specific privileges required for users of Reporting other than those
|
||||
required to use Kibana. This role grants access to the reporting indices. Reporting
|
||||
users should also be assigned the `kibana_user` role and a role that grants them
|
||||
access to the data that will be used to generate reports with.
|
||||
|
||||
[[built-in-roles-superuser]]
|
||||
`superuser`::
|
||||
[[built-in-roles-superuser]] `superuser`::
|
||||
Grants full access to the cluster, including all indices and data. A user with
|
||||
the `superuser` role can also manage users and roles and
|
||||
<<run-as-privilege, impersonate>> any other user in the system. Due to the
|
||||
permissive nature of this role, take extra care when assigning it to a user.
|
||||
|
||||
[[built-in-roles-transport-client]]
|
||||
`transport_client`::
|
||||
[[built-in-roles-transport-client]] `transport_client`::
|
||||
Grants the privileges required to access the cluster through the Java Transport
|
||||
Client. The Java Transport Client fetches information about the nodes in the
|
||||
cluster using the _Node Liveness API_ and the _Cluster State API_ (when
|
||||
|
@ -140,14 +129,12 @@ to the cluster state. This means users can view the metadata over all indices,
|
|||
index templates, mappings, node and basically everything about the cluster.
|
||||
However, this role does not grant permission to view the data in all indices.
|
||||
|
||||
[[built-in-roles-watcher-admin]]
|
||||
`watcher_admin`::
|
||||
[[built-in-roles-watcher-admin]] `watcher_admin`::
|
||||
+
|
||||
Grants write access to the `.watches` index, read access to the watch history and
|
||||
the triggered watches index and allows to execute all watcher actions.
|
||||
|
||||
[[built-in-roles-watcher-user]]
|
||||
`watcher_user`::
|
||||
[[built-in-roles-watcher-user]] `watcher_user`::
|
||||
+
|
||||
Grants read access to the `.watches` index, the get watch action and the watcher
|
||||
stats.
|
||||
|
@ -177,10 +164,10 @@ A role is defined by the following JSON structure:
|
|||
privileges effectively mean no index level permissions).
|
||||
|
||||
[[valid-role-name]]
|
||||
NOTE: A valid role name must be at least 1 character and no longer than 30
|
||||
characters. It must begin with a letter (`a-z`) or an underscore (`_`).
|
||||
Subsequent characters can be letters, underscores (`_`), digits (`0-9`) or
|
||||
any of the following symbols `@`, `-`, `.` or `$`
|
||||
NOTE: Role names must be at least 1 and no more than 1024 characters. They can
|
||||
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces,
|
||||
punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
|
||||
Leading or trailing whitespace is not allowed.
|
||||
|
||||
The following describes the structure of an indices permissions entry:
|
||||
|
||||
|
|
|
@ -320,7 +320,7 @@ of the current authenticated user:
|
|||
"privileges" : [ "read" ],
|
||||
"query" : {
|
||||
"template" : {
|
||||
"inline" : {
|
||||
"source" : {
|
||||
"term" : { "acl.username" : "{{_user.username}}" }
|
||||
}
|
||||
}
|
||||
|
@ -355,7 +355,7 @@ based on the `group.id` field in your documents:
|
|||
"privileges" : [ "read" ],
|
||||
"query" : {
|
||||
"template" : {
|
||||
"inline" : {
|
||||
"source" : {
|
||||
"term" : { "group.id" : "{{_user.metadata.group_id}}" }
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,239 @@ If you authenticate users with the `native` or `file` realms, you can manage
|
|||
role assignment using the <<managing-native-users, User Management APIs>> or the
|
||||
<<managing-file-users, file-realm>> command-line tool, respectively.
|
||||
|
||||
For other types of realms, you configure role mappings for users and groups in a
|
||||
For other types of realms, you must create _role-mappings_ that define which
|
||||
roles should be assigned to each user based on their username, groups, or
|
||||
other metadata.
|
||||
|
||||
{security} allows role-mappings to be defined via an
|
||||
<<mapping-roles-api, API>>, or managed through <<mapping-roles-file, files>>.
|
||||
These two sources of role-mapping are combined inside of {security}, so it is
|
||||
possible for a single user to have some roles that have been mapped through
|
||||
the API, and other roles that are mapped through files.
|
||||
|
||||
When you use role-mappings, you assign existing roles to users.
|
||||
The available roles should either be added using the
|
||||
<<roles-management-api, Role Management APIs>> or defined in the
|
||||
<<roles-management-file, roles file>>. Either role-mapping method can use
|
||||
either role management method. For example, when you use the role mapping API,
|
||||
you are able to map users to both API-managed roles and file-managed roles
|
||||
(and likewise for file-based role-mappings).
|
||||
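For instance, a hedged sketch of adding a role through the role management API,
which either mapping method can then reference (the role name and privileges
are illustrative):

[source,js]
--------------------------------------------------
POST /_xpack/security/role/monitoring <1>
{
  "cluster": [ "monitor" ],
  "indices": [
    {
      "names": [ ".monitoring-*" ],
      "privileges": [ "read" ]
    }
  ]
}
--------------------------------------------------
<1> The role name and the privileges it grants are illustrative.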
|
||||
[[mapping-roles-api]]
|
||||
==== Using the Role Mapping API
|
||||
|
||||
You can define role-mappings through the
|
||||
<<security-api-role-mapping, role mapping API>>.
|
||||
|
||||
Each role-mapping has a distinct name which is used to interact with it via the
|
||||
API. The name does not affect the behaviour of the mapping in any way, but it
|
||||
is needed so that you can update or delete an existing mapping.
|
||||
|
||||
A mapping has _rules_ that determine which users should be matched by this
|
||||
mapping, and a list of _roles_ that will be granted to the users that match.
|
||||
|
||||
The rule is a logical condition that is expressed using a JSON DSL.
|
||||
A mapping example with a simple rule is shown below:
|
||||
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{
|
||||
"roles": [ "superuser" ],
|
||||
"enabled": true,
|
||||
"rules": {
|
||||
"any": [
|
||||
{
|
||||
"field": {
|
||||
"username": "esadmin"
|
||||
}
|
||||
},
|
||||
{
|
||||
"field": {
|
||||
"groups": "cn=admins,dc=example,dc=com"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
This mapping matches any user where either of these conditions is met:
|
||||
|
||||
- the username is `esadmin`
|
||||
- the user is in the `cn=admins,dc=example,dc=com` group
|
||||
|
||||
|
||||
The rules can be more complex and include wildcard matching:
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{
|
||||
"roles": [ "superuser" ],
|
||||
"enabled": true,
|
||||
"rules": {
|
||||
"all": [
|
||||
{
|
||||
"any": [
|
||||
{
|
||||
"field": {
|
||||
"dn": "*,ou=admin,dc=example,dc=com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"field": {
|
||||
"username": [ "es-admin", "es-system" ]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"field": {
|
||||
"groups": "cn=people,dc=example,dc=com"
|
||||
}
|
||||
},
|
||||
{
|
||||
"except": {
|
||||
"field": {
|
||||
"metadata.terminated_date": null
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The mapping above matches any user where *all* of these conditions are met:
|
||||
|
||||
- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`,
|
||||
or the username is `es-admin`, or the username is `es-system`
|
||||
- the user is in the `cn=people,dc=example,dc=com` group
|
||||
- the user does not have a `terminated_date`
|
||||
|
||||
[float]
|
||||
===== The Role Mapping DSL
|
||||
The DSL supports the following rule types:
|
||||
|
||||
|=======================
|
||||
| Type | Value Type (child) | Description
|
||||
|
||||
| `any` | An array of rules | Evaluates to `true` if *any* of its
|
||||
children are true
|
||||
| `all` | An array of rules | Evaluates to `true` if *all* of its
|
||||
children are true
|
||||
| `field` | An object | <<mapping-roles-rule-field, See below>>
|
||||
| `except` | A single rule as an object | Only valid as a child of an `all`
|
||||
rule, the `except` is `true` if its
|
||||
child is `false` (negation).
|
||||
|=======================
|
||||
|
||||
[float]
|
||||
[[mapping-roles-rule-field]]
|
||||
===== The `field` Rule
|
||||
|
||||
The `field` rule is the primary building block for a role-mapping expression.
|
||||
It takes a single object as its value, and that object must contain a single
|
||||
member with key _F_ and value _V_. The field rule looks up the value of _F_
|
||||
within the user object and then tests whether the user-value _matches_ the
|
||||
provided value _V_.
|
||||
|
||||
The value specified in the field rule may be one of the following types:
|
||||
[cols="2,3m,5"]
|
||||
|=======================
|
||||
| Type | Example | Description
|
||||
|
||||
| Simple String | "esadmin" | Matches exactly the provided value
|
||||
| Wildcard String | "*,dc=example,dc=com" | Matches the provided value using a wildcard
|
||||
| Regular Expression | "/.\*-admin[0-9]*/" | Matches the provided value using a
|
||||
{ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]
|
||||
| Number | 7 | Matches an equivalent numerical value
|
||||
| Null | null | Matches a null, or missing value
|
||||
| Array | ["admin", "operator"] | Tests each element in the array in
|
||||
accordance with the definitions above.
|
||||
The match is successful if _any_ of the elements match.
|
||||
|=======================
|
||||
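For example, a sketch of a rule that uses the Number type against a
hypothetical metadata field:

[source, js]
------------------------------------------------------------
{ "field" : { "metadata.rank" : 7 } } <1>
------------------------------------------------------------
// NOTCONSOLE
<1> `metadata.rank` is a hypothetical field, shown only to illustrate numeric matching.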
|
||||
===== Available User Fields
|
||||
|
||||
The _user object_ against which the rules are evaluated has the following fields:
|
||||
[cols="1s,1,3"]
|
||||
|=======================
|
||||
| Name | Type | Description
|
||||
|
||||
| username | string | The username by which {security} knows this user.
|
||||
| dn | string | The _Distinguished Name_ of the user.
|
||||
| groups | array-of-string | The groups to which the user belongs.
|
||||
| metadata | object | Additional metadata for the user.
|
||||
| realm | object | The realm that authenticated the user.
|
||||
The only field in this object is the realm name.
|
||||
|=======================
|
||||
|
||||
Example:
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{
|
||||
"username": "jsmith",
|
||||
"dn" : "cn=jsmith,ou=users,dc=example,dc=com",
|
||||
"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", "cn=esusers,ou=groups,dc=example,dc=com" ],
|
||||
"metadata": { "cn": "John Smith" },
|
||||
"realm" : { "name": "ldap1" }
|
||||
}
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
The `groups` field is multi-valued - a user may belong to many groups. When a
|
||||
`field` rule is applied against a multi-valued field, it is considered to match
|
||||
if _at least one_ of the member values matches. This means that the rule:
|
||||
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{ "field" : { "groups" : "admin" } }
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
will match any user who is a member of the `admin` group, regardless of any
|
||||
other groups they may belong to.
|
||||
|
||||
===== Role Mapping Examples
|
||||
|
||||
- Match *all users*
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{ "field" : { "username" : "*" } }
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
- Match users who authenticated against a *specific realm*:
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{ "field" : { "realm.name" : "ldap1" } }
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
- Match users within a particular *LDAP sub-tree*: +
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{ "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
- Match users within a particular *LDAP sub-tree* in a *specific realm*:
|
||||
[source, js]
|
||||
------------------------------------------------------------
|
||||
{
|
||||
"all": [
|
||||
{ "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } },
|
||||
{ "field" : { "realm.name" : "ldap1" } }
|
||||
]
|
||||
}
|
||||
------------------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
[[mapping-roles-file]]
|
||||
==== Using Role Mapping Files
|
||||
|
||||
To use file-based role-mappings, you must configure the mappings in a
|
||||
YAML file and copy it to each node in the cluster. Tools like Puppet or Chef can
|
||||
help with this.
|
||||
|
||||
|
@ -26,20 +258,20 @@ are values. The mappings can have a many-to-many relationship. When you map role
|
|||
to groups, the roles of a user in that group are the combination of the roles
|
||||
assigned to that group and the roles assigned to that user.
|
||||
|
||||
[[ad-role-mapping]]
|
||||
The available roles are either added using the <<roles-management-api, Role Management APIs>>
|
||||
or defined in the <<roles-management-file, roles file>>. To specify users and
|
||||
groups in the role mappings, you use their _Distinguished Names_ (DNs). A DN is
|
||||
a string that uniquely identifies the user or group, for example
|
||||
`"cn=John Doe,cn=contractors,dc=example,dc=com"`.
|
||||
==== Realm Specific Details
|
||||
[float]
|
||||
[[ldap-role-mapping]]
|
||||
===== Active Directory and LDAP Realms
|
||||
To specify users and groups in the role mappings, you use their
|
||||
_Distinguished Names_ (DNs). A DN is a string that uniquely identifies the user
|
||||
or group, for example `"cn=John Doe,cn=contractors,dc=example,dc=com"`.
|
||||
|
||||
NOTE: {security} only supports Active Directory security groups. You cannot map
|
||||
distribution groups to roles.
|
||||
|
||||
[[ldap-role-mapping]]
|
||||
For example, the following snippet maps the `admins` group to the `monitoring`
|
||||
role and maps the `John Doe` user, the `users` group, and the `admins` group to
|
||||
the `user` role.
|
||||
For example, the following snippet uses the file-based method to map the
|
||||
`admins` group to the `monitoring` role and map the `John Doe` user, the
|
||||
`users` group, and the `admins` group to the `user` role.
|
||||
|
||||
[source, yaml]
|
||||
------------------------------------------------------------
|
||||
|
@ -54,9 +286,41 @@ user:
|
|||
<2> The distinguished name of an LDAP group or an Active Directory security group.
|
||||
<3> The distinguished name of an LDAP or Active Directory user.
|
||||
|
||||
We can use the role-mapping API to define equivalent mappings as follows:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/admins
|
||||
{
|
||||
"roles" : [ "monitoring", "user" ],
|
||||
"rules" : { "field" : { "groups" : "cn=admins,dc=example,dc=com" } },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/basic_users
|
||||
{
|
||||
"roles" : [ "user" ],
|
||||
"rules" : { "any" : [
|
||||
{ "field" : { "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" } },
|
||||
{ "field" : { "groups" : "cn=users,dc=example,dc=com" } }
|
||||
] },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
||||
[float]
|
||||
[[pki-role-mapping]]
|
||||
PKI realms only support mapping users to roles, as there is no notion of a group
|
||||
in PKI. For example:
|
||||
===== PKI Realms
|
||||
PKI realms support mapping users to roles, but you cannot map groups as
|
||||
the PKI realm has no notion of a group.
|
||||
|
||||
This is an example using a file-based mapping:
|
||||
|
||||
[source, yaml]
|
||||
------------------------------------------------------------
|
||||
|
@ -65,3 +329,28 @@ monitoring:
|
|||
user:
|
||||
- "cn=John Doe,ou=example,o=com"
|
||||
------------------------------------------------------------
|
||||
|
||||
And the equivalent mappings using the API:
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/admin_user
|
||||
{
|
||||
"roles" : [ "monitoring" ],
|
||||
"rules" : { "field" : { "dn" : "cn=Admin,ou=example,o=com" } },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT _xpack/security/role_mapping/basic_user
|
||||
{
|
||||
"roles" : [ "user" ],
|
||||
"rules" : { "field" : { "dn" : "cn=John Doe,ou=example,o=com" } },
|
||||
"enabled": true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
|
After Width: | Height: | Size: 106 KiB |
|
@ -87,7 +87,7 @@ Enter name for directories and files [node02]:
|
|||
Enter IP Addresses for instance (comma-separated if more than one) []: 10.10.0.2
|
||||
Enter DNS names for instance (comma-separated if more than one) []: node02.mydomain.com
|
||||
Would you like to specify another instance? Press 'y' to continue entering instance information:
|
||||
Certificates written to /Users/jmodi/dev/tmp/elasticsearch-5.0.0-alpha5-SNAPSHOT/config/x-pack/certificate-bundle.zip
|
||||
Certificates written to /home/es/config/x-pack/certificate-bundle.zip
|
||||
|
||||
This file should be properly secured as it contains the private keys for all
|
||||
instances and the certificate authority.
|
||||
|
|
|
@ -150,3 +150,4 @@ GET two:logs-2017.04/_search <1>
|
|||
}
|
||||
-----------------------------------------------------------
|
||||
|
||||
include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[]
|
|
@ -90,6 +90,40 @@ dependencies {
--------------------------------------------------------------
--

If you are using a repository manager such as https://www.sonatype.com/nexus-repository-oss[Nexus OSS] within your
company, you need to add the repository as per the following screenshot:

image::images/nexus.png["Adding the Elastic repo in Nexus",link="images/nexus.png"]

Then, if you are using Maven, add the following repository and dependency definitions to your project's `pom.xml`:

[source,xml]
--------------------------------------------------------------
<dependencies>
  <dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>x-pack-transport</artifactId>
    <version>{version}</version>
  </dependency>
</dependencies>

<repositories>
  <repository>
    <id>local-nexus</id>
    <name>Elastic Local Nexus</name>
    <url>http://0.0.0.0:8081/repository/elasticsearch/</url>
    <releases>
      <enabled>true</enabled>
    </releases>
    <snapshots>
      <enabled>false</enabled>
    </snapshots>
  </repository>
</repositories>
--------------------------------------------------------------

. Set up the transport client. At a minimum, you must configure `xpack.security.user` to
include the name and password of your transport client user in your requests. The
following snippet configures the user credentials globally--every request
@ -61,8 +61,8 @@ monitoring_user:

To configure the monitoring agent to communicate with a secured monitoring cluster:

. Configure a user on the monitoring cluster who has the `remote_monitoring_agent`
role, which is <<[[built-in-roles-remote-monitoring-agent]], built-in to {xpack}>>.
. Configure a user on the monitoring cluster who has the `remote_monitoring_agent`
role, which is <<built-in-roles-remote-monitoring-agent, built-in to {xpack}>>.
For example:
+
[source,js]
@ -7,14 +7,15 @@ response to user actions in Kibana.
To use Reporting with {security} enabled, you need to <<kibana, set up Kibana
to work with {security}>>. If you are automatically generating reports with
<<xpack-alerting, {watcher}>>, you also need to configure {watcher} to trust the
Kibana server's certificate. For more information, see <<securing-reporting,
Securing Reporting>>.
Kibana server's certificate.
//TO-DO: Add link:
//For more information, see {kibana-ref}/securing-reporting.html[Securing Reporting].

[[reporting-app-users]]
To enable users to generate reports, assign them the built-in `reporting_user`
and `kibana_user` roles:

* If you're using the `native` realm, you can assign roles through the
**Management / Users** UI in Kibana or with the `user` API. For example,
the following request creates a `reporter` user that has the
`reporting_user` and `kibana_user` roles:
@ -23,8 +24,8 @@ the following request creates a `reporter` user that has the
---------------------------------------------------------------
POST /_xpack/security/user/reporter
{
  "password" : "changeme",
  "roles" : ["kibana_user", "reporting_user"],
  "full_name" : "Reporting User"
}
---------------------------------------------------------------
@ -41,4 +42,4 @@ kibana_user:
  - "cn=Bill Murray,dc=example,dc=com"
reporting_user:
  - "cn=Bill Murray,dc=example,dc=com"
--------------------------------------------------------------------------------
@ -0,0 +1,23 @@
[role="xpack"]
[[settings-xpack]]
== Configuring X-Pack

You can configure {es} settings for {xpack} features in the `elasticsearch.yml`
file.

If you are using {kib}, there are also settings in the `kibana.yml` file. See
{kibana}/settings.html[Configuring {kib}].

//TODO: Add link to "Configuring XPack" in Kibana Reference.

The following settings pertain to specific {xpack} features:

* <<ml-settings,Machine Learning Settings>>
* {xpack-ref}/monitoring-settings.html[Monitoring Settings]
* {xpack-ref}/security-settings.html[Security Settings]
* {xpack-ref}/notification-settings.html[Watcher Settings]

For more information, see <<settings>> and
{xpack-ref}/xpack-settings.html[{xpack} Settings].

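As a rough illustration only (these toggles are examples, not an exhaustive or
authoritative list; see the linked settings pages for the real reference), a
feature switch lives alongside the other node settings in `elasticsearch.yml`:

[source,yaml]
--------------------------------------------------
# Sketch only: disable {watcher} on this node and leave the
# other {xpack} features at their defaults.
xpack.watcher.enabled: false
--------------------------------------------------
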
include::ml-settings.asciidoc[]
@ -1,9 +0,0 @@
[[graph-settings]]
== Graph Settings
You do not need to configure any settings to use {graph}.

[float]
[[general-graph-settings]]
=== General Graph Settings
`xpack.graph.enabled`::
Set to `false` to disable {graph}.
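
For the record, since this page is being removed: disabling {graph} was a
single toggle. A sketch of it in `elasticsearch.yml`:

[source,yaml]
--------------------------------------------------
# Sketch: turn off {graph} on this node.
xpack.graph.enabled: false
--------------------------------------------------
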
@ -3,19 +3,28 @@

[partintro]
--
You configure settings for X-Pack features in the `elasticsearch.yml` and `kibana.yml`
configuration files.
You configure settings for X-Pack features in the `elasticsearch.yml`,
`kibana.yml`, and `logstash.yml` configuration files.

[options="header,footer"]
|=======================
|{xpack} Feature |{es} Settings |{kib} Settings |Logstash Settings
|Graph |No |{kibana-ref}/graph-settings-kb.html[Yes] |No
|Machine learning |{ref}/ml-settings.html[Yes] |{kibana-ref}/ml-settings-kb.html[Yes] |No
|Monitoring |<<monitoring-settings,Yes>> |{kibana-ref}/monitoring-settings-kb.html[Yes] |<<monitoring-settings,Yes>>
//{ref}/settings-xpack.html[Yes]
|Reporting |No |{kibana-ref}/reporting-settings-kb.html[Yes] |No
|Security |<<security-settings,Yes>> |{kibana-ref}/security-settings-kb.html[Yes] |No
//{ref}/settings-xpack.html[Yes]
|Watcher |<<notification-settings,Yes>> |No |No
//{ref}/settings-xpack.html[Yes] No
|=======================

* <<security-settings, Security Settings>>
* <<monitoring-settings, Monitoring Settings>>
* <<notification-settings, Watcher Settings>>
* <<reporting-settings, Reporting Settings>>
* <<ml-settings, Machine Learning Settings>>
--

include::security-settings.asciidoc[]
include::monitoring-settings.asciidoc[]
include::graph-settings.asciidoc[]
//include::graph-settings.asciidoc[]
include::notification-settings.asciidoc[]
include::reporting-settings.asciidoc[]
include::ml-settings.asciidoc[]
//include::reporting-settings.asciidoc[]
//include::ml-settings.asciidoc[]
@ -1,20 +1,22 @@
[role="xpack"]
[[ml-settings]]
== Machine Learning Settings
=== Machine Learning Settings
You do not need to configure any settings to use {ml}. It is enabled by default.

[float]
[[general-ml-settings]]
=== General Machine Learning Settings
==== General Machine Learning Settings

`xpack.ml.enabled`::
Set to `true` (default) to enable {ml}. +
+
If set to `false` in `elasticsearch.yml`, the {ml} APIs are disabled.
You also cannot open jobs or start {dfeeds}.
If set to `false` in `kibana.yml`, the {ml} icon is not visible in {kib}. +
You also cannot open jobs, start {dfeeds}, or receive transport (internal)
communication requests related to {ml} APIs. +
+
IMPORTANT: If you want to use {ml} features in your cluster, you must enable
{ml} on all master-eligible nodes. This is the default behavior.
IMPORTANT: If you want to use {ml} features in your cluster, you must have
`xpack.ml.enabled` set to `true` on all master-eligible nodes. This is the
default behavior.

`node.ml`::
Set to `true` (default) to identify the node as a _machine learning node_. +

@ -26,5 +28,3 @@ least one machine learning node in your cluster. +
+
IMPORTANT: On dedicated coordinating nodes or dedicated master nodes, disable
the `node.ml` role.

//Eventually this node information should be added to https://www.elastic.co/guide/en/elasticsearch/reference/5.3/modules-node.html
|
|||
[[reporting-settings]]
|
||||
== Reporting Settings
|
||||
|
||||
You configure `xpack.reporting` settings in `kibana.yml` to
|
||||
control how {reporting} <<reporting-kibana-server-settings, communicates
|
||||
with the Kibana server>>, <<reporting-job-queue-settings, manages background
|
||||
jobs>>, and <<reporting-capture-settings, captures screenshots>>.
|
||||
|
||||
[float]
|
||||
[[general-reporting-settings]]
|
||||
=== General Reporting Settings
|
||||
`xpack.reporting.enabled`::
|
||||
Set to `false` to disable {reporting}.
|
||||
|
||||
`xpack.reporting.encryptionKey`::
|
||||
Set to any text string. By default, Kibana generates a random key when it starts,
|
||||
which causes any pending reports to fail on restart. Configure this setting to use
|
||||
the same key across restarts.
|
||||
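
A minimal sketch of these two settings in `kibana.yml` (the key value is a
placeholder; use your own secret):

[source,yaml]
--------------------------------------------------
# Sketch: keep {reporting} enabled and pin the encryption key so
# pending reports survive a Kibana restart.
xpack.reporting.enabled: true
xpack.reporting.encryptionKey: "something-long-and-random"  # placeholder
--------------------------------------------------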

[float]
[[reporting-kibana-server-settings]]
=== Kibana Server Settings

Reporting uses the Kibana interface to generate reports. In most cases, you don't need
to configure Reporting to communicate with Kibana; it just works out of the box.
However, if you use a proxy in your stack or otherwise change how you access Kibana, you
might need to configure the following settings.

`xpack.reporting.kibanaApp`::
The root path used to access Kibana. Defaults to `/app/kibana`.

`xpack.reporting.kibanaServer.port`::
The port used to access Kibana, if different from the `server.port` value.

`xpack.reporting.kibanaServer.protocol`::
The protocol used to access Kibana, typically `http` or `https`.

`xpack.reporting.kibanaServer.hostname`::
The hostname used to access Kibana, if different from the `server.name` value.
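
For example, a sketch of `kibana.yml` for a deployment where a proxy exposes
Kibana at a different endpoint (hostname and port are hypothetical):

[source,yaml]
--------------------------------------------------
# Sketch: Reporting reaches Kibana through the proxy endpoint.
xpack.reporting.kibanaServer.protocol: https
xpack.reporting.kibanaServer.hostname: kibana.example.com  # hypothetical
xpack.reporting.kibanaServer.port: 443
--------------------------------------------------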

[float]
[[reporting-job-queue-settings]]
=== Background Job Settings

Reporting generates reports in the background and jobs are coordinated using documents
in Elasticsearch. Depending on how often you generate reports and the overall number of
reports, you may need to change some of the following settings.

`xpack.reporting.queue.indexInterval`::
How often the index that stores reporting jobs rolls over to a new index.
Valid values are `year`, `month`, `week`, `day`, and `hour`. Defaults to `week`.

`xpack.reporting.queue.pollInterval`::
How often idle workers poll the index for pending jobs. Defaults to `3000` (3 seconds).

`xpack.reporting.queue.timeout`::
How long each worker has to produce a report. If your machine is slow or under constant
heavy load, you might need to increase this timeout. Specified in milliseconds.
Defaults to `30000` (30 seconds).
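
A sketch of how these might be tuned in `kibana.yml` for a cluster that
generates many reports (the numbers are illustrative):

[source,yaml]
--------------------------------------------------
# Sketch: roll the jobs index daily and give slow workers more time.
xpack.reporting.queue.indexInterval: day
xpack.reporting.queue.pollInterval: 3000   # milliseconds
xpack.reporting.queue.timeout: 60000       # 60 seconds, illustrative
--------------------------------------------------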

[float]
[[reporting-capture-settings]]
=== Capture Settings

Reporting works by capturing screenshots from Kibana. These settings are used to
control various aspects of the capturing process.

`xpack.reporting.capture.concurrency`::
The number of concurrent capture processes to run. Note that jobs are CPU bound,
and exceeding the number of cores available on the machine will likely be very
slow and might cause issues. Defaults to the number of cores on
the machine.

`xpack.reporting.capture.loadDelay`::
When visualizations are not evented, this is the amount of time to wait before
taking a screenshot. All visualizations that ship with Kibana are evented, so this
setting shouldn't have much effect. If you are seeing empty images instead of
visualizations in your reports, try increasing this value.
Defaults to `3000` (3 seconds).

`xpack.reporting.capture.settleTime`::
When visualizations are evented, this is the amount of time to wait for their rendering
to settle. If visualizations in your PDF are smaller than they should be, try increasing
this value.
Defaults to `1000` (1 second).

`xpack.reporting.capture.timeout`::
The maximum amount of time to wait for things to render in Kibana when capturing
screenshots. Defaults to `6000` (6 seconds).
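
As a sketch, a `kibana.yml` tuned for a slow machine that produces empty or
truncated screenshots (all values illustrative):

[source,yaml]
--------------------------------------------------
# Sketch: fewer concurrent captures, longer waits before screenshots.
xpack.reporting.capture.concurrency: 2
xpack.reporting.capture.loadDelay: 5000    # 5 seconds
xpack.reporting.capture.settleTime: 2000   # 2 seconds
xpack.reporting.capture.timeout: 10000     # 10 seconds
--------------------------------------------------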

[float]
[[reporting-advanced-settings]]
=== Advanced Settings

`xpack.reporting.index`::
Reporting uses a weekly index in Elasticsearch to store the reporting job and the report
content. The index will be created automatically if it does not already exist.
Defaults to `.reporting`.
@ -56,6 +56,25 @@ Level Security>>.
Set to `false` to prevent document and field level security
from being configured. Defaults to `true`.

[float]
[[token-service-settings]]
=== Token Service Settings

You can set the following token service settings in
`elasticsearch.yml`.

`xpack.security.authc.token.enabled`::
Set to `false` to disable the built-in token service. Defaults to `true`.

`xpack.security.authc.token.passphrase`::
A secure passphrase that must be the same on each node and greater than
8 characters in length. This passphrase is used to derive a cryptographic key
with which the tokens will be encrypted and authenticated.

`xpack.security.authc.token.timeout`::
The length of time that a token is valid for. By default this value is `20m` or
20 minutes. The maximum value is 1 hour.

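Putting the three settings together, a sketch of `elasticsearch.yml` (the
passphrase is a placeholder and must be identical on every node):

[source,yaml]
--------------------------------------------------
# Sketch: token service with a shared passphrase and a
# shorter-than-default token lifetime.
xpack.security.authc.token.enabled: true
xpack.security.authc.token.passphrase: "replace-with-a-long-secret"  # placeholder
xpack.security.authc.token.timeout: 10m
--------------------------------------------------
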
[float]
[[realm-settings]]
=== Realm Settings
@ -221,8 +240,8 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empty.

`unmapped_groups_as_roles`::
Takes a boolean variable. When this element is set to `true`, the names of any
unmapped LDAP groups are used as role names and assigned to the user. Defaults
to `false`.
LDAP groups that are not referenced in a role-mapping _file_ are used as role
names and assigned to the user. Defaults to `false`.

`files.role_mapping`::
The <<security-files-location,location>> for the <<ldap-role-mapping,
@ -234,6 +253,10 @@ Boolean value that specifies whether {security} should follow referrals returned
by the LDAP server. Referrals are URLs returned by the server that are to be
used to continue the LDAP operation (e.g. search). Defaults to `true`.

`metadata`::
A list of additional LDAP attributes that should be loaded from the
LDAP server and stored in the authenticated user's metadata field.

`timeout.tcp_connect`::
The TCP connect timeout period for establishing an LDAP connection.
An `s` at the end indicates seconds, or `ms` indicates milliseconds.
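
For orientation, a sketch of an LDAP realm in `elasticsearch.yml` that uses
the `metadata` and timeout settings above (the realm name, URL, and attribute
names are hypothetical):

[source,yaml]
--------------------------------------------------
# Sketch: an LDAP realm that loads extra attributes into user metadata.
xpack.security.authc.realms.ldap1:
  type: ldap
  order: 0
  url: "ldaps://ldap.example.com:636"   # hypothetical
  metadata: [ "cn", "mail" ]            # hypothetical attributes
  timeout.tcp_connect: 5s
--------------------------------------------------
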
@ -331,12 +354,12 @@ The domain name of Active Directory. The cluster can derive the URL and
otherwise specified. Required.

`unmapped_groups_as_roles`::
Takes a boolean variable. When this element is set to `true`, the names of
any unmapped groups and the user's relative distinguished name are used as
role names and assigned to the user. Defaults to `false`.
Takes a boolean variable. When this element is set to `true`, the names of any
LDAP groups that are not referenced in a role-mapping _file_ are used as role
names and assigned to the user. Defaults to `false`.

`files.role_mapping`::
The <<security-files-location,location>> for the <<ad-role-mapping, YAML
The <<security-files-location,location>> for the <<ldap-role-mapping, YAML
role mapping configuration file>>. Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`.

`user_search.base_dn`::
@ -383,7 +406,11 @@ Specifies whether the group search should be `sub_tree`, `one_level` or
`base` specifies that the `base_dn` is a group object, and that it is
the only group considered. Defaults to `sub_tree`.

`timeout.tcp_connect`::
`metadata`::
A list of additional LDAP attributes that should be loaded from the
LDAP server and stored in the authenticated user's metadata field.

`timeout.tcp_connect`::
The TCP connect timeout period for establishing an LDAP connection.
An `s` at the end indicates seconds, or `ms` indicates milliseconds.
Defaults to `5s` (5 seconds).
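
Similarly, a sketch of an Active Directory realm using the `metadata` and
`timeout.tcp_connect` settings just described (realm name, domain, and
attribute are hypothetical):

[source,yaml]
--------------------------------------------------
# Sketch: an Active Directory realm with extra metadata attributes.
xpack.security.authc.realms.ad1:
  type: active_directory
  order: 0
  domain_name: ad.example.com   # hypothetical
  metadata: [ "employeeID" ]    # hypothetical attribute
  timeout.tcp_connect: 10s
--------------------------------------------------
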
@ -0,0 +1,14 @@
[role="xpack"]
[[setup-xpack]]
= Set up X-Pack

[partintro]
--
{xpack} is an Elastic Stack extension that bundles security, alerting,
monitoring, reporting, machine learning, and graph capabilities into one
easy-to-install package. To access this functionality, you must
<<installing-xpack-es,install {xpack} in {es}>>.
--

include::installing-xes.asciidoc[]
include::settings/configuring-xes.asciidoc[]
@ -130,20 +130,21 @@ killed by firewalls or load balancers in between.
.`reporting` attachment type attributes
[options="header"]
|=====
| Name            | Description
| `url`           | The URL to trigger the dashboard creation
| `inline`        | Configures as an attachment to be sent with disposition `inline`. This
                    allows the use of embedded images in HTML bodies, which are displayed
                    in certain email clients. Optional. Defaults to `false`.
| `retries`       | The reporting attachment type tries to poll regularly to receive the
                    created PDF. This configures the number of retries. Defaults to `40`.
                    The setting `xpack.notification.reporting.retries` can be configured
                    globally to change the default.
| `interval`      | The time to wait between two polling tries. Defaults to `15s` (this
                    means, by default watcher tries to download a dashboard for 10 minutes,
                    forty times fifteen seconds). The setting `xpack.notification.reporting.interval`
                    can be configured globally to change the default.
| `request.auth`  | Additional auth information for the request
| Name            | Description
| `url`           | The URL to trigger the dashboard creation
| `inline`        | Configures as an attachment to be sent with disposition `inline`. This
                    allows the use of embedded images in HTML bodies, which are displayed
                    in certain email clients. Optional. Defaults to `false`.
| `retries`       | The reporting attachment type tries to poll regularly to receive the
                    created PDF. This configures the number of retries. Defaults to `40`.
                    The setting `xpack.notification.reporting.retries` can be configured
                    globally to change the default.
| `interval`      | The time to wait between two polling tries. Defaults to `15s` (this
                    means, by default watcher tries to download a dashboard for 10 minutes,
                    forty times fifteen seconds). The setting `xpack.notification.reporting.interval`
                    can be configured globally to change the default.
| `request.auth`  | Additional auth configuration for the request
| `request.proxy` | Additional proxy configuration for the request
|======
@ -157,8 +158,9 @@ include::{xkb-repo-dir}/reporting/watch-example.asciidoc[]

include::{xkb-repo-dir}/reporting/report-intervals.asciidoc[]

For more information, see <<automating-report-generation,
Automating Report Generation>>.
//TODO: RE-ADD LINK:
//For more information, see
//{kibana-ref}/automating-report-generation.html[Automating Report Generation].

[[email-action-attributes]]
==== Email Action Attributes
@ -108,7 +108,7 @@ aggregation and the Slack action:
"throttle_period" : "5m",
"transform" : {
  "script" : {
    "inline" : "['items': ctx.payload.aggregations.users_per_month.buckets.collect(bucket -> ['count': bucket.doc_count, 'name': bucket.key_as_string, 'color': bucket.doc_count < 100 ? 'danger' : 'good'])]",
    "source" : "['items': ctx.payload.aggregations.users_per_month.buckets.collect(bucket -> ['count': bucket.doc_count, 'name': bucket.key_as_string, 'color': bucket.doc_count < 100 ? 'danger' : 'good'])]",
    "lang" : "painless"
  }
},
@ -35,7 +35,7 @@ parameter, `result`:
--------------------------------------------------
"condition" : {
  "script" : {
    "inline" : "return result",
    "source" : "return result",
    "lang" : "painless",
    "params" : {
      "result" : true
@ -55,7 +55,7 @@ always returns `true`.
--------------------------------------------------
"condition" : {
  "script" : {
    "inline" : "return true"
    "source" : "return true"
  }
}
--------------------------------------------------
@ -113,7 +113,7 @@ threshold:
},
"condition" : {
  "script" : {
    "inline" : "return ctx.payload.hits.total > threshold",
    "source" : "return ctx.payload.hits.total > threshold",
    "params" : {
      "threshold" : 5
    }
@ -171,7 +171,7 @@ VIX quote loaded by the `http` input is either greater than 5% or lower than -5%
--------------------------------------------------
"condition" : {
  "script" : {
    "inline" : "Math.abs(ctx.payload.hits.hits[0]._source.ChangePercent) > 5",
    "source" : "Math.abs(ctx.payload.hits.hits[0]._source.ChangePercent) > 5",
    "lang" : "painless"
  }
}
@ -204,7 +204,7 @@ PUT _xpack/watcher/watch/rss_watch
}
--------------------------------------------------
// CONSOLE
// TEST[s/"id" : "threshold_hits"/"inline": "return ctx.payload.hits.total > params.threshold"/]
// TEST[s/"id" : "threshold_hits"/"source": "return ctx.payload.hits.total > params.threshold"/]
<1> Replace `<username>@<domainname>` with your email address to receive
notifications.

@ -387,7 +387,7 @@ definition as the field value. For example:
"email_notification" : {
  "email" : {
    "subject" : {
      "inline" : "{{ctx.metadata.color}} alert"
      "source" : "{{ctx.metadata.color}} alert"
    }
  }
}
@ -400,7 +400,7 @@ The formal object definition for a script would be:
----------------------------------------------------------------------
"condition" : {
  "script" : {
    "inline": "return true"
    "source": "return true"
  }
}
----------------------------------------------------------------------
@ -144,7 +144,7 @@ The following is an example of using templates that refer to provided parameters
"indices" : [ "logstash-*" ],
"types" : [ "event" ],
"template" : {
  "inline" : {
  "source" : {
    "size" : 0,
    "query" : {
      "bool" : {
@ -93,10 +93,6 @@ fi

declare -a args=("$@")

if [ -e "$CONF_DIR" ]; then
  args=("${args[@]}" -Edefault.path.conf="$CONF_DIR")
fi

cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool "${args[@]}"
status=$?