Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-09 14:34:43 +00:00)

Commit 2207c19afa: Merge branch 'master' into feature/sql
Original commit: elastic/x-pack-elasticsearch@f9c0bc95d4
@@ -92,7 +92,6 @@ buildRestTests.expectedUnconvertedCandidates = [
   'en/rest-api/security/authenticate.asciidoc',
   'en/rest-api/watcher/stats.asciidoc',
   'en/security/authorization.asciidoc',
-  'en/security/tribe-clients-integrations/logstash.asciidoc',
   'en/watcher/actions.asciidoc',
   'en/watcher/example-watches/watching-time-series-data.asciidoc',
 ]
@@ -1,6 +1,9 @@
 [role="xpack"]
 [[installing-xpack-es]]
-== Installing X-Pack
+== Installing X-Pack in Elasticsearch
+++++
+<titleabbrev>Installing {xpack}</titleabbrev>
+++++
 
 After you install {es}, you can optionally obtain and install {xpack}.
 For more information about how to obtain {xpack},
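On a default install, {xpack} is added with the plugin tool. A minimal sketch, run from the {es} home directory (this assumes the pre-6.3 packaging where {xpack} is a separate plugin):

[source,shell]
--------------------------------------------------
bin/elasticsearch-plugin install x-pack
--------------------------------------------------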
@@ -151,8 +154,10 @@ bootstrap password is only intended to be a transient password that is used to
 help you set all the built-in user passwords.
 --
 
-.. If you have more than one node, you must configure SSL/TLS for inter-node
-communication. For more information, see
+.. If you have more than one node or a single node that listens on an external
+interface, you must configure SSL/TLS for inter-node communication. Single-node
+instances that use a loopback interface do not have this requirement. For more
+information, see
 {xpack-ref}/encrypting-communications.html[Encrypting Communications].
 ... Generate node certificates. For example, you can use the `certgen` command
 line tool to generate a certificate authority and signed certificates for your
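As a sketch of that step (assuming the tool ships at `bin/x-pack/certgen`, its default location in these releases), running it interactively produces a CA plus signed per-node certificates and keys:

[source,shell]
--------------------------------------------------
bin/x-pack/certgen
--------------------------------------------------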
@@ -191,7 +196,7 @@ the `node.name` configuration setting, you must specify the full path to the
 node key file.
 <2> Alternatively, specify the full path to the node certificate.
 <3> Alternatively, specify the full path to the CA certificate.
 <4> Disables the built-in token service.
 --
 
 .. Start {es}.
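Taken together, the callouts above correspond to transport-layer settings in `elasticsearch.yml`. A hedged sketch (the file names are placeholders; the last flag matches callout <4>):

[source,yaml]
--------------------------------------------------
xpack.ssl.key: node01.key
xpack.ssl.certificate: node01.crt
xpack.ssl.certificate_authorities: [ "ca.crt" ]
xpack.security.authc.token.enabled: false
--------------------------------------------------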
@@ -104,7 +104,7 @@ progress of a {dfeed}. For example:
 `ephemeral_id`::: The node ephemeral ID.
 `transport_address`::: The host and port where transport HTTP connections are
 accepted. For example, `127.0.0.1:9300`.
-`attributes`::: For example, `{"max_running_jobs": "10"}`.
+`attributes`::: For example, `{"ml.max_open_jobs": "10"}`.
 
 `state`::
 (string) The status of the {dfeed}, which can be one of the following values: +
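For orientation, the node object that carries these fields in a get-datafeed-stats response looks roughly like this (a hypothetical, abbreviated example; the IDs are placeholders):

[source,js]
--------------------------------------------------
"node": {
  "id": "pM-bLvvkTNm1bzpm8ZkbyA",
  "name": "node-0",
  "ephemeral_id": "TD6JM2fgQPWpDjbPrZa3fA",
  "transport_address": "127.0.0.1:9300",
  "attributes": { "ml.max_open_jobs": "10" }
}
--------------------------------------------------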
@@ -196,4 +196,4 @@ This information is available only for open jobs.
 (string) The host and port where transport HTTP connections are accepted.
 
 `attributes`::
-(object) For example, {"max_running_jobs": "10"}.
+(object) For example, {"ml.max_open_jobs": "10"}.
@@ -1,7 +1,7 @@
 [[authorization]]
 == Configuring Role-based Access Control
 
-{security} introduces the concept of _authorization_ to Elasticsearch.
+{security} introduces the concept of _authorization_ to {es}.
 Authorization is the process of determining whether the user behind an incoming
 request is allowed to execute it. This process takes place once a request is
 successfully authenticated and the user behind the request is identified.
@@ -14,7 +14,7 @@ The authorization process revolves around the following 5 constructs:
 
 _Secured Resource_::
 A resource to which access is restricted. Indices/aliases, documents, fields,
-users and the Elasticsearch cluster itself are all examples of secured objects.
+users and the {es} cluster itself are all examples of secured objects.
 
 _Privilege_::
 A named group representing one or more actions that a user may execute against a
@@ -38,7 +38,7 @@ A named set of permissions
 _User_::
 The authenticated user.
 
-A secure Elasticsearch cluster manages the privileges of users through _roles_.
+A secure {es} cluster manages the privileges of users through _roles_.
 A role has a unique name and identifies a set of permissions that translate to
 privileges on resources. A user can be associated with an arbitrary number of
 roles. The total set of permissions that a user has is therefore defined by
@@ -66,20 +66,22 @@ NOTE: This role does *not* provide the ability to create indices; those privileg
 must be defined in a separate role.
 
 [[built-in-roles-kibana-system]] `kibana_system` ::
-Grants access necessary for the <<kibana, Kibana system user>>
-to read from and write to the Kibana indices and check the availability of the
-Elasticsearch cluster.
+Grants access necessary for the {kib} system user
+to read from and write to the {kib} indices and check the availability of the
+{es} cluster. For more information, see
+{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]
 +
 NOTE: This role should not be assigned to users as the granted permissions may
 change between releases.
 
 [[built-in-roles-kibana-user]] `kibana_user`::
-Grants the minimum privileges required for any user of Kibana. This role grants
-access to the Kibana indices and grants monitoring privileges for the cluster.
+Grants the minimum privileges required for any user of {kib}. This role grants
+access to the {kib} indices and grants monitoring privileges for the cluster.
 
 [[built-in-roles-logstash-system]] `logstash_system` ::
-Grants access necessary for the <<ls-monitoring-user, Logstash system user>>
-to send system-level data (such as monitoring) to Elasticsearch.
+Grants access necessary for the Logstash system user to send system-level data
+(such as monitoring) to {es}. For more information, see
+{logstash-ref}/ls-security.html[Configuring Security in Logstash].
 +
 NOTE: This role should not be assigned to users as the granted permissions may
 change between releases.
@@ -97,8 +99,8 @@ read access to the `.ml-notifications` and `.ml-anomalies*` indices,
 which store {ml} results.
 
 [[built-in-roles-monitoring-user]] `monitoring_user`::
-Grants the minimum privileges required for any user of Monitoring other than those
-required to use Kibana. This role grants access to the monitoring indices.
+Grants the minimum privileges required for any user of {monitoring} other than those
+required to use {kib}. This role grants access to the monitoring indices.
 Monitoring users should also be assigned the `kibana_user` role.
 
 [[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`::
@@ -106,8 +108,8 @@ Grants the minimum privileges required for a remote monitoring agent to write da
 into this cluster.
 
 [[built-in-roles-reporting-user]] `reporting_user`::
-Grants the specific privileges required for users of Reporting other than those
-required to use Kibana. This role grants access to the reporting indices. Reporting
+Grants the specific privileges required for users of {reporting} other than those
+required to use {kib}. This role grants access to the reporting indices. Reporting
 users should also be assigned the `kibana_user` role and a role that grants them
 access to the data that will be used to generate reports with.
 
@@ -239,7 +241,7 @@ The following snippet shows an example definition of a `clicks_admin` role:
 Based on the above definition, users owning the `clicks_admin` role can:
 
 * Impersonate the `clicks_watcher_1` user and execute requests on its behalf.
-* Monitor the Elasticsearch cluster
+* Monitor the {es} cluster
 * Read data from all indices prefixed with `events-`
 * Within these indices, only read the events of the `click` category
 * Within these documents, only read the `category`, `@timestamp` and `message`
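The snippet referenced in the hunk header is not shown in this diff; a role with exactly those capabilities could be sketched roughly as follows (a hedged reconstruction, not the original snippet):

[source,js]
--------------------------------------------------
POST /_xpack/security/role/clicks_admin
{
  "run_as": [ "clicks_watcher_1" ],
  "cluster": [ "monitor" ],
  "indices": [
    {
      "names": [ "events-*" ],
      "privileges": [ "read" ],
      "field_security": {
        "grant": [ "category", "@timestamp", "message" ]
      },
      "query": "{\"match\": {\"category\": \"click\"}}"
    }
  ]
}
--------------------------------------------------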
@@ -248,7 +250,7 @@ Based on the above definition, users owning the `clicks_admin` role can:
 TIP: For a complete list of available <<security-privileges, cluster and indices privileges>>
 
 There are two available mechanisms to define roles: using the _Role Management APIs_
-or in local files on the Elasticsearch nodes. {security} also supports implementing
+or in local files on the {es} nodes. {security} also supports implementing
 custom roles providers. If you need to integrate with another system to retrieve
 user roles, you can build a custom roles provider plugin. For more information,
 see <<custom-roles-provider, Custom Roles Provider Extension>>.
@@ -257,9 +259,10 @@ see <<custom-roles-provider, Custom Roles Provider Extension>>.
 [[roles-management-ui]]
 === Role Management UI
 
-If you are a Kibana user, make sure to <<installing-xpack, install {xpack} in Kibana>>.
-This enables you to easily manage users and roles from within Kibana. To manage roles,
-log in to Kibana and go to *Management / Elasticsearch / Roles*.
+If you are a {kib} user, make sure to
+<<installing-xpack, install {xpack} in {kib}>>.
+This enables you to easily manage users and roles from within {kib}. To manage roles,
+log in to {kib} and go to *Management / Elasticsearch / Roles*.
 
 [float]
 [[roles-management-api]]
@@ -267,7 +270,7 @@ log in to Kibana and go to *Management / Elasticsearch / Roles*.
 
 The _Role Management APIs_ enable you to add, update, remove and retrieve roles
 dynamically. When you use the APIs to manage roles in the `native` realm, the
-roles are stored in an internal Elasticsearch index.
+roles are stored in an internal {es} index.
 
 [[roles-api-add]]
 ==== Adding a Role
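As an illustration of the add-role API (the role name and body below are hypothetical, following the same `POST _xpack/security/role/<name>` pattern used for `logstash_writer` later in this commit):

[source,js]
--------------------------------------------------
POST /_xpack/security/role/events_admin
{
  "indices": [
    {
      "names": [ "events-*" ],
      "privileges": [ "all" ]
    }
  ]
}
--------------------------------------------------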
@@ -376,7 +379,7 @@ _Role Management APIs_, the role found in the file will be used.
 
 While the _Role Management APIs_ is the preferred mechanism to define roles,
 using the `roles.yml` file becomes useful if you want to define fixed roles that
-no one (beside an administrator having physical access to the Elasticsearch nodes)
+no one (beside an administrator having physical access to the {es} nodes)
 would be able to change.
 
 [IMPORTANT]
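For reference, a fixed role pinned in `roles.yml` might be sketched like this (the role and index names are illustrative, following the file-based role format of these releases):

[source,yaml]
--------------------------------------------------
fixed_auditor:
  cluster: [ 'monitor' ]
  indices:
    - names: [ 'audit-*' ]
      privileges: [ 'read', 'view_index_metadata' ]
--------------------------------------------------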
@@ -67,7 +67,7 @@ search shards, templates, validate).
 `view_index_metadata`::
 Read-only access to index metadata (aliases, aliases exists, get index, exists, field mappings,
 mappings, search shards, type exists, validate, warmers, settings). This
-privilege is primarily available for use by <<kibana-roles, Kibana users>>.
+privilege is primarily available for use by {kib} users.
 
 `read`::
 Read only access to actions (count, explain, get, mget, get indexed scripts,
@@ -1,330 +0,0 @@ (the entire file is removed)
[[security-release-notes]]
== Shield Release Notes (Pre-5.0)

[float]
[[update-roles]]
=== Updated Role Definitions
The default role definitions in the `roles.yml` file may need to be changed to ensure proper interoperation with other
applications such as Monitoring and Kibana. Any role changes are stored in `roles.yml.new` when you upgrade. We recommend copying the following changes to your `roles.yml` file.

* The `kibana4` role now grants access to the Field Stats API.
* The permissions on all the roles are updated to the verbose format to make it easier to enable field level and document level security. The `transport_client` role has been updated to work with Elasticsearch 2.0.0.
The `marvel_user` role has been updated to work with Monitoring 2.0 and a `remote_marvel_agent` role has been added. The `kibana3` and `marvel_agent` roles have been removed.
* `kibana` role added that defines the minimum set of permissions necessary for the Kibana 4 server.
* `kibana4` role updated to work with new features in Kibana 4 RC1

[float]
[[security-change-list]]
=== Change List
[float]
==== 2.4.2
November 22, 2016

.Bug Fixes
* Users with `manage` or `manage_security` cluster privileges can now access the `.security` index if they have the appropriate index
privileges.

.Breaking Changes
* Shield on tribe nodes now requires `tribe.on_conflict` to prefer one of the clusters.

[float]
==== 2.4.0
August 31, 2016

.Breaking Changes
* The `monitor` cluster privilege now grants access to the GET `/_license` API

[float]
==== 2.3.5
August 3, 2016

.Bug Fixes
* Fixed a license problem that was preventing tribe nodes from working with
Shield.

[float]
==== 2.3.4
July 7, 2016

.Bug Fixes
* The `default` transport profile SSL settings now override the `shield.ssl.*`
settings properly.
* Fixed a memory leak that occurred when indices were deleted or closed.

[float]
==== 2.3.3
May 18, 2016

.Bug Fixes
* Fixed the `/_shield/realm/{realms}/_cache/clear` REST endpoint. This endpoint is deprecated and `/_shield/realm/{realms}/_clear_cache` should be used going forward.

[float]
==== 2.3.2
April 26, 2016

.Bug Fixes
* Date math expressions in index names are now resolved before attempting to authorize access to the indices.
* Fixed an issue where active directory realms did not work unless the url setting was configured.
* Enabled `_cat/indices` to be used when Shield is installed.

[float]
==== 2.3.1
April 4, 2016

.Bug Fixes
* Fixed an issue that could prevent nodes from joining the cluster.

[float]
==== 2.3.0
March 30, 2016

.New Features
* <<native-realm,Native realm>> with support for
{ref}/security-api-users.html[user management APIs].
* <<security-api-roles,Role management APIs>> have been added.

.Bug Fixes
* When evaluating permissions for multiple roles that have document level security enabled for the same index, Shield performed an `AND`
on the queries, which is not consistent with how role privileges work in Shield. This has been changed to an `OR` relationship and may
affect the behavior of existing roles; please ensure you are not relying on the `AND` behavior of document level security queries.
* When evaluating permissions for a user that has roles with and without document level security (and/or field level security), the roles that
granted unrestricted access were not being applied properly and the user's access was still being restricted.

.Enhancements
* Added new <<security-privileges, privileges>> to simplify access control.

[float]
==== 2.2.1
March 15, 2016

.Bug Fixes
* Enable <<field-and-document-access-control,document and field level security>> by default.
* Fix issues with message authentication on certain JDKs that do not support cloning message
authentication codes.
* Built in <<setting-up-authentication, realms>> no longer throw an exception if the `Authorization` header does not contain a basic
authentication token.
* Ensure each tribe client node has the same shield configuration as defined in the settings.

[float]
==== 2.2.0
February 2, 2016

.New Features
* Shield plugin for Kibana: Secures user sessions and enables users to log in and out of Kibana.
For information about installing the Shield plugin, see <<kibana, Using Kibana with Shield>>.

.Bug Fixes
* Update requests (including within bulk requests) are blocked when document
and field level security is enabled

[float]
==== 2.1.2
February 2, 2016

.Enhancements
* Adds support for Elasticsearch 2.1.2

[float]
==== 2.1.1
December 17, 2015

.Bug Fixes
* Disable the request cache when <<document-level-security, document level security>> is in use for a search request.
* Fix startup failures when using auditing and <<audit-log-entry-local-node-info, enabling network information output>>.
* Updated the `kibana4` role to include the Field Stats API.

[float]
==== 2.1.0
November 24, 2015

.Breaking Changes
* Same as 2.0.1. <<field-and-document-access-control, Document and Field Level Security>> is now disabled by default. Set `shield.dls_fls.enabled` to `true` in `elasticsearch.yml` to enable it. You cannot submit `_bulk` update requests when document and field level security is enabled.

.Enhancements
* Adds support for Elasticsearch 2.1.0.

[float]
==== 2.0.2
December 16, 2015

.Bug Fixes
* Disable the request cache when <<document-level-security, document level security>> is in use for a search request.

[float]
==== 2.0.1
November 24, 2015

.Breaking Changes
* <<field-and-document-access-control, Document and Field Level Security>> is now disabled by default. Set `shield.dls_fls.enabled` to `true` in `elasticsearch.yml` to enable it. You cannot submit `_bulk` update requests when document and field level security is enabled.

.Enhancement
* Adds support for Elasticsearch 2.0.1.

[float]
==== 2.0.0
October 28, 2015

.Breaking Changes
* All files that Shield uses must be kept in the <<security-files-location, configuration directory>> due to the enhanced security of Elasticsearch 2.0.
* The network format has been changed from all previous versions of Shield and a full cluster restart is required to upgrade to Shield 2.0.

.New Features
* <<field-and-document-access-control, Document and Field Level Security>> support has been added and can be
configured per role.
* Support for <<custom-realms, custom authentication realms>> has been added, allowing Shield to integrate with more authentication sources and methods.
* <<run-as-privilege, User impersonation support>> has also been added, which allows a user to send a request to Elasticsearch that will be run
with the specified user's permissions.

.Bug Fixes
* <<auditing, Auditing>> now captures requests from nodes using a different system key as tampered requests.
* The <<audit-index, index output for auditing>> stores the type of request when available.
* `esusers` and `syskeygen` work when spaces are in the Elasticsearch installation path.
* Fixed a rare issue where authentication fails even when the username and password are correct.

[float]
==== 1.3.3

.Bug Fixes
* Fixed a rare issue where authentication fails even when the username and password are correct.
* The <<audit-index, index output for auditing>> stores the type of request when available.

.Enhancements
* Tampered requests with a bad header are now audited.

[float]
==== 1.3.2
August 10, 2015

.Bug Fixes
* When using the <<ldap-user-search,LDAP user search>> mechanism, connection errors during startup no longer cause the node to stop.
* The {ref}/security-api-clear-cache.html[Clear Cache API] no longer generates invalid JSON.
* The <<audit-index,index output for auditing>> starts properly when forwarding the audit events to a remote cluster and uses
the correct user to index the audit events.

[float]
==== 1.3.1
July 21, 2015

.Bug Fixes
* Fixes message authentication serialization to work with Shield 1.2.1 and earlier.
** NOTE: if you are upgrading from Shield 1.3.0 or Shield 1.2.2 a {ref-17}/setup-upgrade.html#restart-upgrade[cluster restart upgrade]
will be necessary. When upgrading from other versions of Shield, follow the normal upgrade procedure.

[float]
==== 1.3.0
June 24, 2015

.Breaking Changes
* The `sha2` and `apr1` hashing algorithms have been removed as options for the <<cache-hash-algo,`cache.hash_algo` setting>>.
If your existing Shield installation uses either of these options, remove the setting and use the default `ssha256`
algorithm.
* The `users` file now only supports `bcrypt` password hashing. All existing passwords stored using the `esusers` tool
have been hashed with `bcrypt` and are not affected.

.New Features
* <<pki-realm,PKI Realm>>: Adds Public Key Infrastructure (PKI) authentication through the use of X.509 certificates in place of
username and password credentials.
* <<auditing, Index Output for Audit Events>>: An index based output has been added for storing audit events in an Elasticsearch index.

.Enhancements
* TLS 1.2 is now the default protocol.
* Clients that do not support pre-emptive basic authentication can now support both anonymous and authenticated access
by specifying the `shield.authc.anonymous.authz_exception` <<anonymous-access,setting>> with a value of `false`.
* Reduced logging for common SSL exceptions, such as a client closing the connection during a handshake.

.Bug Fixes
* The `esusers` and `syskeygen` tools now work correctly with environment variables in the RPM and DEB installation
environment files `/etc/sysconfig/elasticsearch` and `/etc/default/elasticsearch`.
* Default ciphers no longer include `TLS_DHE_RSA_WITH_AES_128_CBC_SHA`.

[float]
==== 1.2.3
July 21, 2015

.Bug Fixes
* Fixes message authentication serialization to work with Shield 1.2.1 and earlier.
** NOTE: if you are upgrading from Shield 1.2.2 a {ref-17}/setup-upgrade.html#restart-upgrade[cluster restart upgrade]
will be necessary. When upgrading from other versions of Shield, follow the normal upgrade procedure.

[float]
==== 1.2.2
June 24, 2015

.Bug Fixes
* The `esusers` tool no longer warns about missing roles that are properly defined in the `roles.yml` file.
* The period character, `.`, is now allowed in usernames and role names.
* The {ref-17}/query-dsl-terms-filter.html#_caching_19[terms filter lookup cache] has been disabled to ensure all requests
are properly authorized. This removes the need to manually disable the terms filter cache.
* For LDAP client connections, only the protocols and ciphers specified in the `shield.ssl.supported_protocols` and
`shield.ssl.ciphers` {ref}/security-settings.html#ssl-tls-settings[settings] will be used.
* The auditing mechanism now logs authentication failed events when a request contains an invalid authentication token.

[float]
==== 1.2.1
April 29, 2015

.Bug Fixes
* Several bug fixes including a fix to ensure that {ref}/disk-allocator.html[Disk-based Shard Allocation]
works properly with Shield.

[float]
==== 1.2.0
March 24, 2015

.Enhancements
* Adds support for Elasticsearch 1.5

[float]
==== 1.1.1
April 29, 2015

.Bug Fixes
* Several bug fixes including a fix to ensure that {ref}/disk-allocator.html[Disk-based Shard Allocation]
works properly with Shield.

[float]
==== 1.1.0
March 24, 2015

.New Features
* LDAP:
** Add the ability to bind as a specific user for LDAP searches, which removes the need to specify `user_dn_templates`.
This mode of operation also makes use of connection pooling for better performance. Please see <<ldap-user-search, ldap user search>>
for more information.
** User distinguished names (DNs) can now be used for <<ldap-role-mapping, role mapping>>.
* Authentication:
** <<anonymous-access, Anonymous access>> is now supported (disabled by default).
* IP Filtering:
** IP Filtering settings can now be <<dynamic-ip-filtering,dynamically updated>> using the {ref}/cluster-update-settings.html[Cluster Update Settings API].

.Enhancements
* Significant memory footprint reduction of internal data structures
* Test if SSL/TLS ciphers are supported and warn if any of the specified ciphers are not supported
* Reduce the amount of logging when a non-encrypted connection is opened and `https` is being used
* Added the <<kibana-roles, `kibana_server` role>>, which is a role that contains the minimum set of permissions required for the Kibana 4 server.
* In-memory user credential caching hash algorithm now defaults to salted SHA-256 (see <<cache-hash-algo, Cache hash algorithms>>)

.Bug Fixes
* Filter out sensitive settings from the settings APIs

[float]
==== 1.0.2
March 24, 2015

.Bug Fixes
* Filter out sensitive settings from the settings APIs
* Significant memory footprint reduction of internal data structures

[float]
==== 1.0.1
February 13, 2015

.Bug Fixes
* Fixed dependency issues with Elasticsearch 1.4.3 (and the Lucene 4.10.3 that comes with it)
* Fixed a bug in how user roles were handled. When multiple roles were defined for a user, and one of the
roles only had cluster permissions, not all privileges were properly evaluated.
* Updated `kibana4` permissions to be compatible with Kibana 4 RC1
* Ensure the mandatory `base_dn` setting is set in the `ldap` realm configuration
@@ -21,8 +21,8 @@ the stack are connected to the cluster and therefore need to be secured as well,
 or at least communicate with the cluster in a secured way:
 
 * <<hadoop, Apache Hadoop>>
-* <<logstash, Logstash>>
-* <<kibana, Kibana>>
+* {logstash-ref}/ls-security.html[Logstash]
+* {kibana-ref}/using-kibana-with-security.html[{kib}]
 * <<secure-monitoring, Monitoring>>
 * {kibana-ref}/secure-reporting.html[Reporting]
 
@@ -36,10 +36,6 @@ include::tribe-clients-integrations/http.asciidoc[]
 
 include::tribe-clients-integrations/hadoop.asciidoc[]
 
-include::tribe-clients-integrations/logstash.asciidoc[]
-
 include::tribe-clients-integrations/beats.asciidoc[]
 
-include::tribe-clients-integrations/kibana.asciidoc[]
-
 include::tribe-clients-integrations/monitoring.asciidoc[]
 
@@ -1,197 +0,0 @@ (the entire file is removed)

[[kibana]]
=== Kibana and Security

[[using-kibana-with-security]]
Kibana users have to log in when {security} is enabled on your cluster. You
configure {security} roles for your Kibana users to control what data those users
can access. You also need to configure credentials for the
Kibana server so the requests it submits to Elasticsearch on the user's
behalf can be authenticated.

To prevent user passwords from being sent in the clear, you must configure
Kibana to encrypt communications between the browser and the Kibana server.
If you are encrypting traffic to and from the nodes in your Elasticsearch cluster,
you must also configure Kibana to connect to Elasticsearch via HTTPS.

With {security} enabled, if you load a Kibana dashboard that accesses data in an
index that you are not authorized to view, you get an error that indicates the
index does not exist. {security} does not currently provide a way to control which
users can load which dashboards.

IMPORTANT: Support for tribe nodes in Kibana was added in v5.2.

To use Kibana with {security}:

. Configure the password for the built-in `kibana` user. The Kibana server submits
requests as this user to access the cluster monitoring APIs and the `.kibana` index.
The server does _not_ need access to user indices.
+
By default, the `kibana` user does not have a password. The user will not be enabled until
a password is set. Set the password through the reset password API:
+
[source,shell]
--------------------------------------------------------------------------------
PUT /_xpack/security/user/kibana/_password
{
  "password" : "s0m3th1ngs3cr3t"
}
--------------------------------------------------------------------------------
// CONSOLE
+
Once you change the password, you need to specify it with the `elasticsearch.password`
property in `kibana.yml`:
+
[source,yaml]
--------------------------------------------------------------------------------
elasticsearch.password: "s0m3th1ngs3cr3t"
--------------------------------------------------------------------------------

[[kibana-roles]]
. Assign the `kibana_user` role to grant Kibana users the privileges they
need to use Kibana.
+
IMPORTANT: You also need to grant Kibana users access to the
indices that they will be working with in Kibana.
+
** If you're using the `native` realm, you can assign roles using the
<<managing-native-users, User Management API>>. For example, the following
creates a user named `jacknich` and assigns it the `kibana_user` role:
+
[source,js]
--------------------------------------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "t0pS3cr3t",
  "roles" : [ "kibana_user" ]
}
--------------------------------------------------------------------------------
// CONSOLE

** If you are using an LDAP or Active Directory realm, you can either assign
roles on a per user basis, or assign roles to groups of users. By default, role
mappings are stored in <<mapping-roles, `CONFIGDIR/x-pack/role_mapping.yml`>>.
For example, the following snippet assigns the `kibana_user` role to the
group named `admins` and the user named Jack Nicholson:
+
[source,yaml]
--------------------------------------------------------------------------------
kibana_user:
  - "cn=admins,dc=example,dc=com"
  - "cn=Jack Nicholson,dc=example,dc=com"
--------------------------------------------------------------------------------

[[configure-kibana-cert]]
. Configure Kibana to encrypt communications between the browser and the Kibana
server:
.. Generate a server certificate for Kibana. You must either set the certificate's
`subjectAltName` to the hostname, fully-qualified domain name (FQDN), or IP
address of the Kibana server, or set the CN to the Kibana server's hostname
or FQDN. Using the server's IP address as the CN does not work.
.. Set the `server.ssl.key` and `server.ssl.certificate` properties in `kibana.yml`:
+
[source,yaml]
--------------------------------------------------------------------------------
server.ssl.key: /path/to/your/server.key
server.ssl.certificate: /path/to/your/server.crt
--------------------------------------------------------------------------------
+
Once you enable SSL encryption between the browser and the Kibana server,
access Kibana via HTTPS. For example, `https://localhost:5601`.
+
NOTE: You must enable SSL encryption between the browser and the Kibana
server to use Kibana with {security} enabled. If {security} is configured to
encrypt connections to Elasticsearch, you must also <<configure-kibana-ssl,
configure Kibana to connect to Elasticsearch via HTTPS>>.

[[configure-kibana-ssl]]
. If you have enabled SSL encryption in {security}, configure Kibana to connect
to Elasticsearch via HTTPS:

.. Specify the HTTPS protocol in the `elasticsearch.url` setting in the Kibana
configuration file, `kibana.yml`:
+
[source,yaml]
--------------------------------------------------------------------------------
elasticsearch.url: "https://<your_elasticsearch_host>.com:9200"
--------------------------------------------------------------------------------

.. If you are using your own CA to sign certificates for Elasticsearch, set the
`elasticsearch.ssl.certificateAuthorities` setting in `kibana.yml` to specify
the location of the PEM file.
+
[source,yaml]
--------------------------------------------------------------------------------
elasticsearch.ssl.certificateAuthorities: /path/to/your/cacert.pem
--------------------------------------------------------------------------------

. Install {xpack} into Kibana to secure user sessions and enable users
to log in and out of Kibana:

.. Run the following command in your Kibana installation directory.
+
[source,console]
--------------------------------------------------------------------------------
bin/kibana-plugin install x-pack
--------------------------------------------------------------------------------
+
[NOTE]
=============================================================================
To perform an offline install, download the {xpack} zip file from
https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip[
+https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip+]
(https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip.sha1[sha1])
and run:

["source","sh",subs="attributes"]
---------------------------------------------------------
bin/kibana-plugin install file:///path/to/file/x-pack-{version}.zip
---------------------------------------------------------
=============================================================================

.. Set the `xpack.security.encryptionKey` property in the `kibana.yml` configuration file.
You can use any text string that is 32 characters or longer as the encryption key.
+
[source,yaml]
--------------------------------------------------------------------------------
xpack.security.encryptionKey: "something_at_least_32_characters"
--------------------------------------------------------------------------------

.. To change the default session duration, set the `xpack.security.sessionTimeout` property
in the `kibana.yml` configuration file. By default, sessions will stay active until the
browser is closed. The timeout is specified in milliseconds. For example, set the timeout
to 600000 to expire sessions after 10 minutes:
+
[source,yaml]
--------------------------------------------------------------------------------
xpack.security.sessionTimeout: 600000
--------------------------------------------------------------------------------

. Restart Kibana and verify that you can log in as a user. If you are running
Kibana locally, go to `https://localhost:5601` and enter the credentials for a
user you've assigned a Kibana user role. For example, you could log in as the
`jacknich` user created above.
+
image::images/kibana-login.jpg["Kibana Login",link="images/kibana-login.jpg"]
+
NOTE: This must be a user who has been assigned the `kibana_user` role.
Kibana server credentials should only be used internally by the
Kibana server.

[float]
[[security-ui-settings]]
===== Kibana {security} UI Settings
[options="header"]
|======
| Name | Default | Description
| `xpack.security.encryptionKey` | - | An arbitrary string of 32 characters or more used to encrypt credentials in a cookie. It is crucial that this key is not exposed to users of Kibana. Required.
| `xpack.security.sessionTimeout` | `1800000` (30 minutes) | Sets the session duration (in milliseconds).
| `xpack.security.cookieName` | `"sid"` | Sets the name of the cookie used for the session.
| `xpack.security.secureCookies` | `false` | Sets the `secure` flag of the session cookie. It is set to `true` automatically if `server.ssl.certificate` and `server.ssl.key` are set. Set this to `true` if SSL is configured outside of Kibana (for example, you are routing requests through a load balancer or proxy).
|======
@@ -1,224 +0,0 @@ (the entire file is removed)
[[logstash]]
=== Logstash and Security

The Logstash Elasticsearch plugins
({logstash-ref}/plugins-outputs-elasticsearch.html[output],
{logstash-ref}/plugins-inputs-elasticsearch.html[input],
{logstash-ref}/plugins-filters-elasticsearch.html[filter],
and {logstash-ref}/monitoring-logstash.html[monitoring])
support authentication and encryption over HTTP.

To use Logstash with a secured cluster, you need to configure authentication
credentials for Logstash. Logstash throws an exception and the processing
pipeline is halted if authentication fails.

If encryption is enabled on the cluster, you also need to enable SSL in the
Logstash configuration.

If you wish to monitor your Logstash instance with {xpack} monitoring, and store
the monitoring data in a secured Elasticsearch cluster, you must configure Logstash
with a username and password for a user with the appropriate permissions.

In addition to configuring authentication credentials for Logstash, you need
to grant authorized users permission to access the Logstash indices.

[float]
[[ls-http-auth-basic]]
==== Configuring Logstash to use Basic Authentication

Logstash needs to be able to manage index templates, create indices,
and write and delete documents in the indices it creates.

To set up authentication credentials for Logstash:

. Create a `logstash_writer` role that has the `manage_index_templates` cluster
privilege, and the `write`, `delete`, and `create_index` privileges for the
Logstash indices. You can create roles from the **Management > Roles** UI in
Kibana or through the `role` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/role/logstash_writer
{
  "cluster": ["manage_index_templates", "monitor"],
  "indices": [
    {
      "names": [ "logstash-*" ], <1>
      "privileges": ["write","delete","create_index"]
    }
  ]
}
---------------------------------------------------------------

<1> If you use a custom Logstash index pattern, specify that pattern
instead of the default `logstash-*` pattern.

. Create a `logstash_internal` user and assign it the `logstash_writer` role.
You can create users from the **Management > Users** UI in Kibana or through
the `user` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/user/logstash_internal
{
  "password" : "x-pack-test-password",
  "roles" : [ "logstash_writer"],
  "full_name" : "Internal Logstash User"
}
---------------------------------------------------------------

. Configure Logstash to authenticate as the `logstash_internal` user you just
created. You configure credentials separately for each of the Elasticsearch
plugins in your Logstash `.conf` file. For example:
+
[source,js]
--------------------------------------------------
input {
  elasticsearch {
    ...
    user => logstash_internal
    password => x-pack-test-password
  }
}
filter {
  elasticsearch {
    ...
    user => logstash_internal
    password => x-pack-test-password
  }
}
output {
  elasticsearch {
    ...
    user => logstash_internal
    password => x-pack-test-password
  }
}
--------------------------------------------------

[float]
[[ls-user-access]]
==== Granting Users Access to the Logstash Indices

To access the indices Logstash creates, users need the `read` and
`view_index_metadata` privileges:

. Create a `logstash_reader` role that has the `read` and `view_index_metadata`
privileges for the Logstash indices. You can create roles from the
**Management > Roles** UI in Kibana or through the `role` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/role/logstash_reader
{
  "indices": [
    {
      "names": [ "logstash-*" ], <1>
      "privileges": ["read","view_index_metadata"]
    }
  ]
}
---------------------------------------------------------------

<1> If you use a custom Logstash index pattern, specify that pattern
instead of the default `logstash-*` pattern.

. Assign your Logstash users the `logstash_reader` role. You can create
and manage users from the **Management > Users** UI in Kibana or through
the `user` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/user/logstash_user
{
  "password" : "x-pack-test-password",
  "roles" : [ "logstash_reader"],
  "full_name" : "Kibana User"
}
---------------------------------------------------------------

[float]
[[ls-http-auth-pki]]
===== Configuring the elasticsearch Output to use PKI Authentication

The `elasticsearch` output supports PKI authentication. To use an X.509
client-certificate for authentication, you configure the `keystore` and
`keystore_password` options in your Logstash `.conf` file:

[source,js]
--------------------------------------------------
output {
  elasticsearch {
    ...
    keystore => /path/to/keystore.jks
    keystore_password => realpassword
    truststore => /path/to/truststore.jks <1>
    truststore_password => realpassword
  }
}
--------------------------------------------------
<1> If you use a separate truststore, the truststore path and password are
also required.

[float]
[[ls-http-ssl]]
===== Configuring Logstash to use TLS Encryption

If TLS encryption is enabled on the Elasticsearch cluster, you need to
configure the `ssl` and `cacert` options in your Logstash `.conf` file:

[source,js]
--------------------------------------------------
output {
  elasticsearch {
    ...
    ssl => true
    cacert => '/path/to/cert.pem' <1>
  }
}
--------------------------------------------------
<1> The path to the local `.pem` file that contains the Certificate
Authority's certificate.

[float]
[[ls-monitoring-user]]
===== Configuring Logstash Monitoring

If you wish to ship Logstash {logstash-ref}/monitoring-logstash.html[monitoring]
data to a secure cluster, Logstash must be configured with a username and password.

X-Pack security comes preconfigured with a `logstash_system` user for this purpose.
This user has the minimum permissions necessary for the monitoring function, and
_should not_ be used for any other purpose; it is specifically _not intended_ for
use within a Logstash pipeline.

By default, the `logstash_system` user does not have a password. The user will not be enabled until
a password is set. Set the password through the reset password API:

[source,js]
---------------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_password
{
  "password": "t0p.s3cr3t"
}
---------------------------------------------------------------------
// CONSOLE

Then configure the user and password in your `logstash.yml` configuration file:

[source,yaml]
----------------------------------------------------------
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: t0p.s3cr3t
----------------------------------------------------------

If you initially installed an older version of X-Pack, and then upgraded, then
the `logstash_system` user may have defaulted to disabled for security reasons.
You can enable the user with the following API call:

[source,js]
---------------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_enable
---------------------------------------------------------------------
// CONSOLE

@@ -2,31 +2,32 @@
 === Monitoring and Security
 
 <<xpack-monitoring, {monitoring}>> consists of two components: an agent
-that you install on on each Elasticsearch and Logstash node, and a Monitoring UI
-in Kibana. The monitoring agent collects and indexes metrics from the nodes
-and you visualize the data through the Monitoring dashboards in Kibana. The agent
-can index data on the same Elasticsearch cluster, or send it to an external
+that you install on each {es} and Logstash node, and a Monitoring UI
+in {kib}. The monitoring agent collects and indexes metrics from the nodes
+and you visualize the data through the Monitoring dashboards in {kib}. The agent
+can index data on the same {es} cluster, or send it to an external
 monitoring cluster.
 
 To use {monitoring} with {security} enabled, you need to
-<<kibana, set up Kibana to work with {security}>> and create at least one user
-for the Monitoring UI. If you are using an external monitoring cluster, you also
-need to configure a user for the monitoring agent and configure the agent to use
-the appropriate credentials when communicating with the monitoring cluster.
+{kibana-ref}/using-kibana-with-security.html[set up {kib} to work with {security}]
+and create at least one user for the Monitoring UI. If you are using an external
+monitoring cluster, you also need to configure a user for the monitoring agent
+and configure the agent to use the appropriate credentials when communicating
+with the monitoring cluster.
 
 [float]
 [[monitoring-ui-users]]
 ==== Setting Up Monitoring UI Users
 
-When {security} is enabled, Kibana users are prompted to log in when they access
-the UI. To use the Monitoring UI, a user must have access to the Kibana indices
+When {security} is enabled, {kib} users are prompted to log in when they access
+the UI. To use the Monitoring UI, a user must have access to the {kib} indices
 and permission to read from the monitoring indices.
 
 You set up Monitoring UI users on the cluster where the monitoring data is being
 stored. To grant all of the necessary permissions, assign the user the
 `monitoring_user` and `kibana_user` roles:
 
-* If you're using the `native` realm, you can assign roles through Kibana or
+* If you're using the `native` realm, you can assign roles through {kib} or
 with the <<managing-native-users, User Management API>>. For example, the following
 command creates a user named `jacknich` and assigns him the `kibana_user` and
 `monitoring_user` roles:
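The command itself falls outside this hunk; following the user-API pattern shown earlier in this commit, it would look roughly like this (a sketch, not the literal command from the source):

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "t0pS3cr3t",
  "roles" : [ "kibana_user", "monitoring_user" ]
}
--------------------------------------------------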
@@ -76,7 +77,7 @@ POST /_xpack/security/user/agent-user
 +
 
 . On each node in the cluster being monitored, configure a Monitoring HTTP exporter
-in `elasticsearch.yml` and restart Elasticsearch. In the exporter configuration,
+in `elasticsearch.yml` and restart {es}. In the exporter configuration,
 you need to:
 +
 --
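Such an exporter block might look like the following sketch (the exporter name, host, and credentials are placeholders):

[source,yaml]
--------------------------------------------------
xpack.monitoring.exporters:
  id1:
    type: http
    host: [ "http://monitoring-host:9200" ]
    auth.username: agent-user
    auth.password: password
--------------------------------------------------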
@@ -103,7 +104,7 @@ If SSL/TLS is enabled on the monitoring cluster:
 .. Include the CA certificate in each node's trusted certificates in order to verify
 the identities of the nodes in the monitoring cluster.
 
-To add a CA certificate to an Elasticsearch node's trusted certificates, you
+To add a CA certificate to an {es} node's trusted certificates, you
 can specify the location of the PEM encoded certificate with the
 `certificate_authorities` setting:
 
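For instance (a sketch; the exporter id and path are placeholders):

[source,yaml]
--------------------------------------------------
xpack.monitoring.exporters.id1.ssl.certificate_authorities: [ "/path/to/ca.crt" ]
--------------------------------------------------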
@@ -178,4 +179,4 @@ Alternatively, you can configure trusted certificates using a truststore
 xpack.monitoring.elasticsearch.ssl.truststore.path: /path/to/file
 xpack.monitoring.elasticsearch.ssl.truststore.password: x-pack-test-password
 --------------------------------------------------
 --
@@ -1,6 +1,10 @@
 [role="xpack"]
 [[ml-settings]]
-=== Machine Learning Settings
+=== Machine Learning Settings in Elasticsearch
+++++
+<titleabbrev>Machine Learning Settings</titleabbrev>
+++++
 
 You do not need to configure any settings to use {ml}. It is enabled by default.
 
 [float]
@@ -1,6 +1,9 @@
 [role="xpack"]
 [[monitoring-settings]]
-=== Monitoring Settings
+=== Monitoring Settings in Elasticsearch
+++++
+<titleabbrev>Monitoring Settings</titleabbrev>
+++++
 
 Monitoring is enabled by default when you install {xpack}. You can configure
 these monitoring settings in the `elasticsearch.yml` file.
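Two commonly adjusted entries, as a sketch (the values shown are illustrative):

[source,yaml]
--------------------------------------------------
xpack.monitoring.enabled: true
xpack.monitoring.collection.interval: 10s
--------------------------------------------------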
@@ -1,6 +1,9 @@
 [role="xpack"]
 [[notification-settings]]
-=== {watcher} Settings
+=== {watcher} Settings in Elasticsearch
+++++
+<titleabbrev>{watcher} Settings</titleabbrev>
+++++
 
 You configure `xpack.notification` settings in `elasticsearch.yml` to
 set up {watcher} and send notifications via <<email-notification-settings, email>>,
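A hedged sketch of an email account definition under these settings (host, port, and user are placeholders):

[source,yaml]
--------------------------------------------------
xpack.notification.email.account:
  work:
    profile: standard
    smtp:
      auth: true
      host: smtp.example.com
      port: 587
      user: watcher@example.com
--------------------------------------------------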
@@ -1,6 +1,9 @@
 [role="xpack"]
 [[security-settings]]
-=== Security Settings
+=== Security Settings in Elasticsearch
+++++
+<titleabbrev>Security Settings</titleabbrev>
+++++
 
 You configure `xpack.security` settings to
 <<anonymous-access-settings, enable anonymous access>>
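For example, anonymous access settings take roughly this shape (the role name is hypothetical):

[source,yaml]
--------------------------------------------------
xpack.security.authc.anonymous:
  username: anonymous_user
  roles: [ viewer ]
  authz_exception: true
--------------------------------------------------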
@@ -190,7 +190,8 @@ public class MachineLearning implements ActionPlugin {
                 ProcessCtrl.MAX_ANOMALY_RECORDS_SETTING,
                 DataCountsReporter.ACCEPTABLE_PERCENTAGE_DATE_PARSE_ERRORS_SETTING,
                 DataCountsReporter.ACCEPTABLE_PERCENTAGE_OUT_OF_ORDER_ERRORS_SETTING,
-                AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE));
+                AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE,
+                AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE));
     }
 
     public Settings additionalSettings() {
@@ -204,7 +205,7 @@ public class MachineLearning implements ActionPlugin {
             // TODO: the simple true/false flag will not be required once all supported versions have the number - consider removing in 7.0
             additionalSettings.put("node.attr." + ML_ENABLED_NODE_ATTR, "true");
             additionalSettings.put("node.attr." + MAX_OPEN_JOBS_NODE_ATTR,
-                    AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.get(settings));
+                    AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings));
         }
         return additionalSettings.build();
     }
@@ -434,7 +435,7 @@ public class MachineLearning implements ActionPlugin {
         if (false == enabled || tribeNode || tribeNodeClient || transportClientMode) {
             return emptyList();
         }
-        int maxNumberOfJobs = AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.get(settings);
+        int maxNumberOfJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings);
         // 4 threads per job: for cpp logging, result processing, state processing and
         // AutodetectProcessManager worker thread:
         FixedExecutorBuilder autoDetect = new FixedExecutorBuilder(settings, AUTODETECT_THREAD_POOL_NAME,
@@ -84,7 +84,7 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.function.Predicate;
 
-import static org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE;
+import static org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE;
 
 public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.Response, OpenJobAction.RequestBuilder> {
 
@@ -578,7 +578,7 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
                              AutodetectProcessManager autodetectProcessManager) {
             super(settings, TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME);
             this.autodetectProcessManager = autodetectProcessManager;
-            this.fallbackMaxNumberOfOpenJobs = AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.get(settings);
+            this.fallbackMaxNumberOfOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings);
             this.maxConcurrentJobAllocations = MachineLearning.CONCURRENT_JOB_ALLOCATIONS.get(settings);
             clusterService.getClusterSettings()
                     .addSettingsUpdateConsumer(MachineLearning.CONCURRENT_JOB_ALLOCATIONS, this::setMaxConcurrentJobAllocations);
@@ -739,7 +739,7 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
             long available = maxNumberOfOpenJobs - numberOfAssignedJobs;
             if (available == 0) {
                 String reason = "Not opening job [" + jobId + "] on node [" + node + "], because this node is full. " +
-                        "Number of opened jobs [" + numberOfAssignedJobs + "], " + MAX_RUNNING_JOBS_PER_NODE.getKey() +
+                        "Number of opened jobs [" + numberOfAssignedJobs + "], " + MAX_OPEN_JOBS_PER_NODE.getKey() +
                         " [" + maxNumberOfOpenJobs + "]";
                 logger.trace(reason);
                 reasons.add(reason);
@@ -145,7 +145,10 @@ public class DatafeedManager extends AbstractComponent {
     }
 
     public void isolateDatafeed(long allocationId) {
-        runningDatafeedsOnThisNode.get(allocationId).isolateDatafeed();
+        Holder holder = runningDatafeedsOnThisNode.get(allocationId);
+        if (holder != null) {
+            holder.isolateDatafeed();
+        }
     }
 
     // Important: Holder must be created and assigned to DatafeedTask before setting state to started,
@@ -74,19 +74,24 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 
+import static org.elasticsearch.common.settings.Setting.Property;
+
 public class AutodetectProcessManager extends AbstractComponent {
 
     // TODO: Ideally this setting shouldn't need to exist
     // We should be able from the job config to estimate the memory/cpu a job needs to have,
     // and if we know that then we can prior to assigning a job to a node fail based on the
     // available resources on that node: https://github.com/elastic/x-pack-elasticsearch/issues/546
     // Note: on small instances on cloud, this setting will be set to: 1
+    // However, it is useful to also be able to apply a hard limit.
 
-    // WARNING: This setting cannot be made DYNAMIC, because it is tied to several threadpools
+    // WARNING: These settings cannot be made DYNAMIC, because they are tied to several threadpools
     // and a threadpool's size can't be changed at runtime.
     // See MachineLearning#getExecutorBuilders(...)
+    // TODO: Remove the deprecated setting in 7.0 and move the default value to the replacement setting
+    @Deprecated
     public static final Setting<Integer> MAX_RUNNING_JOBS_PER_NODE =
-            Setting.intSetting("max_running_jobs", 10, 1, 512, Setting.Property.NodeScope);
+            Setting.intSetting("max_running_jobs", 10, 1, 512, Property.NodeScope, Property.Deprecated);
+    public static final Setting<Integer> MAX_OPEN_JOBS_PER_NODE =
+            Setting.intSetting("xpack.ml.max_open_jobs", MAX_RUNNING_JOBS_PER_NODE, 1, Property.NodeScope);
 
     private final Client client;
     private final ThreadPool threadPool;
@ -116,7 +121,7 @@ public class AutodetectProcessManager extends AbstractComponent {
|
||||
this.client = client;
|
||||
this.threadPool = threadPool;
|
||||
this.xContentRegistry = xContentRegistry;
|
||||
this.maxAllowedRunningJobs = MAX_RUNNING_JOBS_PER_NODE.get(settings);
|
||||
this.maxAllowedRunningJobs = MAX_OPEN_JOBS_PER_NODE.get(settings);
|
||||
this.autodetectProcessFactory = autodetectProcessFactory;
|
||||
this.normalizerFactory = normalizerFactory;
|
||||
this.jobManager = jobManager;
|
||||
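The replacement setting above names the deprecated one as its fallback, so nodes that still configure max_running_jobs keep their limit until they migrate, at the cost of a deprecation warning. A minimal sketch of how that resolution behaves (it mirrors the unit tests added later in this commit, and is illustrative rather than new API):

    Settings onlyOldKey = Settings.builder().put("max_running_jobs", 9).build();
    MAX_OPEN_JOBS_PER_NODE.get(onlyOldKey);     // 9, plus a deprecation warning for max_running_jobs

    Settings bothKeys = Settings.builder()
            .put("max_running_jobs", 9)
            .put("xpack.ml.max_open_jobs", 7)
            .build();
    MAX_OPEN_JOBS_PER_NODE.get(bothKeys);       // 7, the new key wins

    MAX_OPEN_JOBS_PER_NODE.get(Settings.EMPTY); // 10, inherited from the old setting's default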
@ -151,7 +151,7 @@ public class OpenJobActionTests extends ESTestCase {
Assignment result = OpenJobAction.selectLeastLoadedMlNode("job_id2", cs.build(), 2, maxRunningJobsPerNode, logger);
assertNull(result.getExecutorNode());
assertTrue(result.getExplanation().contains("because this node is full. Number of opened jobs [" + maxRunningJobsPerNode
+ "], max_running_jobs [" + maxRunningJobsPerNode + "]"));
+ "], xpack.ml.max_open_jobs [" + maxRunningJobsPerNode + "]"));
}

public void testSelectLeastLoadedMlNode_noMlNodes() {
@ -5,12 +5,13 @@
*/
package org.elasticsearch.xpack.ml.integration;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.xpack.ml.action.DeleteDatafeedAction;
import org.elasticsearch.xpack.ml.action.GetDatafeedsStatsAction;
import org.elasticsearch.xpack.ml.action.PutJobAction;
import org.elasticsearch.xpack.ml.action.StopDatafeedAction;
@ -25,6 +26,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed;
import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob;
@ -52,7 +54,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
client().admin().indices().prepareCreate("data-2")
.addMapping("type", "time", "type=date")
.get();
ClusterHealthResponse r = client().admin().cluster().prepareHealth("data-1", "data-2").setWaitForYellowStatus().get();
client().admin().cluster().prepareHealth("data-1", "data-2").setWaitForYellowStatus().get();
long numDocs2 = randomIntBetween(32, 2048);
indexDocs(logger, "data-2", numDocs2, oneWeekAgo, now);

@ -62,9 +64,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
assertTrue(putJobResponse.isAcknowledged());
assertThat(putJobResponse.getResponse().getJobVersion(), equalTo(Version.CURRENT));
openJob(job.getId());
assertBusy(() -> {
assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED);
});
assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED));

List<String> t = new ArrayList<>(2);
t.add("data-1");
@ -155,6 +155,49 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
}
}

public void testRealtime_givenSimultaneousStopAndForceDelete() throws Throwable {
String jobId = "realtime-job-stop-and-force-delete";
final String datafeedId = jobId + "-datafeed";
startRealtime(jobId);

AtomicReference<Throwable> exception = new AtomicReference<>();

// The UI now force deletes datafeeds, which means they can be deleted while running.
// The first step is to isolate the datafeed. But if it was already being stopped then
// the datafeed may not be running by the time the isolate action is executed. This
// test will sometimes (depending on thread scheduling) achieve this situation and ensure
// the code is robust to it.
Thread deleteDatafeedThread = new Thread(() -> {
try {
DeleteDatafeedAction.Request request = new DeleteDatafeedAction.Request(datafeedId);
request.setForce(true);
DeleteDatafeedAction.Response response = client().execute(DeleteDatafeedAction.INSTANCE, request).actionGet();
if (response.isAcknowledged()) {
GetDatafeedsStatsAction.Request statsRequest = new GetDatafeedsStatsAction.Request(datafeedId);
expectThrows(ResourceNotFoundException.class,
() -> client().execute(GetDatafeedsStatsAction.INSTANCE, statsRequest).actionGet());
} else {
exception.set(new AssertionError("Datafeed is not deleted"));
}
} catch (AssertionError | Exception e) {
exception.set(e);
}
});
deleteDatafeedThread.start();

try {
stopDatafeed(datafeedId);
} catch (ResourceNotFoundException e) {
// This is OK - it means the thread running the delete fully completed before the stop started to execute
} finally {
deleteDatafeedThread.join();
}

if (exception.get() != null) {
throw exception.get();
}
}

private void startRealtime(String jobId) throws Exception {
client().admin().indices().prepareCreate("data")
.addMapping("type", "time", "type=date")
@ -168,9 +211,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
registerJob(job);
assertTrue(putJob(job).isAcknowledged());
openJob(job.getId());
assertBusy(() -> {
assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED);
});
assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED));

DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data"));
registerDatafeed(datafeedConfig);
@ -88,7 +88,7 @@ public class TooManyJobsIT extends BaseMlIntegTestCase {
} catch (ElasticsearchStatusException e) {
assertTrue(e.getMessage(), e.getMessage().startsWith("Could not open job because no suitable nodes were found, allocation explanation"));
assertTrue(e.getMessage(), e.getMessage().endsWith("because this node is full. Number of opened jobs [" + maxNumberOfJobsPerNode +
"], max_running_jobs [" + maxNumberOfJobsPerNode + "]]"));
"], xpack.ml.max_open_jobs [" + maxNumberOfJobsPerNode + "]]"));
logger.info("good news everybody --> reached maximum number of allowed opened jobs, after trying to open the {}th job", i);

// close the first job and check if the latest job gets opened:
@ -111,12 +111,12 @@ public class TooManyJobsIT extends BaseMlIntegTestCase {
}

private void startMlCluster(int numNodes, int maxNumberOfJobsPerNode) throws Exception {
// clear all nodes, so that we can set max_running_jobs setting:
// clear all nodes, so that we can set xpack.ml.max_open_jobs setting:
internalCluster().ensureAtMostNumDataNodes(0);
logger.info("[{}] is [{}]", AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode);
logger.info("[{}] is [{}]", AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode);
for (int i = 0; i < numNodes; i++) {
internalCluster().startNode(Settings.builder()
.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode));
.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode));
}
logger.info("Started [{}] nodes", numNodes);
ensureStableCluster(numNodes);
@ -122,6 +122,37 @@ public class AutodetectProcessManagerTests extends ESTestCase {
}).when(jobProvider).getAutodetectParams(any(), any(), any());
}

public void testMaxOpenJobsSetting_givenDefault() {
int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(Settings.EMPTY);
assertEquals(10, maxOpenJobs);
}

public void testMaxOpenJobsSetting_givenNewSettingOnly() {
Settings.Builder settings = Settings.builder();
settings.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), 7);
int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings.build());
assertEquals(7, maxOpenJobs);
}

public void testMaxOpenJobsSetting_givenOldSettingOnly() {
Settings.Builder settings = Settings.builder();
settings.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), 9);
int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings.build());
assertEquals(9, maxOpenJobs);
assertWarnings("[max_running_jobs] setting was deprecated in Elasticsearch and will be removed in a future release! "
+ "See the breaking changes documentation for the next major version.");
}

public void testMaxOpenJobsSetting_givenOldAndNewSettings() {
Settings.Builder settings = Settings.builder();
settings.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), 7);
settings.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), 9);
int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(settings.build());
assertEquals(7, maxOpenJobs);
assertWarnings("[max_running_jobs] setting was deprecated in Elasticsearch and will be removed in a future release! "
+ "See the breaking changes documentation for the next major version.");
}

public void testOpenJob_withoutVersion() {
Client client = mock(Client.class);
AutodetectCommunicator communicator = mock(AutodetectCommunicator.class);
@ -137,7 +168,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {
when(jobTask.getJobId()).thenReturn(job.getId());

AtomicReference<Exception> errorHolder = new AtomicReference<>();
manager.openJob(jobTask, e -> errorHolder.set(e));
manager.openJob(jobTask, errorHolder::set);

Exception error = errorHolder.get();
assertThat(error, is(notNullValue()));
@ -180,7 +211,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {
AutodetectProcessFactory autodetectProcessFactory =
(j, modelSnapshot, quantiles, filters, e, onProcessCrash) -> autodetectProcess;
Settings.Builder settings = Settings.builder();
settings.put(AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE.getKey(), 3);
settings.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), 3);
AutodetectProcessManager manager = spy(new AutodetectProcessManager(settings.build(), client, threadPool, jobManager, jobProvider,
jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory,
normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor));
@ -245,6 +276,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {
InputStream inputStream = createInputStream("");
XContentType xContentType = randomFrom(XContentType.values());
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
BiConsumer<DataCounts, Exception> handler = (BiConsumer<DataCounts, Exception>) invocationOnMock.getArguments()[3];
handler.accept(null, new IOException("blah"));
return null;
@ -354,6 +386,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {

FlushJobParams params = FlushJobParams.builder().build();
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
BiConsumer<Void, Exception> handler = (BiConsumer<Void, Exception>) invocationOnMock.getArguments()[1];
handler.accept(null, new IOException("blah"));
return null;
@ -455,9 +488,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {
InputStream inputStream = createInputStream("");
DataCounts[] dataCounts = new DataCounts[1];
manager.processData(jobTask, inputStream,
randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {
dataCounts[0] = dataCounts1;
});
randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> dataCounts[0] = dataCounts1);

assertThat(dataCounts[0], equalTo(new DataCounts("foo")));
}
@ -5,45 +5,31 @@
*/
package org.elasticsearch.xpack.watcher.actions;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.watcher.condition.CompareCondition;
import org.elasticsearch.xpack.watcher.execution.ExecutionState;
import org.elasticsearch.xpack.watcher.history.HistoryStore;
import org.elasticsearch.xpack.watcher.history.WatchRecord;
import org.elasticsearch.xpack.watcher.support.xcontent.ObjectPath;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchResponse;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.junit.Before;

import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.stream.Collectors;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction;
import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction;
import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder;
import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput;
import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest;
import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.searchTransform;
import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput;
import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule;
import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval;
import static org.hamcrest.Matchers.is;

@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," +
"org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail:WARN," +
"org.elasticsearch.xpack.watcher.WatcherLifeCycleService:DEBUG," +
"org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerMock:TRACE," +
"org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
@ -54,62 +40,30 @@ public class TimeThrottleIntegrationTests extends AbstractWatcherIntegrationTest
return true;
}

@Before
public void indexTestDocument() {
IndexResponse eventIndexResponse = client().prepareIndex("events", "event")
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.setSource("level", "error")
.get();
assertEquals(DocWriteResponse.Result.CREATED, eventIndexResponse.getResult());
}

public void testTimeThrottle() throws Exception {
String id = randomAlphaOfLength(20);
PutWatchResponse putWatchResponse = watcherClient().preparePutWatch()
.setId(id)
.setSource(watchBuilder()
.trigger(schedule(interval("5s")))
.input(searchInput(templateRequest(new SearchSourceBuilder(), "events")))
.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L))
.transform(searchTransform(templateRequest(new SearchSourceBuilder(), "events")))
.addAction("_id", indexAction("actions", "action"))
.input(simpleInput())
.addAction("my-logging-action", loggingAction("foo"))
.defaultThrottlePeriod(TimeValue.timeValueSeconds(30)))
.get();
assertThat(putWatchResponse.isCreated(), is(true));

timeWarp().clock().setTime(DateTime.now(DateTimeZone.UTC));

timeWarp().trigger(id);
refresh();

// the first fire should work
assertHitCount(client().prepareSearch("actions").setTypes("action").get(), 1);
assertHistoryEntryExecuted(id);

timeWarp().clock().fastForward(TimeValue.timeValueMillis(4000));
timeWarp().trigger(id);
refresh();

// the last fire should have been throttled, so number of actions shouldn't change
assertHitCount(client().prepareSearch("actions").setTypes("action").get(), 1);
assertHistoryEntryThrottled(id);

timeWarp().clock().fastForwardSeconds(30);
timeWarp().trigger(id);
refresh();
assertHistoryEntryExecuted(id);

// the last fire occurred past the throttle period, so a new action should have been added
assertHitCount(client().prepareSearch("actions").setTypes("action").get(), 2);

SearchResponse response = client().prepareSearch(HistoryStore.INDEX_PREFIX_WITH_TEMPLATE + "*")
.setSource(new SearchSourceBuilder().query(QueryBuilders.boolQuery()
.must(matchQuery(WatchRecord.STATE.getPreferredName(), ExecutionState.THROTTLED.id()))
.must(termQuery("watch_id", id))))
.get();
List<Map<String, Object>> hits = Arrays.stream(response.getHits().getHits())
.map(SearchHit::getSourceAsMap)
.collect(Collectors.toList());

String message = String.format(Locale.ROOT, "Expected single throttled hits, but was %s", hits);
assertThat(message, response.getHits().getTotalHits(), is(1L));
assertTotalHistoryEntries(id, 3);
}
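Both throttle tests follow the same three-phase cycle: the first trigger executes the action, a trigger inside the throttle window is recorded as throttled, and a trigger after the window executes again, leaving three watch history entries in total. The helpers further down read result.actions.0.status from the latest history entry to tell the executed and throttled cases apart.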
public void testTimeThrottleDefaults() throws Exception {
@ -118,43 +72,59 @@ public class TimeThrottleIntegrationTest
.setId(id)
.setSource(watchBuilder()
.trigger(schedule(interval("1s")))
.input(searchInput(templateRequest(new SearchSourceBuilder(), "events")))
.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L))
.transform(searchTransform(templateRequest(new SearchSourceBuilder(), "events")))
.addAction("_id", indexAction("actions", "action")))
.input(simpleInput())
.addAction("my-logging-action", indexAction("actions", "action")))
.get();
assertThat(putWatchResponse.isCreated(), is(true));

timeWarp().clock().setTime(DateTime.now(DateTimeZone.UTC));

timeWarp().trigger(id);
refresh();

// the first trigger should work
SearchResponse response = client().prepareSearch("actions").setTypes("action").get();
assertHitCount(response, 1);
assertHistoryEntryExecuted(id);

timeWarp().clock().fastForwardSeconds(2);
timeWarp().trigger(id);
refresh("actions");

// the last fire should have been throttled, so number of actions shouldn't change
response = client().prepareSearch("actions").setTypes("action").get();
assertHitCount(response, 1);
assertHistoryEntryThrottled(id);

timeWarp().clock().fastForwardSeconds(10);
timeWarp().trigger(id);
refresh();
assertHistoryEntryExecuted(id);

// the last fire occurred past the throttle period, so a new action should have been added
response = client().prepareSearch("actions").setTypes("action").get();
assertHitCount(response, 2);
assertTotalHistoryEntries(id, 3);
}

private void assertHistoryEntryExecuted(String id) {
Map<String, Object> map = assertLatestHistoryEntry(id);
String actionStatus = ObjectPath.eval("result.actions.0.status", map);
assertThat(actionStatus, is("success"));
}

private void assertHistoryEntryThrottled(String id) {
Map<String, Object> map = assertLatestHistoryEntry(id);
String actionStatus = ObjectPath.eval("result.actions.0.status", map);
assertThat(actionStatus, is("throttled"));
}

private Map<String, Object> assertLatestHistoryEntry(String id) {
refresh(HistoryStore.INDEX_PREFIX_WITH_TEMPLATE + "*");

SearchResponse searchResponse = client().prepareSearch(HistoryStore.INDEX_PREFIX_WITH_TEMPLATE + "*")
.setSize(1)
.setSource(new SearchSourceBuilder().query(QueryBuilders.boolQuery()
.must(matchQuery(WatchRecord.STATE.getPreferredName(), ExecutionState.THROTTLED.id()))
.must(termQuery("watch_id", id))))
.addSort(SortBuilders.fieldSort("result.execution_time").order(SortOrder.DESC))
.get();
assertHitCount(searchResponse, 1);

Map<String, Object> map = searchResponse.getHits().getHits()[0].getSourceAsMap();
String actionId = ObjectPath.eval("result.actions.0.id", map);
assertThat(actionId, is("my-logging-action"));
return map;
}

private void assertTotalHistoryEntries(String id, long expectedCount) {
SearchResponse searchResponse = client().prepareSearch(HistoryStore.INDEX_PREFIX_WITH_TEMPLATE + "*")
.setSize(0)
.setSource(new SearchSourceBuilder().query(QueryBuilders.boolQuery().must(termQuery("watch_id", id))))
.get();

assertHitCount(searchResponse, expectedCount);
}
}
@ -47,8 +47,7 @@ import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;

/**
* This test makes sure that the http host and path fields in the watch_record action result are
* not analyzed so they can be used in aggregations
* This test makes sure that the mappings for the watch_record are correct
*/
public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegrationTestCase {

@ -126,9 +125,12 @@ public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegration
assertThat(webServer.requests().get(1).getUri().getPath(), is("/webhook/path"));
}

@AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2222")
// This test is unstable, but cannot be reproduced locally (thus all the logging messages);
// feel free to add back the awaits-fix below when it is found in CI.
// Also, please provide the failed build id to allow checking the logs.
// @AwaitsFix(bugUrl = "https://github.com/elastic/x-pack-elasticsearch/issues/2222")
public void testExceptionMapping() {
// delete all history indices to ensure that we start with a fresh mapping
// delete all history indices to ensure that we only need to check a single index
assertAcked(client().admin().indices().prepareDelete(HistoryStore.INDEX_PREFIX + "*"));

String id = randomAlphaOfLength(10);
@ -136,13 +138,15 @@ public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegration
boolean abortAtInput = randomBoolean();
if (abortAtInput) {
webServer.enqueue(new MockResponse().setBeforeReplyDelay(TimeValue.timeValueSeconds(5)));
logger.info("Will delay at first HTTP request at the input");
} else {
webServer.enqueue(new MockResponse().setBody("{}"));
webServer.enqueue(new MockResponse().setBeforeReplyDelay(TimeValue.timeValueSeconds(5)));
logger.info("Will delay at second HTTP request at the action");
}

PutWatchResponse putWatchResponse = watcherClient().preparePutWatch(id).setSource(watchBuilder()
.trigger(schedule(interval("5s")))
.trigger(schedule(interval("1h")))
.input(httpInput(HttpRequestTemplate.builder("localhost", webServer.getPort())
.path("/")
.readTimeout(TimeValue.timeValueMillis(10))))
@ -163,6 +167,7 @@ public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegration
.setQuery(QueryBuilders.termQuery("watch_id", id))
.get();
assertHitCount(searchResponse, 1L);
logger.info("Watch history record [{}]", searchResponse.getHits().getHits()[0].getSourceAsMap());

// ensure that enabled is set to false
List<Boolean> indexed = new ArrayList<>();
@ -179,6 +184,7 @@ public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegration
Boolean enabled = ObjectPath.eval("properties.result.properties.actions.properties.error.enabled", docMapping);
indexed.add(enabled);
}
logger.info("Full mapping [{}]", docMapping);
}

assertThat(indexed, hasSize(greaterThanOrEqualTo(1)));
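The mapping walk above checks that result.actions.error is mapped with enabled set to false, meaning the error payload stays in _source but is not parsed or indexed, so arbitrary exception structures cannot blow up the watch history mapping.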
@ -121,6 +121,11 @@ public class WatchBackwardsCompatibilityIT extends ESRestTestCase {
return true;
}

@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}

@Override
protected Settings restClientSettings() {
String token = "Basic " + Base64.getEncoder()

@ -8,6 +8,7 @@ package org.elasticsearch.smoketest;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.junit.annotations.Network;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
@ -24,6 +25,7 @@ import static org.hamcrest.Matchers.is;

/** Runs rest tests against external cluster */
@Network // Needed to access an external Jira server
@TestLogging("_root:TRACE")
public class SmokeTestWatcherClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

public SmokeTestWatcherClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
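For context on the new annotation: in the Elasticsearch test framework, @Network marks suites that need real external connectivity, and such suites are skipped unless network tests are explicitly enabled, which keeps this Jira-dependent smoke test out of default builds.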