From ee5ab5b1d2a82b68fd2fa1a725bd1250fceee96f Mon Sep 17 00:00:00 2001 From: debadair Date: Thu, 6 Apr 2017 18:29:29 -0700 Subject: [PATCH] [DOCS] Migrated security topics from x-pack repo to x-pack-elasticsearch. Original commit: elastic/x-pack-elasticsearch@e54aa1fd0a2f0e60013d7e427329e4408fa578fe --- docs/en/security/auditing.asciidoc | 432 +++++++++++++++++ docs/en/security/authentication.asciidoc | 265 +++++++++++ .../active-directory-realm.asciidoc | 336 +++++++++++++ .../authentication/anonymous-access.asciidoc | 30 ++ .../authentication/custom-realm.asciidoc | 107 +++++ .../authentication/file-realm.asciidoc | 292 ++++++++++++ .../authentication/ldap-realm.asciidoc | 406 ++++++++++++++++ .../authentication/migrate-tool.asciidoc | 73 +++ .../authentication/native-realm.asciidoc | 196 ++++++++ .../authentication/pki-realm.asciidoc | 132 ++++++ .../authentication/user-cache.asciidoc | 60 +++ docs/en/security/authorization.asciidoc | 394 ++++++++++++++++ .../authorization/alias-privileges.asciidoc | 101 ++++ ...field-and-document-access-control.asciidoc | 443 ++++++++++++++++++ .../authorization/mapping-roles.asciidoc | 67 +++ .../authorization/run-as-privilege.asciidoc | 31 ++ docs/en/security/getting-started.asciidoc | 118 +++++ docs/en/security/how-security-works.asciidoc | 145 ++++++ docs/en/security/index.asciidoc | 118 +++++ docs/en/security/limitations.asciidoc | 87 ++++ docs/en/security/reference.asciidoc | 12 + docs/en/security/reference/files.asciidoc | 41 ++ .../en/security/reference/privileges.asciidoc | 93 ++++ docs/en/security/release-notes.asciidoc | 329 +++++++++++++ .../security/securing-communications.asciidoc | 23 + .../enabling-cipher-suites.asciidoc | 24 + .../separating-node-client-traffic.asciidoc | 78 +++ .../setting-up-ssl.asciidoc | 258 ++++++++++ .../tribe-clients-integrations.asciidoc | 42 ++ .../tribe-clients-integrations/beats.asciidoc | 187 ++++++++ .../hadoop.asciidoc | 23 + .../tribe-clients-integrations/http.asciidoc | 62 +++ 
.../tribe-clients-integrations/java.asciidoc | 259 ++++++++++ .../kibana.asciidoc | 197 ++++++++ .../logstash.asciidoc | 219 +++++++++ .../monitoring.asciidoc | 181 +++++++ .../reporting.asciidoc | 44 ++ .../tribe-clients-integrations/tribe.asciidoc | 109 +++++ docs/en/security/troubleshooting.asciidoc | 229 +++++++++ docs/en/security/using-ip-filtering.asciidoc | 143 ++++++ 40 files changed, 6386 insertions(+) create mode 100644 docs/en/security/auditing.asciidoc create mode 100644 docs/en/security/authentication.asciidoc create mode 100644 docs/en/security/authentication/active-directory-realm.asciidoc create mode 100644 docs/en/security/authentication/anonymous-access.asciidoc create mode 100644 docs/en/security/authentication/custom-realm.asciidoc create mode 100644 docs/en/security/authentication/file-realm.asciidoc create mode 100644 docs/en/security/authentication/ldap-realm.asciidoc create mode 100644 docs/en/security/authentication/migrate-tool.asciidoc create mode 100644 docs/en/security/authentication/native-realm.asciidoc create mode 100644 docs/en/security/authentication/pki-realm.asciidoc create mode 100644 docs/en/security/authentication/user-cache.asciidoc create mode 100644 docs/en/security/authorization.asciidoc create mode 100644 docs/en/security/authorization/alias-privileges.asciidoc create mode 100644 docs/en/security/authorization/field-and-document-access-control.asciidoc create mode 100644 docs/en/security/authorization/mapping-roles.asciidoc create mode 100644 docs/en/security/authorization/run-as-privilege.asciidoc create mode 100644 docs/en/security/getting-started.asciidoc create mode 100644 docs/en/security/how-security-works.asciidoc create mode 100644 docs/en/security/index.asciidoc create mode 100644 docs/en/security/limitations.asciidoc create mode 100644 docs/en/security/reference.asciidoc create mode 100644 docs/en/security/reference/files.asciidoc create mode 100644 docs/en/security/reference/privileges.asciidoc create mode 
100644 docs/en/security/release-notes.asciidoc create mode 100644 docs/en/security/securing-communications.asciidoc create mode 100644 docs/en/security/securing-communications/enabling-cipher-suites.asciidoc create mode 100644 docs/en/security/securing-communications/separating-node-client-traffic.asciidoc create mode 100644 docs/en/security/securing-communications/setting-up-ssl.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/beats.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/hadoop.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/http.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/java.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/kibana.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/logstash.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/monitoring.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/reporting.asciidoc create mode 100644 docs/en/security/tribe-clients-integrations/tribe.asciidoc create mode 100644 docs/en/security/troubleshooting.asciidoc create mode 100644 docs/en/security/using-ip-filtering.asciidoc diff --git a/docs/en/security/auditing.asciidoc b/docs/en/security/auditing.asciidoc new file mode 100644 index 00000000000..4fd62315143 --- /dev/null +++ b/docs/en/security/auditing.asciidoc @@ -0,0 +1,432 @@ +[[auditing]] +== Auditing Security Events + +You can enable auditing to keep track of security-related events such as +authentication failures and refused connections. Logging these events enables you +to monitor your cluster for suspicious activity and provides evidence in the +event of an attack. + +[IMPORTANT] +============================================================================ +Audit logs are **disabled** by default. 
To enable this functionality, you +must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. +============================================================================ + +{Security} provides two ways to persist audit logs: + +* The <> output, which persists events to + a dedicated `_access.log` file on the host's file system. +* The <> output, which persists events to an Elasticsearch index. +The audit index can reside on the same cluster, or a separate cluster. + +By default, only the `logfile` output is used when enabling auditing. +To facilitate browsing and analyzing the events, you can also enable +indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: + +[source,yaml] +---------------------------- +xpack.security.audit.outputs: [ index, logfile ] +---------------------------- + +The `index` output type should be used in conjunction with the `logfile` +output type. Because it is possible for the `index` output type to lose +messages if the target index is unavailable, the `access.log` should be +used as the official record of events. + +NOTE: Audit events are batched for indexing so there is a lag before +events appear in the index. You can control how frequently batches of +events are pushed to the index by setting +`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. + +[float] +[[audit-event-types]] +=== Audit Event Types + +Each request may generate multiple audit events. +The following is a list of the events that can be generated: + +|====== +| `anonymous_access_denied` | | | Logged when a request is denied due to a missing + authentication token. +| `authentication_success` | | | Logged when a user successfully authenticates. +| `authentication_failed` | | | Logged when the authentication token cannot be + matched to a known user. +| `realm_authentication_failed` | | | Logged for every realm that fails to present a valid + authentication token. `` represents the + realm type. 
+| `access_denied` | | | Logged when an authenticated user attempts to execute + an action they do not have the necessary + <> to perform. +| `access_granted` | | | Logged when an authenticated user attempts to execute + an action they have the necessary privilege to perform. + When the `system_access_granted` event is included, all system + (internal) actions are also logged. The default setting does + not log system actions to avoid cluttering the logs. +| `run_as_granted` | | | Logged when an authenticated user attempts to <> + another user that they have the necessary privileges to do. +| `run_as_denied` | | | Logged when an authenticated user attempts to <> + another user action they do not have the necessary + <> to do so. +| `tampered_request` | | | Logged when {security} detects that the request has + been tampered with. Typically relates to `search/scroll` + requests when the scroll ID is believed to have been + tampered with. +| `connection_granted` | | | Logged when an incoming TCP connection passes the + <> for a specific + profile. +| `connection_denied` | | | Logged when an incoming TCP connection does not pass the + <> for a specific + profile. +|====== + +[float] +[[audit-event-attributes]] +=== Audit Event Attributes + +The following table shows the common attributes that can be associated with every event. + +.Common Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `timestamp` | When the event occurred. +| `node_name` | The name of the node. +| `node_host_name` | The hostname of the node. +| `node_host_address` | The IP address of the node. +| `layer` | The layer from which this event originated: `rest`, `transport` or `ip_filter` +| `event_type` | The type of event that occurred: `anonymous_access_denied`, + `authentication_failed`, `access_denied`, `access_granted`, + `connection_granted`, `connection_denied`, `tampered_request`, + `run_as_granted`, `run_as_denied`. 
+|====== + +The following tables show the attributes that can be associated with each type of event. +The log level determines which attributes are included in a log entry. + +.REST anonymous_access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +|====== + +.REST authentication_success Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `user` | The authenticated user. +| `realm` | The realm that authenticated the user. +| `uri` | The REST endpoint URI. +| `params` | The REST URI query parameters. +| `request_body` | The body of the request, if enabled. +|====== + +.REST authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +|====== + +.REST realm_authentication_failed Attributes +[cols="2,7",options="header"] +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +| `realm` | The realm that failed to authenticate the user. + NOTE: A separate entry is logged for each + consulted realm. +|====== + +.Transport anonymous_access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). 
+| `origin_address` | The IP address from which the request originated. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport authentication_success Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `user` | The authenticated user. +| `realm` | The realm that authenticated the user. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +|====== + +.Transport authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport realm_authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. 
+| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +| `realm` | The realm that failed to authenticate the user. + NOTE: A separate entry is logged for each + consulted realm. +|====== + +.Transport access_granted Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + relates to (when applicable). 
+|====== + +.Transport tampered_request Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed to authenticate. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.IP Filter connection_granted Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `transport_profile` | The transport profile the request targeted. +| `rule` | The <> rule that granted + the request. +|====== + +.IP Filter connection_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `transport_profile` | The transport profile the request targeted. +| `rule` | The <> rule that denied + the request. +|====== + +[float] +[[audit-log-output]] +=== Logfile Audit Output + +The `logfile` audit output is the default output for auditing. It writes data to +the `_access.log` file in the logs directory. + +[float] +[[audit-log-entry-format]] +=== Log Entry Format + +The format of a log entry is: + +[source,txt] +---------------------------------------------------------------------------- +[] [] [] [] +---------------------------------------------------------------------------- + +`` :: When the event occurred. You can configure the + timestamp format in `log4j2.properties`. +`` :: Information about the local node that generated + the log entry. 
You can control what node information + is included by configuring the + <>. +`` :: The layer from which this event originated: + `rest`, `transport` or `ip_filter`. +`` :: The type of event that occurred: `anonymous_access_denied`, + `authentication_failed`, `access_denied`, `access_granted`, + `connection_granted`, `connection_denied`. +`` :: A comma-separated list of key-value pairs that contain + data pertaining to the event. Formatted as + `attr1=[val1], attr2=[val2]`. See <> for the attributes that can be included + for each type of event. + +[float] +[[audit-log-settings]] +=== Logfile Output Settings + +The events and some other information about what gets logged can be +controlled using settings in the `elasticsearch.yml` file. + +.Audited Event Settings +[cols="4,^2,4",options="header"] +|====== +| Name | Default | Description +| `xpack.security.audit.logfile.events.include` | `access_denied`, `access_granted`, `anonymous_access_denied`, `authentication_failed`, `connection_denied`, `tampered_request`, `run_as_denied`, `run_as_granted` | Includes the specified events in the output. +| `xpack.security.audit.logfile.events.exclude` | | Excludes the specified events from the output. +| `xpack.security.audit.logfile.events.emit_request_body`| false | Include or exclude the request body from REST requests + on certain event types such as `authentication_failed`. +|====== + + +IMPORTANT: No filtering is performed when auditing, so sensitive data may be +audited in plain text when including the request body in audit events. + +[[audit-log-entry-local-node-info]] +.Local Node Info Settings +[cols="4,^2,4",options="header"] +|====== +| Name | Default | Description +| `xpack.security.audit.logfile.prefix.emit_node_name` | true | Include or exclude the node's name + from the local node info. +| `xpack.security.audit.logfile.prefix.emit_node_host_address` | false | Include or exclude the node's IP address + from the local node info. 
+| `xpack.security.audit.logfile.prefix.emit_node_host_name` | false | Include or exclude the node's host name + from the local node info. +|====== + +[[logging-file]] +You can also configure how the logfile is written in the `log4j2.properties` +file located in `CONFIG_DIR/x-pack`. By default, audit information is appended to the +`_access.log` file located in the standard Elasticsearch `logs` directory +(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. + +[float] +[[audit-index]] +=== Index Audit Output + +In addition to logging to a file, you can store audit logs in Elasticsearch +rolling indices. These indices can be either on the same cluster, or on a +remote cluster. You configure the following settings in +`elasticsearch.yml` to control how audit entries are indexed. To enable +this output, you need to configure the setting `xpack.security.audit.outputs` +in the `elasticsearch.yml` file: + +[source,yaml] +---------------------------- +xpack.security.audit.outputs: [ index, logfile ] +---------------------------- + +.Audit Log Indexing Configuration +[options="header"] +|====== +| Attribute | Default Setting | Description +| `xpack.security.audit.index.bulk_size` | `1000` | Controls how many audit events are batched into a single write. +| `xpack.security.audit.index.flush_interval` | `1s` | Controls how often buffered events are flushed to the index. +| `xpack.security.audit.index.rollover` | `daily` | Controls how often to roll over to a new index: + `hourly`, `daily`, `weekly`, or `monthly`. +| `xpack.security.audit.index.events.include` | `anonymous_access_denied`, `authentication_failed`, `realm_authentication_failed`, `access_granted`, `access_denied`, `tampered_request`, `connection_granted`, `connection_denied`, `run_as_granted`, `run_as_denied` | The audit events to be indexed. See <> for the complete list. +| `xpack.security.audit.index.events.exclude` | | The audit events to exclude from indexing. 
+| `xpack.security.audit.index.events.emit_request_body`| false | Include or exclude the request body from REST requests + on certain event types such as `authentication_failed`. +|====== + +IMPORTANT: No filtering is performed when auditing, so sensitive data may be +audited in plain text when including the request body in audit events. + +[float] +==== Audit Index Settings + +You can also configure settings for the indices that the events are stored in. +These settings are configured in the `xpack.security.audit.index.settings` namespace +in `elasticsearch.yml`. For example, the following configuration sets the +number of shards and replicas to 1 for the audit indices: + +[source,yaml] +---------------------------- +xpack.security.audit.index.settings: + index: + number_of_shards: 1 + number_of_replicas: 1 +---------------------------- + +[float] +==== Forwarding Audit Logs to a Remote Cluster + +To index audit events to a remote Elasticsearch cluster, you configure +the following `xpack.security.audit.index.client` settings. + +.Remote Audit Log Indexing Configuration +[options="header"] +|====== +| Attribute | Description +| `xpack.security.audit.index.client.hosts` | Comma-separated list of `host:port` pairs. These hosts + should be nodes in the remote cluster. +| `xpack.security.audit.index.client.cluster.name` | The name of the remote cluster. +| `xpack.security.audit.index.client.xpack.security.user` | The `username:password` pair to use to authenticate with + the remote cluster. +|====== + +You can pass additional settings to the remote client by specifying them in the +`xpack.security.audit.index.client` namespace. 
For example, to allow the remote +client to discover all of the nodes in the remote cluster you can specify the +`client.transport.sniff` setting: + +[source,yaml] +---------------------------- +xpack.security.audit.index.client.transport.sniff: true +---------------------------- diff --git a/docs/en/security/authentication.asciidoc b/docs/en/security/authentication.asciidoc new file mode 100644 index 00000000000..fe57821afe4 --- /dev/null +++ b/docs/en/security/authentication.asciidoc @@ -0,0 +1,265 @@ +[[setting-up-authentication]] +== Setting Up User Authentication + +Authentication identifies an individual. To gain access to restricted resources, +a user must prove their identity, via passwords, credentials, or some other +means (typically referred to as authentication tokens). + +You can use the native support for managing and authenticating users, or +integrate with external user management systems such as LDAP and Active +Directory. For information about managing native users, +see <>. + +[float] +[[built-in-users]] +=== Built-in Users + +{security} provides built-in user credentials to help you get up and running. +These users have a fixed set of privileges and the default password `changeme`. +Please read <> and +<> below. + +.{security} Built-in Users +|======== +| Name | Description +| `elastic` | A built-in <>. +| `kibana` | The user Kibana uses to connect and communicate with Elasticsearch. +| `logstash_system` | The user Logstash uses when storing monitoring information in Elasticsearch. +|======== + +[float] +[[reset-built-in-user-passwords]] +==== Reset Built-in User Passwords +[IMPORTANT] +============================================================================= +You must reset the default passwords for all built-in users, and then +<>. 
+You can update passwords from the *Management > Users* UI in Kibana or with the +<>: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/elastic/_password +{ + "password": "elasticpassword" +} +--------------------------------------------------------------------- +// CONSOLE + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/kibana/_password +{ + "password": "kibanapassword" +} +--------------------------------------------------------------------- +// CONSOLE + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_password +{ + "password": "logstashpassword" +} +--------------------------------------------------------------------- +// CONSOLE + +Once the `kibana` user password is reset, you need to update the Kibana server +with the new password by setting `elasticsearch.password` in the +`kibana.yml` configuration file: + +[source,yaml] +----------------------------------------------- +elasticsearch.password: kibanapassword +----------------------------------------------- + +The `logstash_system` user is used internally within Logstash when +<> is enabled for logstash + +If you wish to enable this feature in Logstash, then you need to update the Logstash +configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in +the `logstash.yml` configuration file: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.password: logstashpassword +---------------------------------------------------------- + +If you have upgraded from an older version of elasticsearch/x-pack, +the `logstash_system` user may have defaulted to _disabled_ for security reasons. 
+Once the password has been changed, you can enable the user via the following API call: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_enable +--------------------------------------------------------------------- +// CONSOLE +============================================================================= + +[float] +[[disabling-default-password]] +==== Disable Default Password Functionality +[IMPORTANT] +============================================================================= +The default password of `changeme` is provided as a convenience that allows you to quickly +setup your Elasticsearch stack. It should not be used when running in production. + +Once you have changed the password for the built-in users, you must disable default password support +by setting `xpack.security.authc.accept_default_password` to `false`. + +A {ref}/bootstrap-checks.html[bootstrap check] will prevent your cluster from operating in production +mode until you make this configuration change. + +============================================================================= + +=== How Authentication Works + +Authentication in {security} is handled by one or more authentication services +called _realms_. A _realm_ is used to resolve and authenticate users based on +authentication tokens. {security} provides the following built-in realms: + +_native_:: +An internal realm where users are stored in a dedicated Elasticsearch index. +This realm supports an authentication token in the form of username and password, +and is available by default when no realms are explicitly configured. See +<>. + +_ldap_:: +A realm that uses an external LDAP server to authenticate the +users. This realm supports an authentication token in the form of username and +password, and requires explicit configuration in order to be used. See +<>. 
+ +_active_directory_:: +A realm that uses an external Active Directory Server to authenticate the +users. With this realm, users are authenticated by usernames and passwords. +See <>. + +_pki_:: +A realm that authenticates users using Public Key Infrastructure (PKI). This +realm works in conjunction with SSL/TLS and identifies the users through the +Distinguished Name (DN) of the client's X.509 certificates. See <>. + +_file_:: +An internal realm where users are defined in files stored on each node in the +Elasticsearch cluster. This realm supports an authentication token in the form +of username and password, and is always available. See <>. + +{security} also supports custom realms. If you need to integrate with another +authentication system, you can build a custom realm plugin. For more information, +see <>. + +Realms live within a _realm chain_. It is essentially a prioritized list of +configured realms (typically of various types). The order of the list determines +the order in which the realms will be consulted. During the authentication process, +{security} will consult and try to authenticate the request one realm at a time. +Once one of the realms successfully authenticates the request, the authentication +is considered to be successful and the authenticated user will be associated +with the request (which will then proceed to the authorization phase). If a realm +cannot authenticate the request, the next in line realm in the chain will be +consulted. If all realms in the chain could not authenticate the request, the +authentication is then considered to be unsuccessful and an authentication error +will be returned (as HTTP status code `401`). + +NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after + several successive failed login attempts. If the same username exists in + multiple realms, unintentional account lockouts are possible. For more + information, please see <>. 
+The default realm chain contains the `native` and `file` realms. To explicitly +configure a realm chain, you specify the chain in `elasticsearch.yml`. When you +configure a realm chain, only the realms you specify are used for authentication. +To use the `native` and `file` realms, you must include them in the chain. + +The following snippet configures a realm chain that includes the `file` and +`native` realms, as well as two LDAP realms and an Active Directory realm. + +[source,yaml] +---------------------------------------- +xpack.security.authc: + realms: + + file: + type: file + order: 0 + + native: + type: native + order: 1 + + ldap1: + type: ldap + order: 2 + enabled: false + url: 'url_to_ldap1' + ... + + ldap2: + type: ldap + order: 3 + url: 'url_to_ldap2' + ... + + ad1: + type: active_directory + order: 4 + url: 'url_to_ad' +---------------------------------------- + +As can be seen above, each realm has a unique name that identifies it and each +realm type dictates its own set of required and optional settings. That said, +there are three settings that are common to all realms: + +[cols=",^,",options="header"] +|========= +| Setting | Required | Description + +| `type` | true | Identifies the type of the realm. The realm type + determines what other settings the realms should be + configured with. The type can be one of: `native`, + `ldap`, `active_directory`, `pki`, `file`, or in case + of a custom realm, the type name that identifies it. + +| `order` | false | A numeric value representing the priority/index of + the realm within the realm chain. This will determine + the order by which the realms will be consulted + during authentication, with lower order being consulted + first. + +| `enabled` | false | When set to `false` the realm will be disabled and + will not be added to the realm chain. This is useful + for debugging purposes as it enables you to remove + a realm from the chain without deleting and losing + its configuration. 
+|========= + +Realm types can roughly be classified in two categories: + +Internal:: Realms that are internal to Elasticsearch and don't require any + communication with external parties. They are fully managed by + {security}. There can only be a maximum of one configured realm + per internal realm type. {security} provides two internal realm + types: `native` and `file`. + +External:: Realms that require interaction with parties/components external to + Elasticsearch, typically, with enterprise grade identity management + systems. Unlike internal realms, there can be as many external realms + as one would like - each with its own unique name and configuration. + {security} provides three external realm types: `ldap`, + `active_directory` and `pki`. + +include::authentication/anonymous-access.asciidoc[] + +include::authentication/native-realm.asciidoc[] + +include::authentication/ldap-realm.asciidoc[] + +include::authentication/active-directory-realm.asciidoc[] + +include::authentication/pki-realm.asciidoc[] + +include::authentication/file-realm.asciidoc[] + +include::authentication/custom-realm.asciidoc[] + +include::authentication/user-cache.asciidoc[] diff --git a/docs/en/security/authentication/active-directory-realm.asciidoc b/docs/en/security/authentication/active-directory-realm.asciidoc new file mode 100644 index 00000000000..e222ad4f5da --- /dev/null +++ b/docs/en/security/authentication/active-directory-realm.asciidoc @@ -0,0 +1,336 @@ +[[active-directory-realm]] +=== Active Directory User Authentication + +You can configure {security} to communicate with Active Directory to authenticate +users. To integrate with Active Directory, you configure an `active_directory` +realm and map Active Directory users and groups to {security} roles in the +<>. + +To protect passwords, communications between Elasticsearch and the Active Directory +server should be encrypted using SSL/TLS. 
Clients and nodes that connect via +SSL/TLS to the Active Directory server need to have the Active Directory server's +certificate or the server's root CA certificate installed in their keystore or +truststore. For more information about installing certificates, see +<>. + +==== Configuring an Active Directory Realm + +{security} uses LDAP to communicate with Active Directory, so `active_directory` +realms are similar to <>. Like LDAP directories, +Active Directory stores users and groups hierarchically. The directory's +hierarchy is built from containers such as the _organizational unit_ (`ou`), +_organization_ (`o`), and _domain controller_ (`dc`). + +The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a +user or group. User and group names typically have attributes such as a +_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, for +example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). + +{security} only supports Active Directory security groups. You cannot map +distribution groups to roles. + +NOTE: When you use Active Directory for authentication, the username entered by + the user is expected to match the `sAMAccountName` or `userPrincipalName`, + not the common name. + +To configure an `active_directory` realm: + +. Add a realm configuration of type `active_directory` to `elasticsearch.yml` +under the `xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `active_directory` and specify the Active Directory `domain_name`. To +use SSL/TLS for secured communication with the Active Directory server, you must +also set the `url` attribute and specify the `ldaps` protocol and secure port +number. If you are configuring multiple realms, you should also explicitly set +the `order` attribute to control the order in which the realms are consulted +during authentication. See <> +for all of the options you can set for an `active_directory` realm. 
++ +NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS. + If DNS is not being provided by a Windows DNS server, add a mapping for + the domain in the local `/etc/hosts` file. ++ +For example, the following realm configuration configures {security} to connect +to `ldaps://example.com:636` to authenticate users through Active Directory. ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 <1> + domain_name: ad.example.com + url: ldaps://ad.example.com:636 <2> + unmapped_groups_as_roles: true <3> +------------------------------------------------------------ +<1> The realm order controls the order in which the configured realms are checked + when authenticating a user. +<2> If you don't specify the URL, it defaults to `ldap://<domain_name>:389`. +<3> When this option is enabled, Active Directory groups are automatically mapped + to roles of the same name. ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +. Restart Elasticsearch. + +===== Multiple Domain Support +When authenticating users across multiple domains in a forest, there are a few minor +differences in the configuration and the way that users will authenticate. The `domain_name` +setting should be set to the forest root domain name. The `url` setting also needs to +be set as you will need to authenticate against the Global Catalog, which uses a different +port and may not be running on every Domain Controller. + +For example, the following realm configuration configures {security} to connect to specific +Domain Controllers on the Global Catalog port with the domain name set to the forest root.
+ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: example.com <1> + url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> + load_balance: + type: "round_robin" <3> +------------------------------------------------------------ +<1> The `domain_name` is set to the name of the root domain in the forest. +<2> The `url` value used in this example has URLs for two different Domain Controllers, +which are also Global Catalog servers. Port 3268 is the default port for unencrypted +communication with the Global Catalog; port 3269 is the default port for SSL connections. +The servers that are being connected to can be in any domain of the forest as long as +they are also Global Catalog servers. +<3> A load balancing setting is provided to indicate the desired behavior when choosing +the server to connect to. + +In this configuration, users will need to use either their full User Principal +Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of +the username with `@<DOMAIN_NAME>`. + {security} attempts to authenticate against this URL. If the + URL is not specified, it is derived from the `domain_name`, + assuming an unencrypted connection to port 389. For example, + `ldap://<domain_name>:389`. This setting is required when + connecting using SSL/TLS or via a custom port. +| `load_balance.type` | no | The behavior to use when there are multiple LDAP URLs defined. + For supported values see <>. +| `load_balance.cache_ttl` | no | When using `dns_failover` or `dns_round_robin` as the load + balancing type, this setting controls the amount of time to + cache DNS lookups. Defaults to `1h`. +| `user_search.base_dn` | no | Specifies the context to search for the user. Defaults to the + root of the Active Directory domain.
+| `user_search.scope` | no | Specifies whether the user search should be `sub_tree` (default), + `one_level`, or `base`. `sub_tree` searches all objects contained + under `base_dn`. `one_level` only searches users directly + contained within the `base_dn`. `base` specifies that the + `base_dn` is a user object and that it is the only user considered. +| `user_search.filter` | no | Specifies a filter to use to lookup a user given a username. + The default filter looks up `user` objects with either + `sAMAccountName` or `userPrincipalName`. If specified, this + must be a valid LDAP user search filter, for example + `(&(objectClass=user)(sAMAccountName={0}))`. For more + information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. +| `group_search.base_dn` | no | Specifies the context to search for groups in which the user + has membership. Defaults to the root of the Active Directory + domain. +| `group_search.scope` | no | Specifies whether the group search should be `sub_tree` (default), + `one_level` or `base`. `sub_tree` searches all objects contained + under `base_dn`. `one_level` searches for groups directly + contained within the `base_dn`. `base` specifies that the + `base_dn` is a group object and that it is the only group considered. +| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped Active Directory + groups should be used as role names and assigned to the user. + Defaults to `false`. +| `files.role_mapping` | no | Specifies the path and file name of the + <>. + Defaults to `CONF_DIR/x-pack/role_mapping.yml`, + where `CONF_DIR` is `ES_HOME/config` (zip/tar installations) + or `/etc/elasticsearch` (package installations). +| `follow_referrals` | no | Specifies whether {security} should follow referrals returned + by the Active Directory server. Referrals are URLs returned by + the server that are to be used to continue the LDAP operation + (such as `search`). Defaults to `true`. 
+| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the Active Directory + server requires client authentication. `ssl.key` and `ssl.keystore.path` may not be used at the + same time. +| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted. +| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the key + if the Active Directory server requires client authentication. +| `ssl.certificate_authorities`| no | Specifies the paths to the PEM encoded certificate authority certificates that + should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at + the same time. +| `ssl.keystore.path` | no | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and + `ssl.keystore.path` may not be used at the same time. +| `ssl.keystore.password` | no | The password to the keystore. +| `ssl.keystore.key_password`| no | The password for the key in the keystore. Defaults to the keystore password. +| `ssl.truststore.path` | no | The path to the Java Keystore file that contains the certificates to trust. + `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time. +| `ssl.truststore.password` | no | The password to the truststore. +| `ssl.verification_mode` | no | Specifies the type of verification to be performed when + connecting to an Active Directory server using `ldaps`. When + set to `full`, the hostname or IP address used in the `url` + must match one of the names in the certificate or the + connection will not be allowed. Due to their potential security impact, + `ssl` settings are not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. + Values are `none`, `certificate`, and `full`. Defaults to `full`. +| `ssl.supported_protocols` | no | Specifies the supported protocols for TLS/SSL.
+| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported when communicating + with the Active Directory server. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. A user's + credentials are cached for this period of time. Specify the + time period using the standard Elasticsearch + {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that can be + stored in the cache at one time. Defaults to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for the + cached user credentials. + See <> for the + possible values. (Expert Setting). +|======================= + +[[mapping-roles-ad]] +==== Mapping Active Directory Users and Groups to Roles + +An integral part of a realm authentication process is to resolve the roles +associated with the authenticated user. Roles define the privileges a user has +in the cluster. + +Since with the `active_directory` realm the users are managed externally in the +Active Directory server, the expectation is that their roles are managed there +as well. In fact, Active Directory supports the notion of groups, which often +represent user roles for different systems in the organization. + +The `active_directory` realm enables you to map Active Directory users and groups +to roles in the role mapping file stored on each node. You specify users and +groups using their distinguished names (DNs). For example, the following mapping +configuration maps the Active Directory `admins` group to both the `monitoring` +and `user` roles, maps the `users` group to the `user` role and maps the `John Doe` +user to the `user` role. 
+ +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=users,dc=example,dc=com" <3> + - "cn=admins,dc=example,dc=com" + - "cn=John Doe,cn=contractors,dc=example,dc=com" <4> +------------------------------------------------------------ +<1> The name of the role. +<2> The Active Directory distinguished name (DN) of the `admins` group. +<3> The Active Directory distinguished name (DN) of the `users` group. +<4> The Active Directory distinguished name (DN) of the user `John Doe`. + +For more information, see <>. + +[[active-directory-ssl]] +==== Setting up SSL Between Elasticsearch and Active Directory + +To protect the user credentials that are sent for authentication, it's highly +recommended to encrypt communications between Elasticsearch and your Active +Directory server. Connecting via SSL/TLS ensures that the identity of the Active +Directory server is authenticated before {security} transmits the user +credentials, and the usernames and passwords are encrypted in transit. + +To encrypt communications between Elasticsearch and Active Directory: + +. Configure each node to trust certificates signed by the CA that signed your +Active Directory server certificates. The following example demonstrates how to trust a CA certificate, +`cacert.pem`, located within the {xpack} configuration directory: ++ +[source,shell] +-------------------------------------------------- +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + ssl: + certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] +-------------------------------------------------- ++ +The CA cert must be a PEM encoded certificate. + +. Set the `url` attribute in the realm configuration to specify the LDAPS protocol +and the secure port number. For example, `url: ldaps://ad.example.com:636`. + +. 
Restart Elasticsearch. + +NOTE: By default, when you configure {security} to connect to Active Directory + using SSL/TLS, {security} attempts to verify the hostname or IP address + specified with the `url` attribute in the realm configuration with the + values in the certificate. If the values in the certificate and realm + configuration do not match, {security} does not allow a connection to the + Active Directory server. This is done to protect against man-in-the-middle + attacks. If necessary, you can disable this behavior by setting the + <> property to `none`. diff --git a/docs/en/security/authentication/anonymous-access.asciidoc b/docs/en/security/authentication/anonymous-access.asciidoc new file mode 100644 index 00000000000..c95328e99a3 --- /dev/null +++ b/docs/en/security/authentication/anonymous-access.asciidoc @@ -0,0 +1,30 @@ +[[anonymous-access]] +=== Enabling Anonymous Access + +Incoming requests are considered to be _anonymous_ if no authentication token +can be extracted from the incoming request. By default, anonymous requests are rejected and an authentication error is returned (status code `401`). + +To enable anonymous access, you assign one or more roles to anonymous +users in the `elasticsearch.yml` configuration file. For example, the following +configuration assigns anonymous users `role1` and `role2`: + +[source,yaml] +---------------------------------------- +xpack.security.authc: + anonymous: + username: anonymous_user <1> + roles: role1, role2 <2> + authz_exception: true <3> +---------------------------------------- +<1> The username/principal of the anonymous user. Defaults to +`_es_anonymous_user` if not specified. +<2> The roles to associate with the anonymous user. If no roles are specified, anonymous access is disabled--anonymous requests will be rejected and return an authentication error. 
+<3> When `true`, a 403 HTTP status code is returned if the anonymous user +does not have the permissions needed to perform the requested action and the +user will NOT be prompted to provide credentials to access the requested +resource. When `false`, a 401 HTTP status code is returned if the anonymous user +does not have the necessary permissions and the user is prompted for +credentials to access the requested resource. If you are using anonymous access +in combination with HTTP, you might need to set `authz_exception` to `false` +if your client does not support preemptive basic authentication. Defaults to +`true`. \ No newline at end of file diff --git a/docs/en/security/authentication/custom-realm.asciidoc b/docs/en/security/authentication/custom-realm.asciidoc new file mode 100644 index 00000000000..55d010a364e --- /dev/null +++ b/docs/en/security/authentication/custom-realm.asciidoc @@ -0,0 +1,107 @@ +[[custom-realms]] +=== Integrating with Other Authentication Systems + +If you are using an authentication system that is not supported out-of-the-box +by {security}, you can create a custom realm to interact with it to authenticate +users. You implement a custom realm as an {xpack} extension. + +[[implementing-custom-realm]] +==== Implementing a Custom Realm + +Sample code that illustrates the structure and implementation of a custom realm +is provided in the https://github.com/elastic/shield-custom-realm-example[custom-realm-example] +repository on GitHub. You can use this code as a starting point for creating your +own realm. + +To create a custom realm, you need to: + +. Extend `org.elasticsearch.xpack.security.authc.Realm` to communicate with your + authentication system to authenticate users. +. Implement the `org.elasticsearch.xpack.security.authc.Realm.Factory` interface in + a class that will be used to create the custom realm. +. 
+ Extend `org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler` to + handle authentication failures when using your custom realm. + +To package your custom realm as a plugin: + +. Implement an extension class for your realm that extends + `org.elasticsearch.xpack.extensions.XPackExtension`. There you need to + override one or more of the following methods: ++ +[source,java] +---------------------------------------------------- +@Override +public Map<String, Realm.Factory> getRealms() { + ... +} +---------------------------------------------------- ++ +The `getRealms` method is used to provide a map of type names to the `Factory` that +will be used to create the realm. ++ +[source,java] +---------------------------------------------------- +@Override +public AuthenticationFailureHandler getAuthenticationFailureHandler() { + ... +} +---------------------------------------------------- ++ +The `getAuthenticationFailureHandler` method is used to optionally provide a +custom `AuthenticationFailureHandler`, which will control how X-Pack responds +in certain authentication failure events. ++ +[source,java] +---------------------------------------------------- +@Override +public Collection<String> getRestHeaders() { + ... +} +---------------------------------------------------- ++ +The `getRestHeaders` method returns a collection of header names that should be +copied from the request into the `ThreadContext` where they can be accessed by +the realm. ++ +[source,java] +---------------------------------------------------- +@Override +public List<String> getSettingsFilter() { + ... +} +---------------------------------------------------- ++ +The `getSettingsFilter` method returns a list of setting names that should be +filtered from the settings APIs as they may contain sensitive credentials. + +. Create a build configuration file for the plugin; Gradle is our recommendation. +. Create a `x-pack-extension-descriptor.properties` descriptor file for the + extension. +.
+ Bundle all in a single zip file. + +[[using-custom-realm]] +==== Using a Custom Realm to Authenticate Users + +To use a custom realm: + +. Install the realm extension on each node in the cluster. You run + `bin/x-pack/extension` with the `install` sub-command and specify the URL + pointing to the zip file that contains the extension. For example: ++ +[source,shell] +---------------------------------------- +bin/x-pack/extension install file:///<path>/my-realm-1.0.zip +---------------------------------------- + +. Add a realm configuration of the appropriate realm type to `elasticsearch.yml` +under the `xpack.security.authc.realms` namespace. The options you can set depend +on the settings exposed by the custom realm. At a minimum, you must set the realm +`type` to the type defined by the extension. If you are configuring multiple +realms, you should also explicitly set the `order` attribute to control the +order in which the realms are consulted during authentication. ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +. Restart Elasticsearch. diff --git a/docs/en/security/authentication/file-realm.asciidoc b/docs/en/security/authentication/file-realm.asciidoc new file mode 100644 index 00000000000..87ee9839660 --- /dev/null +++ b/docs/en/security/authentication/file-realm.asciidoc @@ -0,0 +1,292 @@ +[[file-realm]] +=== File-based User Authentication + +You can manage and authenticate users with the built-in `file` internal realm. +With the `file` realm users are defined in local files on each node in the cluster. + +IMPORTANT: As the administrator of the cluster, it is your responsibility to + ensure the same users are defined on every node in the cluster. + {security} does not deliver any mechanism to guarantee this. + +The `file` realm is primarily supported to serve as a fallback/recovery realm.
It +is mostly useful in situations where all users locked themselves out of the system +(no one remembers their username/password). In this type of scenarios, the `file` +realm is your only way out - you can define a new `admin` user in the `file` realm +and use it to log in and reset the credentials of all other users. + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. To use the +`file` realm as a fallback, you must include it in the realm chain. + +To define users, {security} provides the <> command-line +tool. This tool enables you to add and remove users, assign user roles and manage +user passwords. + +==== Configuring a File Realm + +The `file` realm is added to the realm chain by default. You don't need to +explicitly configure a `file` realm to manage users with the `users` tool. + +Like other realms, you can configure options for a `file` realm in the +`xpack.security.authc.realms` namespace in `elasticsearch.yml`. + +To configure an `file` realm: + +. Add a realm configuration of type `file` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to +`file`. If you are configuring multiple realms, you should also explicitly set +the `order` attribute. See <> for all of the options you can set +for a `file` realm. ++ +For example, the following snippet shows a `file` realm configuration that sets +the `order` to zero so the realm is checked first: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + file1: + type: file + order: 0 +------------------------------------------------------------ + +. Restart Elasticsearch. + +[[file-realm-settings]] +===== File Realm Settings + +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `file`. 
+| `order` | no | Indicates the priority of this realm within the + realm chain. Realms with a lower order are + consulted first. Although not required, we + recommend explicitly setting this value when you + configure multiple realms. Defaults to + `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or + disabled. Enables you to disable a realm without + removing its configuration. Defaults to `true`. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. + A user's credentials are cached for this period of + time. Specify the time period using the standard + Elasticsearch {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that + can be stored in the cache at one time. Defaults + to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for + the cached user credentials. See <<cache-hash-algo>> for the possible values. + (Expert Setting). +|======================= + +[[managing-file-users]] +==== Managing Users + +The `users` command-line tool is located in `ES_HOME/bin/x-pack` and enables +several administrative tasks for managing users: + +* <<file-realm-add-user,Adding users>> +* <<file-realm-list-users,Listing users and roles>> +* <<file-realm-manage-passwd,Managing user passwords>> +* <<file-realm-manage-roles,Managing users' roles>> +* <<file-realm-remove-user,Removing users>> + +[[file-realm-add-user]] +===== Adding Users + +Use the `useradd` sub-command to add a user to your local node. + +NOTE: To ensure that Elasticsearch can read the user and role information at + startup, run `users useradd` as the same user you use to run Elasticsearch. + Running the command as root or some other user will update the permissions + for the `users` and `users_roles` files and prevent Elasticsearch from + accessing them. + +[source,shell] +---------------------------------------- +bin/x-pack/users useradd <username> +---------------------------------------- + +A username must be at least 1 character and no longer than 30 characters. The +first character must be a letter (`a-z` or `A-Z`) or an underscore (`_`).
+Subsequent characters can be letters, underscores (`_`), digits (`0-9`), or any +of the following symbols `@`, `-`, `.` or `$`. + +You can specify the user's password at the command-line with the `-p` option. +When this option is absent, the command prompts you for the password. Omit the +`-p` option to keep plaintext passwords out of the terminal session's command +history. + +[source,shell] +---------------------------------------------------- +bin/x-pack/users useradd <username> -p <password> +---------------------------------------------------- + +Passwords must be at least 6 characters long. + +You can define a user's roles with the `-r` option. This option accepts a +comma-separated list of role names to assign to the user. + +[source,shell] +------------------------------------------------------------------- +bin/x-pack/users useradd <username> -r <comma-separated list of role names> +------------------------------------------------------------------- + +The following example adds a new user named `jacknich` to the `file` realm. The +password for this user is `theshining`, and this user is associated with the +`network` and `monitoring` roles. + +[source,shell] +------------------------------------------------------------------- +bin/x-pack/users useradd jacknich -p theshining -r network,monitoring +------------------------------------------------------------------- + +For valid role names please see <>. + +[[file-realm-list-users]] +===== Listing Users + +Use the `list` sub-command to list the users registered with the `file` realm +on the local node. + +[source, shell] +---------------------------------- +bin/x-pack/users list +rdeniro : admin +alpacino : power_user +jacknich : monitoring,network +---------------------------------- + +Users are in the left-hand column and their corresponding roles are listed in +the right-hand column. + +The `list <username>` sub-command lists a specific user. Use this command to +verify that a user was successfully added to the local `file` realm.
+ +[source,shell] +----------------------------------- +bin/x-pack/users list jacknich +jacknich : monitoring,network +----------------------------------- + +[[file-realm-manage-passwd]] +===== Managing User Passwords + +Use the `passwd` sub-command to reset a user's password. You can specify the new +password directly with the `-p` option. When the `-p` option is omitted, the tool +will prompt you to enter and confirm a password in interactive mode. + +[source,shell] +-------------------------------------------------- +bin/x-pack/users passwd <username> +-------------------------------------------------- + +[source,shell] +-------------------------------------------------- +bin/x-pack/users passwd <username> -p <password> +-------------------------------------------------- + +[[file-realm-manage-roles]] +===== Assigning Users to Roles + +Use the `roles` sub-command to manage the roles of a particular user. The `-a` +option adds a comma-separated list of roles to a user. The `-r` option removes +a comma-separated list of roles from a user. You can combine adding and removing +roles within the same command to change a user's roles. + +[source,shell] +------------------------------------------------------------------------------------------------------------ +bin/x-pack/users roles <username> -a <commma-separated list of roles> -r <comma-separated list of roles> +------------------------------------------------------------------------------------------------------------ + +The following command removes the `network` and `monitoring` roles from user +`jacknich` and adds the `user` role: + +[source,shell] +------------------------------------------------------------ +bin/x-pack/users roles jacknich -r network,monitoring -a user +------------------------------------------------------------ + +Listing the user displays the new role assignment: + +[source,shell] +--------------------------------- +bin/x-pack/users list jacknich +jacknich : user +--------------------------------- + +[[file-realm-remove-user]] +===== Deleting Users + +Use the `userdel` sub-command to delete a user.
+ +[source,shell] +-------------------------------------------------- +bin/x-pack/users userdel +-------------------------------------------------- + +==== A Look Under the Hood + +All the data about the users for the `file` realm is stored in two files, `users` +and `users_roles`. Both files are located in `CONFIG_DIR/x-pack/` and are read +on startup. + +By default, {security} checks these files for changes every 5 seconds. You can +change this default behavior by changing the `resource.reload.interval.high` setting in +the `elasticsearch.yml` file (as this is a common setting in Elasticsearch, +changing its value may effect other schedules in the system). + +[IMPORTANT] +============================== +These files are managed locally by the node and are **not** managed +globally by the cluster. This means that with a typical multi-node cluster, +the exact same changes need to be applied on each and every node in the +cluster. + +A safer approach would be to apply the change on one of the nodes and have the +`users` and `users_roles` files distributed/copied to all other nodes in the +cluster (either manually or using a configuration management system such as +Puppet or Chef). +============================== + +While it is possible to modify these files directly using any standard text +editor, we strongly recommend using the `bin/x-pack/users` command-line tool +to apply the required changes. + +[float] +[[users-file]] +===== The `users` File +The `users` file stores all the users and their passwords. Each line in the +`users` file represents a single user entry consisting of the username and +**hashed** password. 
+ +[source,bash] +---------------------------------------------------------------------- +rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W +alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS +jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni +---------------------------------------------------------------------- + +NOTE: {security} uses `bcrypt` to hash the user passwords. + +[float] +[[users_defining-roles]] +==== The `users_roles` File + +The `users_roles` file stores the roles associated with the users, as in the +following example: + +[source,shell] +-------------------------------------------------- +admin:rdeniro +power_user:alpacino,jacknich +user:jacknich +-------------------------------------------------- + +Each row maps a role to a comma-separated list of all the users that are +associated with that role. diff --git a/docs/en/security/authentication/ldap-realm.asciidoc b/docs/en/security/authentication/ldap-realm.asciidoc new file mode 100644 index 00000000000..33ae53d77e9 --- /dev/null +++ b/docs/en/security/authentication/ldap-realm.asciidoc @@ -0,0 +1,406 @@ +[[ldap-realm]] +=== LDAP User Authentication + +You can configure {security} to communicate with a Lightweight Directory Access +Protocol (LDAP) server to authenticate users. To integrate with LDAP, you +configure an `ldap` realm and map LDAP groups to user roles in the +<>. + +To protect passwords, communications between Elasticsearch and the LDAP server +should be encrypted using SSL/TLS. Clients and nodes that connect via SSL/TLS to +the LDAP server need to have the LDAP server's certificate or the server's root +CA certificate installed in their _keystore_ or _truststore_. For more information +about installing certificates, see <>. + +==== Configuring an LDAP Realm + +LDAP stores users and groups hierarchically, similar to the way folders are +grouped in a file system. 
An LDAP directory's hierarchy is built from containers +such as the _organizational unit_ (`ou`), _organization_ (`o`), and +_domain controller_ (`dc`). + +The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a +user or group. User and group names typically have attributes such as a +_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, +for example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). + +The `ldap` realm supports two modes of operation, a user search mode +and a mode with specific templates for user DNs. See +<> for all of the options you can set for an +`ldap` realm. + +[[ldap-user-search]] +===== User Search Mode +LDAP user search is the most common mode of operation. In this mode, a specific +user with permission to search the LDAP directory is used to search for the +authenticating user DN based on its username and an LDAP attribute. Once found, +the user will be authenticated by attempting to bind to the LDAP server using the +found DN and the provided password. + +To configure an `ldap` Realm with User Search: + +. Add a realm configuration of type `ldap` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` +to `ldap`, specify the `url` of the LDAP server, and set `user_search.base_dn` +to the container DN where the users are searched for. If you are configuring +multiple realms, you should also explicitly set the `order` attribute to control +the order in which the realms are consulted during authentication. See +<> for all of the options you can set for an +`ldap` realm. 
++ +For example, the following snippet shows an LDAP realm configured with a user search: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" + bind_password: changeme + user_search: + base_dn: "dc=example,dc=com" + attribute: cn + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +. Restart Elasticsearch + + +===== User DN Templates Mode +If your LDAP environment uses a few specific standard naming conditions for +users, you can use User DN templates to configure the realm. The advantage of +this method is that a search does not have to be performed to find the user DN. +However, multiple bind operations might be needed to find the correct user DN. + +To configure an `ldap` Realm with User Search: + +. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to +`ldap`, specify the `url` of the LDAP server, and specify at least one template +with the `user_dn_templates` option. If you are configuring multiple realms, you +should also explicitly set the `order` attribute to control the order in which +the realms are consulted during authentication. See <> +for all of the options you can set for an `ldap` realm. 
++ +For example, the following snippet shows an LDAP realm configured with User DN templates: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + user_dn_templates: + - "cn={0}, ou=users, o=marketing, dc=example, dc=com" + - "cn={0}, ou=users, o=engineering, dc=example, dc=com" + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ + +. Restart Elasticsearch + + +[[ldap-load-balancing]] +===== Load Balancing and Failover +The `load_balance.type` setting can be used at the realm level to configure how +{security} should interact with multiple LDAP servers. {security} supports both +failover and load balancing modes of operation. + +.Load Balancing and Failover Types +|======================= +| Type | | | Description +| `failover` | | | The URLs specified are used in the order that they are specified. + The first server that can be connected to will be used for all + subsequent connections. If a connection to that server fails then + the next server that a connection can be established to will be + used for subsequent connections. +| `dns_failover` | | | In this mode of operation, only a single URL may be specified. + This URL must contain a DNS name. The system will be queried for + all IP addresses that correspond to this DNS name. Connections to + the LDAP server will always be tried in the order in which they + were retrieved. This differs from `failover` in that there is no + reordering of the list and if a server has failed at the beginning + of the list, it will still be tried for each subsequent connection. +| `round_robin` | | | Connections will continuously iterate through the list of provided + URLs. 
If a server is unavailable, iterating through the list of + URLs will continue until a successful connection is made. +| `dns_round_robin` | | | In this mode of operation, only a single URL may be specified. This + URL must contain a DNS name. The system will be queried for all IP + addresses that correspond to this DNS name. Connections will + continuously iterate through the list of addresses. If a server is + unavailable, iterating through the list of URLs will continue until + a successful connection is made. +|======================= + + +[[ldap-settings]] +===== LDAP Realm Settings + +.Common LDAP Realm Settings +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `ldap`. +| `order` | no | Indicates the priority of this realm within the realm + chain. Realms with a lower order are consulted first. + Although not required, we recommend explicitly + setting this value when you configure multiple realms. + Defaults to `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or disabled. + Enables you to disable a realm without removing its + configuration. Defaults to `true`. +| `url` | yes | Specifies one or more LDAP URLs of the form of + `ldap[s]://:`. Multiple URLs can be + defined using a comma separated value or array syntax: + `[ "ldaps://server1:636", "ldaps://server2:636" ]`. + `ldaps` and `ldap` URL protocols cannot be mixed in + the same realm. +| `load_balance.type` | no | The behavior to use when there are multiple LDAP URLs + defined. For supported values see + <>. +| `load_balance.cache_ttl` | no | When using `dns_failover` or `dns_round_robin` as the + load balancing type, this setting controls the amount of time + to cache DNS lookups. Defaults to `1h`. +| `user_group_attribute` | no | Specifies the attribute to examine on the user for group + membership. The default is `memberOf`. 
This setting will + be ignored if any `group_search` settings are specified. +| `group_search.base_dn` | no | Specifies a container DN to search for groups in which + the user has membership. When this element is absent, + Security searches for the attribute specified by + `user_group_attribute` set on the user to determine + group membership. +| `group_search.scope` | no | Specifies whether the group search should be + `sub_tree`, `one_level` or `base`. `one_level` only + searches objects directly contained within the + `base_dn`. The default `sub_tree` searches all objects + contained under `base_dn`. `base` specifies that the + `base_dn` is a group object, and that it is the only + group considered. +| `group_search.filter` | no | Specifies a filter to use to lookup a group. If not + set, the realm searches for `group`, + `groupOfNames`, `groupOfUniqueNames`, or `posixGroup` with the + attributes `member`, `memberOf`, or `memberUid`. Any instance of + `{0}` in the filter is replaced by the user + attribute defined in `group_search.user_attribute` +| `group_search.user_attribute` | no | Specifies the user attribute that is fetched and + provided as a parameter to the filter. If not set, + the user DN is passed to the filter. +| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped LDAP groups + should be used as role names and assigned to the user. + Defaults to `false`. +| `timeout.tcp_connect` | no | Specifies the TCP connect timeout period for establishing an + LDAP connection. An `s` at the end indicates seconds, or `ms` + indicates milliseconds. Defaults to `5s` (5 seconds). +| `timeout.tcp_read` | no | Specifies the TCP read timeout period after establishing an LDAP connection. + An `s` at the end indicates seconds, or `ms` indicates milliseconds. + Defaults to `5s` (5 seconds). +| `timeout.ldap_search` | no | Specifies the LDAP Server enforced timeout period for an LDAP search. 
+ An `s` at the end indicates seconds, or `ms` indicates milliseconds. + Defaults to `5s` (5 seconds). +| `files.role_mapping` | no | Specifies the path and file name for the + <>. + Defaults to `ES_HOME/config/x-pack/role_mapping.yml`. +| `follow_referrals` | no | Specifies whether {security} should follow referrals + returned by the LDAP server. Referrals are URLs returned by + the server that are to be used to continue the LDAP operation + (e.g. search). Defaults to `true`. +| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the LDAP + server requires client authentication. `ssl.key` and `ssl.keystore.path` + may not be used at the same time. +| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted. +| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the + key if the LDAP server requires client authentication. +| `ssl.certificate_authorities` | no | Specifies the paths to the PEM encoded certificate authority certificates that + should be trusted. `ssl.certificate_authorities` and `ssl.trustsore.path` may not be used + at the same time. +| `ssl.keystore.path` | no | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and + `ssl.keystore.path` may not be used at the same time. +| `ssl.keystore.password` | no | The password to the keystore. +| `ssl.keystore.key_password` | no | The password for the key in the keystore. Defaults to the keystore password. +| `ssl.truststore.path` | no | The path to the Java Keystore file that contains the certificates to trust. + `ssl.certificate_authorities` and `ssl.trustsore.path` may not be used at the same time. +| `ssl.truststore.password` | no | The password to the truststore. +| `ssl.verification_mode` | no | Specifies the type of verification to be performed when + connecting to a LDAP server using `ldaps`. 
When + set to `full`, the hostname or IP address used in the `url` + must match one of the names in the certificate or the + connection will not be allowed. Due to their potential security impact, + `ssl` settings are not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. + Values are `none`, `certificate`, and `full`. Defaults to `full`. +| `ssl.supported_protocols` | no | Specifies the supported protocols for SSL/TLS. +| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported when communicating + with the LDAP server. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. A + user's credentials are cached for this period of time. + Specify the time period using the standard Elasticsearch + {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that can be + stored in the cache at one time. Defaults to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for the + cached user credentials. See + <> for the possible + values. (Expert Setting). +|======================= + +.User Search Mode Settings +|======================= +| Setting | Required | Description +| `bind_dn` | no | The DN of the user that is used to bind to the LDAP + and perform searches. If not specified, an anonymous + bind is attempted. Due to its potential security + impact, `bind_dn` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. +| `bind_password` | no | The password for the user that is used to bind to the + LDAP. Due to its potential security impact, + `bind_password` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. +| `user_search.base_dn` | yes | Specifies a container DN to search for users. +| `user_search.scope` | no | The scope of the user search. Valid values are `sub_tree`, + `one_level` or `base`. 
`one_level` only searches objects + directly contained within the `base_dn`. `sub_tree` searches + all objects contained under `base_dn`. `base` specifies + that the `base_dn` is the user object, and that it is the + only user considered. Defaults to `sub_tree`. +| `user_search.attribute` | no | Specifies the attribute to match with the username presented + to. Defaults to `uid`. +| `user_search.pool.enabled` | no | Enables or disables connection pooling for user search. When + disabled a new connection is created for every search. The + default is `true`. +| `user_search.pool.size` | no | Specifies the maximum number of connections to the LDAP + server to allow in the connection pool. Defaults to `20`. +| `user_search.pool.initial_size` | no | The initial number of connections to create to the LDAP + server on startup. Defaults to `0`. Values greater than `0` + could cause startup failures if the LDAP server is down. +| `user_search.pool.health_check.enabled` | no | Enables or disables a health check on LDAP connections in + the connection pool. Connections are checked in the + background at the specified interval. Defaults to `true`. +| `user_search.pool.health_check.dn` | no/yes | Specifies the distinguished name to retrieve as part of + the health check. Defaults to the value of `bind_dn`. + This setting is required when `bind_dn` is not configured. +| `user_search.pool.health_check.interval` | no | How often to perform background checks of connections in + the pool. Defaults to `60s`. +|======================= + +.User Templates Mode Settings +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `user_dn_templates` | yes | Specifies the DN template that replaces the + user name with the string `{0}`. This element + is multivalued, allowing for multiple user + contexts. +|======================= + + +NOTE: If any settings starting with `user_search` are specified, the + `user_dn_templates` the settings are ignored. 
+ + +[[mapping-roles-ldap]] +==== Mapping LDAP Groups to Roles + +An integral part of a realm authentication process is to resolve the roles +associated with the authenticated user. Roles define the privileges a user has +in the cluster. + +Since with the `ldap` realm the users are managed externally in the LDAP server, +the expectation is that their roles are managed there as well. If fact, LDAP +supports the notion of groups, which often represent user roles for different +systems in the organization. + +The `ldap` realm enables you to map LDAP groups to roles in the role mapping +file stored on each node. When a user authenticates with LDAP, the privileges +for that user are the union of all privileges defined by the roles assigned to +the set of groups that the user belongs to. + +You specify groups using their distinguished names. For example, the following +mapping configuration maps the LDAP `admins` group to both the `monitoring` and +`user` roles, and maps the `users` group to the `user` role. + +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=users,dc=example,dc=com" <3> + - "cn=admins,dc=example,dc=com" +------------------------------------------------------------ +<1> The name of the mapped role. +<2> The LDAP distinguished name (DN) of the `admins` group. +<3> The LDAP distinguished name (DN) of the `users` group. + +For more information, see <>. + +[[ldap-ssl]] +==== Setting up SSL Between Elasticsearch and LDAP + +To protect the user credentials that are sent for authentication, it's highly +recommended to encrypt communications between Elasticsearch and your LDAP server. +Connecting via SSL/TLS ensures that the identity of the LDAP server is +authenticated before {security} transmits the user credentials and the contents +of the connection are encrypted. + +To encrypt communications between Elasticsearch and your LDAP server: + +. 
Configure the realm's SSL settings on each node to trust certificates signed by the CA that signed your +LDAP server certificates. The following example demonstrates how to trust a CA certificate, +`cacert.pem`, located within the {xpack} configuration directory: ++ +[source,shell] +-------------------------------------------------- +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + ssl: + certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] +-------------------------------------------------- ++ +The CA cert must be a PEM encoded certificate. ++ +[NOTE] +=============================== +You can also specify the individual server certificates rather than the CA +certificate, but this is only recommended if you have a single LDAP server +or the certificates are self-signed. +=============================== + +. Set the `url` attribute in the realm configuration to specify the LDAPS +protocol and the secure port number. For example, `url: ldaps://ldap.example.com:636`. + +. Restart Elasticsearch. + +NOTE: By default, when you configure {security} to connect to an LDAP server + using SSL/TLS, {security} attempts to verify the hostname or IP address + specified with the `url` attribute in the realm configuration with the + values in the certificate. If the values in the certificate and realm + configuration do not match, {security} does not allow a connection to the + LDAP server. This is done to protect against man-in-the-middle attacks. If + necessary, you can disable this behavior by setting the + `ssl.verification_mode` property to `none`. 
diff --git a/docs/en/security/authentication/migrate-tool.asciidoc b/docs/en/security/authentication/migrate-tool.asciidoc new file mode 100644 index 00000000000..cd5de6568a7 --- /dev/null +++ b/docs/en/security/authentication/migrate-tool.asciidoc @@ -0,0 +1,73 @@ +[[migrate-tool]] +==== Migrating File-based Users and Roles to the Native Realm + +From 5.0 onward, you should use the `native` realm to manage roles and local +users. To migrate existing file-based users and roles to the native realm, use +the `migrate` tool that's included with the X-Pack plugin. + +NOTE: When migrating from Shield 2.x, the `migrate` tool should be run prior +to upgrading to ensure all roles can be migrated as some may be in a deprecated +format that {xpack} cannot read. The `migrate` tool is available in Shield +2.4.0 and higher. + +The `migrate` tool loads the existing file-based users and roles and calls the +user and roles APIs to add them to the native realm. You can migrate all users +and roles, or specify the ones you want to migrate. Users and roles that +already exist in the `native` realm are not replaced or overridden. If +the names you specify with the `--users` and `--roles` options don't +exist in the `file` realm, they are skipped. + +Run the migrate tool after you install the X-Pack plugin. For example: + +[source, sh] +---------------------------------------------------------------------- +$ bin/x-pack/migrate native -U http://localhost:9200 -u elastic -p changeme +-n lee,foo -r role1,role2,role3,role4,foo +starting migration of users and roles... +importing users from [/home/es/config/shield/users]... +found existing users: [test_user, joe3, joe2] +migrating user [lee] +{"user":{"created":true}} +no user [foo] found, skipping +importing roles from [/home/es/config/shield/roles.yml]... 
+found existing roles: [marvel_user, role_query_fields, admin_role, role3, admin, +remote_marvel_agent, power_user, role_new_format_name_array, role_run_as, +logstash, role_fields, role_run_as1, role_new_format, kibana4_server, user, +transport_client, role1.ab, role_query] +migrating role [role1] +{"role":{"created":true}} +migrating role [role2] +{"role":{"created":true}} +role [role3] already exists, skipping +no role [foo] found, skipping +users and roles imported. +---------------------------------------------------------------------- + +[[migrate-tool-options]] +The `native` subcommand supports the following options: + +`-U`, `--url`:: +Endpoint URL of the Elasticsearch cluster to which you want to migrate the +file-based users and roles. Required. + +`-u`, `--username`:: +Username to use for authentication. + +`-p`, `--password`:: +Password to use for authentication. + +`-n`, `--users`:: +Comma-separated list of the users you want to migrate. If not specified, all +users are migrated. + +`-r`, `--roles`:: +Comma-separated list of the roles you want to migrate. If not specified, all +roles are migrated. + +Additionally, the `-E` flag can be used to specify additional settings. For example +to specify a different configuration directory, the command would look like: + +[source, sh] +---------------------------------------------------------------------- +$ bin/x-pack/migrate native -U http://localhost:9200 -u elastic -p changeme -Epath.conf=/etc/elasticsearch +---------------------------------------------------------------------- diff --git a/docs/en/security/authentication/native-realm.asciidoc b/docs/en/security/authentication/native-realm.asciidoc new file mode 100644 index 00000000000..7677184f31b --- /dev/null +++ b/docs/en/security/authentication/native-realm.asciidoc @@ -0,0 +1,196 @@ +[[native-realm]] +=== Native User Authentication + +The easiest way to manage and authenticate users is with the internal `native` +realm. 
You can use the REST APIs or Kibana to add and remove users, assign user roles, and +manage user passwords. + +[[native-realm-configuration]] +[float] +==== Configuring a Native Realm + +The native realm is added to the realm chain by default. You don't need to +explicitly configure a native realm to manage users through the REST APIs. + + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. To use the +`native` realm as a fallback, you must include it in the realm chain. + +You can, however, configure options for the `native` realm in the +`xpack.security.authc.realms` namespace in `elasticsearch.yml`. Explicitly +configuring a native realm enables you to set the order in which it appears in +the realm chain, temporary disable the realm, and control its cache options. + +To configure a native realm: + +. Add a realm configuration of type `native` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `native`. If you are configuring multiple realms, you should also +explicitly set the `order` attribute. See <> +for all of the options you can set for the `native` realm. ++ +For example, the following snippet shows a `native` realm configuration that +sets the `order` to zero so the realm is checked first: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + native1: + type: native + order: 0 +------------------------------------------------------------ + +. Restart Elasticsearch. + +[[native-settings]] +.Native Realm Settings +[cols="4,^3,10"] +|======================= +| Setting | Required | Description + +| `type` | yes | Indicates the realm type. Must be set to `native`. + +| `order` | no | Indicates the priority of this realm within + the realm chain. Realms with a lower order + are consulted first. 
Although not required, + we recommend explicitly setting this value + when you configure multiple realms. Defaults + to `Integer.MAX_VALUE`. + +| `enabled` | no | Indicates whether this realm is enabled or + disabled. When set to `false`, the realm is + not added to the realm chain and therefore + is inactive. Defaults to `true`. + +| `cache.ttl` | no | Specifies the time-to-live for cached user + entries. A user's credentials are cached for + this period of time. Specify the time period + using the standard Elasticsearch + {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. + +| `cache.max_users` | no | Specifies the maximum number of user entries + that can be cached at any given time. Defaults + to 100,000. + +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used + for the cached user credentials. See + <> + for the possible values. (Expert Setting) +|======================= + + +[[managing-native-users]] +==== Managing Native Users + +You manage users in the `native` realm through the <>. + +[[migrating-from-file]] +NOTE: To migrate file-based users to the `native` realm, use the +<> tool. + +[float] +[[native-add]] +===== Adding Users + +To add a user, submit a PUT or POST request to the `/_xpack/security/user/` +endpoint. A username must be at least 1 character long and no longer than 30 +characters. The first character must be a letter (`a-z` or `A-Z`) or an +underscore (`_`). Subsequent characters can be letters, underscores (`_`), +digits (`0-9`), or any of the following symbols `@`, `-`, `.` or `$`. 
+ +[source,js] +-------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "j@rV1s", <1> + "roles" : [ "admin", "other_role1" ], <2> + "full_name" : "Jack Nicholson", <3> + "email" : "jacknich@example.com", <4> + "metadata" : { <5> + "intelligence" : 7 + }, + "enabled": true <6> +} +-------------------------------------------------- +// CONSOLE +<1> You must specify a password when adding a user. Passwords must be at least 6 + characters long. +<2> You must assign at least one role to the user. The roles determine the user's + access permissions. +<3> The user's full name. Optional. +<4> The user's email address. Optional. +<5> Arbitrary metadata you want to associate with the user. Optional. +<6> Specifies whether the user should be enabled. Optional with a default of true. + + +[float] +[[native-list]] +===== Retrieving Users + +To retrieve all users, submit a GET request to the `/_xpack/security/user` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +To retrieve particular users, specify the users as a comma-separated list: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user/jacknich,rdeniro +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +An object is returned holding the found users, each keyed by the relevant +username. Note that user passwords are not included. 
+ +[source,js] +-------------------------------------------------- +{ + "jacknich" : { + "username": "jacknich", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "enabled" : true, + "metadata" : { + "intelligence" : 7 + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +[float] +[[native-delete]] +===== Deleting Users + +To delete a user, submit a DELETE request to the `/_xpack/security/user/` +endpoint: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/user/jacknich +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the user is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/en/security/authentication/pki-realm.asciidoc b/docs/en/security/authentication/pki-realm.asciidoc new file mode 100644 index 00000000000..4d7f13fb86d --- /dev/null +++ b/docs/en/security/authentication/pki-realm.asciidoc @@ -0,0 +1,132 @@ +[[pki-realm]] +=== PKI User Authentication + +You can configure {security} to use Public Key Infrastructure (PKI) certificates +to authenticate users. This requires clients to present X.509 certificates. To +use PKI, you configure a PKI realm, enable client authentication on the desired +network layers (transport or http), and map the Distinguished Names (DNs) from +the user certificates to {security} roles in the <>. + +You can also use a combination of PKI and username/password authentication. For +example, you can enable SSL/TLS on the transport layer and define a PKI realm to +require transport clients to authenticate with X.509 certificates, while still +authenticating HTTP traffic using username and password credentials. 
You can also set +`xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without +certificates to authenticate with other credentials. + +IMPORTANT: You must enable SSL/TLS and enabled client authentication to use PKI. + For more information, see <>. + +==== PKI Realm Configuration + +Like other realms, you configure options for a `pki` realm under the +`xpack.security.authc.realms` namespace in `elasticsearch.yml`. + +To configure `pki` realm: + +. Add a realm configuration of type `pki` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to +`pki`. If you are configuring multiple realms, you should also explicitly set +the `order` attribute. See <> for all of the options you can set +for a `pki` realm. ++ +For example, the following snippet shows the most basic `pki` realm configuration: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + pki1: + type: pki +------------------------------------------------------------ ++ +With this configuration, any certificate trusted by the SSL/TLS layer is accepted +for authentication. The username is the common name (CN) extracted from the DN +of the certificate. ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. ++ +If you want to use something other than the CN of the DN as the username, you +can specify a regex to extract the desired username. 
For example, the regex in +the following configuration extracts the email address from the DN: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + pki1: + type: pki + username_pattern: "EMAILADDRESS=(.*?)(?:,|$)" +------------------------------------------------------------ ++ +You can also specify which truststore to use for authentication. This is useful +when the SSL/TLS layer trusts clients with certificates that are signed by a +different CA than the one that signs your users' certificates. To specify the +location of the truststore, specify the `truststore.path` option: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + pki1: + type: pki + truststore: + path: "/path/to/pki_truststore.jks" + password: "changeme" +------------------------------------------------------------ + +. Restart Elasticsearch. + +[[pki-settings]] +===== PKI Realm Settings + +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `pki`. +| `order` | no | Indicates the priority of this realm within the realm + chain. Realms with a lower order are consulted first. + Although not required, we recommend explicitly + setting this value when you configure multiple realms. + Defaults to `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or disabled. + Enables you to disable a realm without removing its + configuration. Defaults to `true`. +| `username_pattern` | no | Specifies the regular expression pattern used to extract + the username from the certificate DN. The first match + group is used as the username. Defaults to `CN=(.*?)(?:,\|$)`. +| `truststore.path` | no | The path to the truststore. Defaults to the path + defined by <>. +| `truststore.password` | no/yes | Specifies the password for the truststore. 
Must be + provided if `truststore.path` is set. +| `truststore.algorithm` | no | Specifies the algorithm used for the truststore. + Defaults to `SunX509`. +| `files.role_mapping` | no | Specifies the <> + for the <>. + Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`. +|======================= + +[[assigning-roles-pki]] +==== Mapping Roles for PKI Users + +You map roles for PKI users in the role mapping file stored on each node. You +identify a user by the distinguished name in their certificate. For example, the +following mapping configuration maps `John Doe` to the `user` role: + +[source, yaml] +------------------------------------------------------------ +user: <1> + - "cn=John Doe,ou=example,o=com" <2> +------------------------------------------------------------ +<1> The name of a role. +<2> The distinguished name (DN) of a PKI user. + +For more information, see <>. diff --git a/docs/en/security/authentication/user-cache.asciidoc b/docs/en/security/authentication/user-cache.asciidoc new file mode 100644 index 00000000000..8e7b43fec21 --- /dev/null +++ b/docs/en/security/authentication/user-cache.asciidoc @@ -0,0 +1,60 @@ +[[controlling-user-cache]] +=== Controlling the User Cache + +User credentials are cached in memory on each node to avoid connecting to a +remote authentication service or hitting the disk for every incoming request. +You can configure characteristics of the user cache with the `cache.ttl`, +`cache.max_users`, and `cache.hash_algo` realm settings. + +NOTE: PKI realms do not use the user cache. + +The cached user credentials are hashed in memory. By default, {security} uses a +salted `sha-256` hash algorithm. You can use a different hashing algorithm by +setting the `cache_hash_algo` setting to any of the following: + +[[cache-hash-algo]] +.Cache hash algorithms +|======================= +| Algorithm | | | Description +| `ssha256` | | | Uses a salted `sha-256` algorithm (default). +| `md5` | | | Uses `MD5` algorithm. 
+| `sha1` | | | Uses `SHA1` algorithm. +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 10 rounds. +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 4 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 5 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 6 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 7 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 8 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 9 rounds. +| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in + memory. CAUTION: keeping clear text is considered insecure + and can be compromised at the OS level (for example through + memory dumps and using `ptrace`). +|======================= + +[[cache-eviction-api]] +==== Evicting Users from the Cache + +{security} exposes a <> you can use +to force the eviction of cached users. For example, the following request evicts +all users from the `ad1` realm: + +[source, js] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1/_clear_cache' +------------------------------------------------------------ + +To clear the cache for multiple realms, specify the realms as a comma-separated +list: + +[source, js] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1,ad2/_clear_cache' +------------------------------------------------------------ + +You can also evict specific users: + +[source, java] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1/_clear_cache?usernames=rdeniro,alpacino' +------------------------------------------------------------ diff --git a/docs/en/security/authorization.asciidoc b/docs/en/security/authorization.asciidoc new file mode 100644 index 
00000000000..58239da64af --- /dev/null +++ b/docs/en/security/authorization.asciidoc @@ -0,0 +1,394 @@ +[[authorization]] +== Configuring Role-based Access Control + +{security} introduces the concept of _authorization_ to Elasticsearch. +Authorization is the process of determining whether the user behind an incoming +request is allowed to execute it. This process takes place once a request is +successfully authenticated and the user behind the request is identified. + +[[roles]] +[float] +=== Roles, Permissions and Privileges + +The authorization process revolves around the following 5 constructs: + +_Secured Resource_:: +A resource to which access is restricted. Indices/aliases, documents, fields, +users and the Elasticsearch cluster itself are all examples of secured objects. + +_Privilege_:: +A named group representing one or more actions that a user may execute against a +secured resource. Each secured resource has its own sets of available privileges. +For example, `read` is an index privilege that represents all actions that enable +reading the indexed/stored data. For a complete list of available privileges +see <>. + +_Permissions_:: +A set of one or more privileges against a secured resource. Permissions can +easily be described in words, here are a few examples: + * `read` privilege on the `products` index + * `manage` privilege on the cluster + * `run_as` privilege on `john` user + * `read` privilege on documents that match query X + * `read` privilege on `credit_card` field + +_Role_:: +A named set of permissions + +_User_:: +The authenticated user. + +A secure Elasticsearch cluster manages the privileges of users through _roles_. +A role has a unique name and identifies a set of permissions that translate to +privileges on resources. A user can be associated with an arbitrary number of +roles. The total set of permissions that a user has is therefore defined by the +union of the permissions in all its roles.
+ +As an administrator, you will need to define the roles that you want to use, +then assign users to the roles. These can be assigned to users in a number of +ways depending on the realms by which the users are authenticated. + +[[built-in-roles]] +=== Built-in Roles + +{security} applies a default role to all users, including <>. The default role enables users to access the authenticate +endpoint, change their own passwords, and get information about themselves. + +{security} also provides a set of built-in roles you can explicitly assign +to users. These roles have a fixed set of privileges and cannot be updated. + +[[built-in-roles-superuser]] +`superuser`:: +Grants full access to the cluster, including all indices and data. A user with +the `superuser` role can also manage users and roles and <> any other user in the system. Due to the permissive nature of +this role, take extra care when assigning it to a user. + +[[built-in-roles-transport-client]] +`transport_client`:: +Grants the privileges required to access the cluster through the Java Transport Client. The Java Transport Client fetches information about the nodes in the +cluster using the _Node Liveness API_ and the _Cluster State API_ (when +sniffing is enabled). Assign your users this role if they use the +Transport Client. ++ +NOTE: Using the Transport Client effectively means the users are granted access +to the cluster state. This means users can view the metadata over all indices, +index templates, mappings, node and basically everything about the cluster. +However, this role does not grant permission to view the data in all indices. + +[[built-in-roles-kibana-user]] +`kibana_user` :: +Grants the minimum privileges required for any user of Kibana. This role grants +access to the Kibana indices and grants monitoring privileges for the cluster. 
+ +[[built-in-roles-monitoring-user]] +`monitoring_user` :: +Grants the minimum privileges required for any user of Monitoring other than those +required to use Kibana. This role grants access to the monitoring indices. Monitoring +users should also be assigned the `kibana_user` role. + +[[built-in-roles-reporting-user]] +`reporting_user` :: +Grants the specific privileges required for users of Reporting other than those +required to use Kibana. This role grants access to the reporting indices. Reporting +users should also be assigned the `kibana_user` role and a role that grants them +access to the data that will be used to generate reports with. + +[[built-in-roles-remote-monitoring-agent]] +`remote_monitoring_agent` :: +Grants the minimum privileges required for a remote monitoring agent to write data +into this cluster. + +[[built-in-roles-ingest-user]] +`ingest_admin` :: +Grants access to manage *all* index templates and *all* ingest pipeline configurations. ++ +NOTE: This role does *not* provide the ability to create indices; those privileges +must be defined in a separate role. + +[[built-in-roles-kibana-system]] +`kibana_system` :: +Grants access necessary for the Kibana system user to read from and write to the Kibana indices +and check the availability of the Elasticsearch cluster. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[built-in-roles-logstash-system]] +`logstash_system` :: +Grants access necessary for the Logstash system user to send data to Elasticsearch. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[defining-roles]] +=== Defining Roles + +A role is defined by the following JSON structure: + +[source,js] +----- +{ + "name": "...", <1> + "run_as": [ ... ] <2> + "cluster": [ ... ], <3> + "indices": [ ... ] <4> +} +----- +<1> The role name, also used as the role ID. 
+<2> A list of usernames the owners of this role can <>. +<3> A list of cluster privileges. These privileges define the + cluster level actions users with this role are able to execute. This field + is optional (missing `cluster` privileges effectively mean no cluster level + permissions). +<4> A list of indices permissions entries. This field is optional (missing `indices` + privileges effectively mean no index level permissions). + +[[valid-role-name]] +NOTE: A valid role name must be at least 1 character and no longer than 30 + characters. It must begin with a letter (`a-z`) or an underscore (`_`). + Subsequent characters can be letters, underscores (`_`), digits (`0-9`) or + any of the following symbols `@`, `-`, `.` or `$` + +The following describes the structure of an indices permissions entry: + +[source,js] +------- +{ + "names": [ ... ], <1> + "privileges": [ ... ], <2> + "field_security" : { ... }, <3> + "query": "..." <4> +} +------- +<1> A list of indices (or index name patterns) to which the permissions in this + entry apply. +<2> The index level privileges the owners of the role have on the associated + indices (those indices that are specified in the `name` field) +<3> Specification for document fields the owners of the role have read access to. + See <> for details. +<4> A search query that defines the documents the owners of the role have read + access to. A document within the associated indices must match this query + in order for it to be accessible by the owners of the role. + +[TIP] +============================================================================== +When specifying index names, you can use indices and aliases with their full +names or regular expressions that refer to multiple indices. + +* Wildcard (default) - simple wildcard matching where `*` is a placeholder + for zero or more characters, `?` is a placeholder for a single character + and `\` may be used as an escape character. 
+ +* Regular Expressions - A more powerful syntax for matching more complex + patterns. This regular expression is based on Lucene's regexp automaton + syntax. To enable this syntax, it must be wrapped within a pair of + forward slashes (`/`). Any pattern starting with `/` and not ending with + `/` is considered to be malformed. + +.Example Regular Expressions +[source,yaml] +------------------------------------------------------------------------------ +"foo-bar": # match the literal `foo-bar` +"foo-*": # match anything beginning with "foo-" +"logstash-201?-*": # ? matches any one character +"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019 +"/foo": # syntax error - missing final / +------------------------------------------------------------------------------ +============================================================================== + +The following snippet shows an example definition of a `clicks_admin` role: + +[source,js] +----------- +{ + "run_as": [ "clicks_watcher_1" ] + "cluster": [ "monitor" ], + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + }, + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +----------- + +Based on the above definition, users owning the `clicks_admin` role can: + + * Impersonate the `clicks_watcher_1` user and execute requests on its behalf. + * Monitor the Elasticsearch cluster + * Read data from all indices prefixed with `events-` + * Within these indices, only read the events of the `click` category + * Within these document, only read the `category`, `@timestamp` and `message` + fields. + +TIP: For a complete list of available <> + +There are two available mechanisms to define roles: using the _Role Management APIs_ +or in local files on the Elasticsearch nodes. + +[float] +[[roles-management-ui]] +=== Role Management UI + +If you are a Kibana user, make sure to <>. 
+This enables you to easily manage users and roles from within Kibana. To manage roles, +log in to Kibana and go to *Management / Elasticsearch / Roles*. + +[float] +[[roles-management-api]] +=== Role Management API + +The _Role Management APIs_ enable you to add, update, remove and retrieve roles +dynamically. When you use the APIs to manage roles in the `native` realm, the +roles are stored in an internal Elasticsearch index. + +[[roles-api-add]] +==== Adding a Role + +To add a role, submit a PUT or POST request to the `/_xpack/security/role/` +endpoint. + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/clicks_admin +{ + "run_as": [ "clicks_watcher_1" ], + "cluster": [ "monitor" ], + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + }, + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP + +NOTE: This API can also be used for updating role definitions. 
+ +[[roles-api-list]] +==== List Role + +To retrieve all roles, submit a GET request to the `/_xpack/security/role` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role +-------------------------------------------------- +// CONSOLE + +To retrieve particular roles, specify the roles as a comma-separated list: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role/clicks_admin +-------------------------------------------------- +// CONSOLE + +Response: + +[source,js] +-------------------------------------------------- +{ + "clicks_admin": { + "run_as": [ "clicks_watcher_1" ], + "cluster": [ "monitor" ], + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + }, + "query": "{\"match\": {\"category\": \"click\"}}" + } + ], + "metadata": { }, + "transient_metadata": { + "enabled": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +NOTE: If single role is requested, that role is returned as the response. When + requesting multiple roles, an object is returned holding the found roles, + each keyed by the relevant role name. + +[[roles-api-delete]] +==== Deleting a Role + +To delete a role, submit a DELETE request to the `/_xpack/security/role/` +endpoint: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role/clicks_admin +-------------------------------------------------- +// CONSOLE + +[float] +[[roles-management-file]] +=== File-based Role Management + +Apart from the _Role Management APIs_ roles can also be defined in local +`roles.yml` file located in `CONFIG_DIR/x-pack`. This is a YAML file where each +role definition is keyed by its name. 
+ +[IMPORTANT] +============================== +If the same role name is used in the `roles.yml` file and through the +_Role Management APIs_, the role found in the file will be used. +============================== + +While the _Role Management APIs_ are the preferred mechanism to define roles, +using the `roles.yml` file becomes useful if you want to define fixed roles that +no one (besides an administrator having physical access to the Elasticsearch nodes) +would be able to change. + +[IMPORTANT] +============================== +The `roles.yml` file is managed locally by the node and is not managed globally by the +cluster. This means that with a typical multi-node cluster, the exact same +changes need to be applied on each and every node in the cluster. + +A safer approach would be to apply the change on one of the nodes and have the +`roles.yml` distributed/copied to all other nodes in the cluster (either +manually or using a configuration management system such as Puppet or Chef). +============================== + +The following snippet shows an example of the `roles.yml` file configuration: + +[source,yaml] +----------------------------------- +click_admins: + run_as: [ 'clicks_watcher_1' ] + cluster: [ 'monitor' ] + indices: + - names: [ 'events-*' ] + privileges: [ 'read' ] + field_security: + grant: ['category', '@timestamp', 'message' ] + query: '{"match": {"category": "click"}}' +----------------------------------- + +{security} continuously monitors the `roles.yml` file and automatically picks +up and applies any changes to it.
+ +include::authorization/alias-privileges.asciidoc[] + +include::authorization/mapping-roles.asciidoc[] + +include::authorization/field-and-document-access-control.asciidoc[] + +include::authorization/run-as-privilege.asciidoc[] diff --git a/docs/en/security/authorization/alias-privileges.asciidoc b/docs/en/security/authorization/alias-privileges.asciidoc new file mode 100644 index 00000000000..6916e2ab2ca --- /dev/null +++ b/docs/en/security/authorization/alias-privileges.asciidoc @@ -0,0 +1,101 @@ +[[securing-aliases]] +=== Granting Privileges for Indices & Aliases + +Elasticsearch allows to execute operations against {ref}/indices-aliases.html[index aliases], +which are effectively virtual indices. An alias points to one or more indices, +holds metadata and potentially a filter. {security} treats aliases and indices +the same. Privileges for indices actions are granted on specific indices or +aliases. In order for an indices action to be authorized, the user that executes +it needs to have permissions for that action on all the specific indices or +aliases that the request relates to. + +Let's look at an example. Assuming we have an index called `2015`, an alias that +points to it called `current_year`, and a user with the following role: + +[source,js] +-------------------------------------------------- +{ + "names" : [ "2015" ], + "privileges" : [ "read" ] +} +-------------------------------------------------- +// NOTCONSOLE + +The user attempts to retrieve a document from `current_year`: + +[source,shell] +------------------------------------------------------------------------------- +GET /current_year/event/1 +------------------------------------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT 2015\n{"aliases": {"current_year": {}}}\nPUT 2015\/event\/1\n{}\n/] + +The above request gets rejected, although the user has `read` privilege on the +concrete index that the `current_year` alias points to. 
The correct permission +would be as follows: + +[source,js] +-------------------------------------------------- +{ + "names" : [ "current_year" ], + "privileges" : [ "read" ] +} +-------------------------------------------------- +// NOTCONSOLE + +[float] +==== Managing aliases + +Unlike creating indices, which requires the `create_index` privilege, adding, +removing and retrieving aliases requires the `manage` permission. Aliases can be +added to an index directly as part of the index creation: + +[source,shell] +------------------------------------------------------------------------------- +PUT /2015 +{ + "aliases" : { + "current_year" : {} + } +} +------------------------------------------------------------------------------- +// CONSOLE + +or via the dedicated aliases api if the index already exists: + +[source,shell] +------------------------------------------------------------------------------- +POST /_aliases +{ + "actions" : [ + { "add" : { "index" : "2015", "alias" : "current_year" } } + ] +} +------------------------------------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT 2015\n/] + +The above requests both require the `manage` privilege on the alias name as well +as the targeted index, as follows: + +[source,js] +-------------------------------------------------- +{ + "names" : [ "20*", "current_year" ], + "privileges" : [ "manage" ] +} +-------------------------------------------------- +// NOTCONSOLE + +The index aliases api also allows you to delete aliases from existing indices. +The privileges required for such a request are the same as above. Both index and +alias need the `manage` permission. + + +[float] +==== Filtered aliases + +Aliases can hold a filter, which allows you to select a subset of documents that can +be accessed out of all the documents that the physical index contains. These +filters are not always applied and should not be used in place of +<>.
diff --git a/docs/en/security/authorization/field-and-document-access-control.asciidoc b/docs/en/security/authorization/field-and-document-access-control.asciidoc new file mode 100644 index 00000000000..b1c0648a547 --- /dev/null +++ b/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -0,0 +1,443 @@ +[[field-and-document-access-control]] +=== Setting Up Field and Document Level Security + +You can control access to data within an index by adding field and document level +security permissions to a role. Field level security permissions restrict access +to particular fields within a document. Document level security permissions +restrict access to particular documents within an index. + +NOTE: Document and field level security is currently meant to operate with +read-only privileged accounts. Users with document and field level +security enabled for an index should not perform write operations. + +A role can define both field and document level permissions on a per-index basis. +A role that doesn’t specify field level permissions grants access to ALL fields. +Similarly, a role that doesn't specify document level permissions grants access +to ALL documents in the index. + +[IMPORTANT] +===================================================================== +When assigning users multiple roles, be careful that you don't inadvertently +grant wider access than intended. Each user has a single set of field level and +document level permissions per index. When you assign a user multiple roles, +the permissions are ORed together. This means if you assign one role that +restricts access to particular fields in an index, and another that doesn't +specify any field level access restrictions for that index, the user will have +access to all fields. The same is true for document level permissions. + +For example, let's say `role_a` only grants access to the `address` +field of the documents in `index1`, but doesn't specify any document +restrictions. 
Conversely, `role_b` limits access to a subset of the documents +in `index1`, but doesn't specify any field restrictions. If you assign a user +both roles, `role_a` gives the user access to all documents and `role_b` gives +the user access to all fields. + +If you need to restrict access to both documents and fields, consider splitting +documents by index instead. +===================================================================== + +[[field-level-security]] +==== Field Level Security + +To enable field level security, you specify the fields that each role can access +as part of the indices permissions in a role definition. This binds field level +security to a well defined set of indices (and potentially a set of +<>). + +The following role definition grants read access only to the `category`, +`@timestamp`, and `message` fields in all the `events-*` indices. + +[source,js] +-------------------------------------------------- +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + } + } + ] +} +-------------------------------------------------- + +To allow access to the `_all` meta field, you must explicitly list it as an +allowed field. Access to the following meta fields is always allowed: `_id`, +`_type`, `_parent`, `_routing`, `_timestamp`, `_ttl`, `_size` and `_index`. If +you specify an empty list of fields, only these meta fields are accessible. + +NOTE: Omitting the fields entry entirely disables field-level security. + +You can also specify field expressions. 
For example, the following +example grants read access to all fields starting with `event_` prefix: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "event_*" ] + } + } + ] +} +-------------------------------------------------- + +Use the dot notations to refer to nested fields in more complex documents. For +example, assuming the following document: + +[source,js] +-------------------------------------------------- +{ + "customer": { + "handle": "Jim", + "email": "jim@mycompany.com", + "phone": "555-555-5555" + } +} +-------------------------------------------------- + +The following role definition only allows access to the customer `handle` field: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- + +This is where wildcard support shines. For example, use `customer.*` to only +enable read access to the `customer` data: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- + +Similar to granting field permissions the permission to access fields can be denied with the following syntax: + + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "*"], + "except": [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- + + +The following rules apply: + +Absence of "field_security" in a role is equivalent to * access. 
+Denied fields may only be provided if permission has been granted explicitly to other fields. The exceptions given must be a subset of the +fields that permissions have been granted to. +Denied and granted fields defined implies access to all granted fields except those which match the pattern in denied fields. Example: + + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "except": [ "customer.handle" ], + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- + +In the above example all fields with the prefix "customer." are allowed except for "customer.handle". + +An empty array for grant (eg. "grant" : []) means that no fields are granted access to. + +===== Field Level Security and Roles + +When a user has several roles that specify field level permissions then the resulting field level permissions per index are the union +of the individual role permissions. +For example if these two roles are merged: + +[source,js] +-------------------------------------------------- +{ + // role 1 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b*" ] + } + } + ] +} + +{ + // role 2 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.b*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- + +Then the resulting permission would be equal to: + +[source,js] +-------------------------------------------------- +{ + // role 1 + role 2 + ... 
+ "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- + + +[[document-level-security]] +==== Document Level Security + +Document level security restricts the documents that users have read access to. +To enable document level security, you specify a query that matches all the +accessible documents as part of the indices permissions within a role definition. +This binds document level security to a well defined set of indices. + +Enabling document level security restricts which documents can be accessed from any document based read API. +To enable document level security, you use a query to specify the documents that each role can access in the `roles.yml` file. +You specify the document query with the `query` option. The document query is associated with a particular index or index pattern and +operates in conjunction with the privileges specified for the indices. + +The following role definition grants read access only to documents that +belong to the `click` category within all the `events-*` indices. + +[source,js] +-------------------------------------------------- +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +-------------------------------------------------- + +NOTE: Omitting the `query` entry entirely disables document level security for + the respective indices permission entry. + +The specified `query` expects the same format as if it was defined in the +search request and supports ELasticsearch's full {ref}/query-dsl.html[Query DSL]. + +For example, the following role grants read access to all indices, but restricts +access to documents whose `department_id` equals `12`. 
+ +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "query" : { + "term" : { "department_id" : 12 } + } + } + ] +} +-------------------------------------------------- + +NOTE: `query` also accepts queries written as string values + +[[templating-role-query]] +===== Templating a Role Query + +You can use Mustache templates in a role query to insert the username of the +current authenticated user into the role. Like other places in Elasticsearch +that support templating or scripting, you can specify inline, stored, +or file based templates and define custom parameters. You access the current +authenticated user's details through the `_user` parameter. + +For example, the following role query uses a template to insert the username +of the current authenticated user: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "inline" : { + "term" : { "acl.username" : "{{_user.username}}" } + } + } + } + } + ] +} +-------------------------------------------------- + +You can access the following information through the `_user` variable: + +[options="header"] +|====== +| Property | Description +| `_user.username` | The username of the current authenticated user. +| `_user.full_name` | If specified, the full name of the current authenticated user. +| `_user.email` | If specified, the email of the current authenticated user. +| `_user.roles` | If associated, a list of the role names of the current authenticated user. +| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user. +|====== + +You can also access custom user metadata. 
For example, if you maintain a +`group_id` in your user metadata, you can apply document level security +based on the `group.id` field in your documents: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "inline" : { + "term" : { "group.id" : "{{_user.metadata.group_id}}" } + } + } + } + } + ] +} +-------------------------------------------------- + +[[set-security-user-processor]] +===== Set Security User Ingest Processor + +If an index is being shared by many small users it makes sense put all these users into the same index as having a +dedicated index or shard per user is too wasteful. In order to guarantee that a user only read its own documents it +makes sense to set up document level security. In order to use document level security for this each document must have +the username or role name associated with it, so that it can be queried by the document level security's role query. +This is where the `set_security_user` ingest processor can help. + +NOTE: You need to make sure to use unique ids for each user that uses the same index, because document level security + doesn't apply on write APIs and you can overwrite other users' documents. This ingest processor just adds + properties of the current authenticated user to the documents being indexed. + + +The `set_security_user` processor attaches user related details (`username`, `roles`, `email`, `full_name` and `metadata` ) +from the current authenticated user to the current document by pre-processed by ingest. + +So when indexing data with an ingest pipeline then user details get automatically attached with the document: + +[source,js] +-------------------------------------------------- +PUT shared-logs/log/1?pipeline=my_pipeline_id +{ + ... 
+} +-------------------------------------------------- + +Read the {ref}/ingest.html[ingest docs] for more information +about setting up a pipeline and other processors. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into. +| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. +|====== + +Example config that adds all user details of the current authenticated user to the `user` field to all documents being +processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- + +[[multiple-roles-dls-fls]] +==== Multiple Roles with Document and Field Level Security + +A user can have many roles and each role can define different permissions on the +same index. It is important to understand the behavior of Document and Field Level +security in this scenario. + +Document level security will take into account each role held by the user, and +combine each document level security query for a given index with an "OR". This +means that only one of the role queries must match for a document to be returned. +For example, if a role grants access to an index without document level security +and another grants access with document level security, document level security +will not be applied; the user with both roles will have access to all of the +documents in the index. + +Field level security will take into account each role the user has and combine +all of the fields listed into a single set for each index. 
For example, if a +role grants access to an index without field level security and another grants +access with field level security, field level security will not be applied for +that index; the user with both roles will have access to all of the fields in +in the index. diff --git a/docs/en/security/authorization/mapping-roles.asciidoc b/docs/en/security/authorization/mapping-roles.asciidoc new file mode 100644 index 00000000000..c4b51e64bc5 --- /dev/null +++ b/docs/en/security/authorization/mapping-roles.asciidoc @@ -0,0 +1,67 @@ +[[mapping-roles]] +=== Mapping Users and Groups to Roles + +If you authenticate users with the `native` or `file` realms, you can manage +role assignment user the <> or the +<> command-line tool respectively. + +For other types of realms, you configure role mappings for users and groups in a +YAML file and copy it to each node in the cluster. Tools like Puppet or Chef can +help with this. + +By default, role mappings are stored in `CONF_DIR/x-pack/role_mapping.yml`, where +`CONF_DIR` is `ES_HOME/config` (zip/tar installations) or `/etc/elasticsearch` +(package installations). To specify a different location, you configure the +`files.role_mapping` realm settings in `elasticsearch.yml`. This setting enable +you to use a different set of mappings for each realm type: + +|===== +| `xpack.security.authc.ldap.files.role_mapping` | | | The location of the role mappings for LDAP realms. +| `xpack.security.authc.active_directory.files.role_mapping` | | | The location of the role mappings for Active Directory realms. +| `xpack.security.authc.pki.files.role_mapping` | | | The location of the role mappings for PKI realms. +|===== + +Within the role mapping file, the security roles are keys and groups and users +are values. The mappings can have a many-to-many relationship. When you map roles +to groups, the roles of a user in that group are the combination of the roles +assigned to that group and the roles assigned to that user. 
+ +[[ad-role-mapping]] +The available roles are either added using the <> +or defined in the <>. To specify users and +groups in the role mappings, you use their _Distinguished Names_ (DNs). A DN is +a string that uniquely identifies the user or group, for example +`"cn=John Doe,cn=contractors,dc=example,dc=com"`. + +NOTE: {security} only supports Active Directory security groups. You cannot map + distribution groups to roles. + +[[ldap-role-mapping]] +For example, the following snippet maps the `admins` group to the `monitoring` +role and maps the `John Doe` user, the `users` group, and the `admins` group to +the `user` role. + +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=John Doe,cn=contractors,dc=example,dc=com" <3> + - "cn=users,dc=example,dc=com" + - "cn=admins,dc=example,dc=com" +------------------------------------------------------------ +<1> The name of a {security} role. +<2> The distinguished name of an LDAP group or an Active Directory security group. +<3> The distinguished name of an LDAP or Active Directory user. + +[[pki-role-mapping]] +PKI realms only support mapping users to roles, as there is no notion of a group +in PKI. For example: + +[source, yaml] +------------------------------------------------------------ +monitoring: + - "cn=Admin,ou=example,o=com" +user: + - "cn=John Doe,ou=example,o=com" +------------------------------------------------------------ diff --git a/docs/en/security/authorization/run-as-privilege.asciidoc b/docs/en/security/authorization/run-as-privilege.asciidoc new file mode 100644 index 00000000000..fb9d159a786 --- /dev/null +++ b/docs/en/security/authorization/run-as-privilege.asciidoc @@ -0,0 +1,31 @@ +[[run-as-privilege]] +=== Submitting Requests on Behalf of Other Users + +{security} supports a permission that enables an authenticated user to submit +requests on behalf of other users. 
If your application already authenticates +users, you can use the _run as_ mechanism to restrict data access according to +{security} permissions without having to re-authenticate each user through. + +To "run as" (impersonate) another user, you must be able to retrieve the user from +the realm you use to authenticate. Both the internal `native` and `file` realms +support this out of the box. The LDAP realm however must be configured to enable +user search. For more information, see <>. + +To submit requests on behalf of other users, you need to have the `run_as` +permission. For example, the following role grants permission to submit request +on behalf of `jacknich` or `redeniro`: + +[source,js] +--------------------------------------------------- +{ + "run_as" : [ "jacknich", "rdeniro" ] +} +--------------------------------------------------- + +To submit a request as another user, you specify the user in the +`es-security-runas-user` request header. For example: + +[source,shell] +--------------------------------------------------- +curl -H "es-security-runas-user: jacknich" -u es_admin -XGET 'http://localhost:9200/' +--------------------------------------------------- diff --git a/docs/en/security/getting-started.asciidoc b/docs/en/security/getting-started.asciidoc new file mode 100644 index 00000000000..d8d583b49cf --- /dev/null +++ b/docs/en/security/getting-started.asciidoc @@ -0,0 +1,118 @@ +[[security-getting-started]] +== Getting Started with Security + +To secure a cluster, you must install {xpack} on every node in the +cluster. Basic authentication is enabled by default--to communicate +with the cluster, you must specify a username and password. +Unless you <>, all +requests that don't include a user name and password are rejected. + +{security} provides a built-in `elastic` superuser you can use +to start setting things up. 
This `elastic` user has full access +to the cluster, including all indices and data, so make sure +you change the default password and protect the `elastic` user +credentials accordingly. + +To get started with {security}: + +. <> and start Elasticsearch and Kibana. + +. Change the passwords of the built in `kibana`, `logstash_system` and `elastic` users: ++ +[source,shell] +---------------------------------------------------------- +curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/elastic/_password' -H "Content-Type: application/json" -d '{ + "password" : "elasticpassword" +}' + +curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/kibana/_password' -H "Content-Type: application/json" -d '{ + "password" : "kibanapassword" +}' + +curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/logstash_system/_password' -H "Content-Type: application/json" -d '{ + "password" : "logstashpassword" +}' +---------------------------------------------------------- +// NOTCONSOLE ++ +NOTE: The default password for the `elastic` user is `changeme`. + +. Set up roles and users to control access to Elasticsearch and Kibana. +For example, to grant _John Doe_ full access to all indices that match +the pattern `events*` and enable him to create visualizations and dashboards +for those indices in Kibana, you could create an `events_admin` role and +and assign the role to a new `johndoe` user. 
++ +[source,shell] +---------------------------------------------------------- +curl -XPOST -u elastic 'localhost:9200/_xpack/security/role/events_admin' -H "Content-Type: application/json" -d '{ + "indices" : [ + { + "names" : [ "events*" ], + "privileges" : [ "all" ] + }, + { + "names" : [ ".kibana*" ], + "privileges" : [ "manage", "read", "index" ] + } + ] +}' + +curl -XPOST -u elastic 'localhost:9200/_xpack/security/user/johndoe' -H "Content-Type: application/json" -d '{ + "password" : "userpassword", + "full_name" : "John Doe", + "email" : "john.doe@anony.mous", + "roles" : [ "events_admin" ] +}' +---------------------------------------------------------- +// NOTCONSOLE + +[[enable-message-authentication]] +. Enable message authentication to verify that messages are not tampered with or corrupted in transit: +.. Run the `syskeygen` tool from `ES_HOME` without any options: ++ +[source, shell] +---------------- +bin/x-pack/syskeygen +---------------- ++ +This creates a system key file in `CONFIG_DIR/x-pack/system_key`. + +.. Copy the generated system key to the rest of the nodes in the cluster. ++ +IMPORTANT: The system key is a symmetric key, so the same key must be on every + node in the cluster. + + +[[enable-auditing]] +. Enable Auditing to keep track of attempted and successful interactions with + your Elasticsearch cluster: ++ +-- +.. Add the following setting to `elasticsearch.yml` on all nodes in your cluster: ++ +[source,yaml] +---------------------------- +xpack.security.audit.enabled: true +---------------------------- +.. Restart Elasticsearch. + +By default, events are logged to a dedicated `elasticsearch-access.log` file in +`ES_HOME/logs`. You can also store the events in an Elasticsearch index for +easier analysis and control what events are logged. For more information, see +<>. 
+-- + +[[moving-on]] +IMPORTANT: Once you get these basic security measures in place, we strongly + recommend that you secure communications to and from nodes by + configuring your cluster to use <>. + Nodes that do not have encryption enabled send passwords in plain + text! + +Depending on your security requirements, you might also want to: + +* Integrate with <> or <>, +or <> for authentication. +* Use <> to allow or deny requests from particular +IP addresses or address ranges. diff --git a/docs/en/security/how-security-works.asciidoc b/docs/en/security/how-security-works.asciidoc new file mode 100644 index 00000000000..4576212973f --- /dev/null +++ b/docs/en/security/how-security-works.asciidoc @@ -0,0 +1,145 @@ +[[how-security-works]] +== How Security Works + +An Elasticsearch cluster is typically made out of many moving parts. There are +the Elasticsearch nodes that form the cluster, and often Logstash instances, +Kibana instances, Beats agents an clients, all communicating with the it. +It should not come as a surprise that securing such clusters has many facets and +layers. + +{security} provides the means to secure the Elastic cluster on several levels: + + * User authentication + * Authorization and Role Based Access Control (a.k.a RBAC) + * Node/Client Authentication and Channel Encryption + * Auditing + + +[float] +=== User Authentication + +User authentication is the process of identifying the users behind the requests +that hit the cluster and verifying that indeed they are who they claim to be. The +authentication process is handled by one or more authentication services called +_realms_. {security} provides the following built-in realms: + +|====== +| `native` | | | An internal realm where users are stored in a dedicated + Elasticsearch index. With this realm, users are + authenticated by usernames and passwords. The users + are managed via the <>. + +| `ldap` | | | A realm that uses an external LDAP server to authenticate + the users. 
With this realm, users are authenticated by + usernames and passwords. + +| `active_directory` | | | A realm that uses an external Active Directory Server + to authenticate the users. With this realm, users + are authenticated by usernames and passwords. + +| `pki` | | | A realm that authenticates users using Public Key + Infrastructure (PKI). This realm works in conjunction + with SSL/TLS and identifies the users through the + Distinguished Name (DN) of the client's X.509 + certificates. + +| `file` | | | An internal realm where users are defined in files + stored on each node in the Elasticsearch cluster. + With this realm, users are authenticated by usernames + and passwords. The users are managed via + <> that are + provided by {xpack} on installation. +|====== + +If none of the built-in realms meets your needs, you can also build your own +custom realm and plug it into {xpack}. + +When {security} is enabled, depending on the realms you've configured, you will +need to attach your user credentials to the requests sent to Elasticsearch. For +example, when using realms that support usernames and passwords you can simply +attach {wikipedia}/Basic_access_authentication[basic auth] header to the requests. + +For more information on user authentication see <> + + +[float] +=== Authorization + +The authorization process takes place once a request is authenticated and the +User behind the request is identified. Authorization is the process of determining +whether the user behind an incoming request is allowed to execute it. Naturally, +this process takes place right after an successful authentication - when the +user identity is known. + +The authorization process revolves around the following 5 constructs: + +_Secured Resource_:: +A resource to which access is restricted. Indices/aliases, documents, fields, +users and the Elasticsearch cluster itself are all examples of secured objects. 
+ +_Privilege_:: +A named group representing one or more actions that a user may execute against a +secured resource. Each secured resource has its own sets of available privileges. +For example, `read` is an index privilege that represents all actions that enable +reading the indexed/stored data. For a complete list of available privileges +see <>. + +_Permissions_:: +A set of one or more privileges against a secured resource. Permissions can +easily be described in words, here are few examples: + * `read` privilege on the `products` index + * `manage` privilege on the cluster + * `run_as` privilege on `john` user + * `read` privilege on documents that match query X + * `read` privilege on `credit_card` field + +_Role_:: +A named sets of permissions + +_User_:: +The authenticated user. + +A secure Elasticsearch cluster manages the privileges of users through _roles_. +A role has a unique name and identifies a set of permissions that translate to +privileges on resources. A user can be associated with an arbitrary number of +roles. The total set of permissions that a user has is therefore defined by +union of the permissions in all its roles. + +Roles can be assigned to users in a number of ways depending on the realms by +which the users are authenticated. + +For more information on user authentication see <> + + +[float] +=== Node/Client Authentication and Channel Encryption + +{security} supports configuring SSL/TLS for securing the communication channels +to, from and within the cluster. This support accounts for: + + * Encryption of data transmitted over the wires + * Certificate based node authentication - preventing unauthorized nodes/clients + from establishing a connection with the cluster. + +For more information, see <>. + +{security} also enables you to <> which can +be seen as a light mechanism for node/client authentication. With IP Filtering +you can restrict the nodes and clients that can connect to the cluster based +on their IP addresses. 
The IP filters configuration provides whitelisting +and blacklisting of IPs, subnets and DNS domains. + + +[float] +=== Auditing +When dealing with any secure system, it is critical to have a audit trail +mechanism set in place. Audit trails log various activities/events that occur in +the system, enabling you to analyze and back track past events when things go +wrong (e.g. security breach). + +{security} provides such audit trail functionality for all nodes in the cluster. +You can configure the audit level which accounts for the type of events that are +logged. These events include failed authentication attempts, user access denied, +node connection denied, and more. + +For more information on auditing see <>. diff --git a/docs/en/security/index.asciidoc b/docs/en/security/index.asciidoc new file mode 100644 index 00000000000..f7334f9ea9e --- /dev/null +++ b/docs/en/security/index.asciidoc @@ -0,0 +1,118 @@ +[[xpack-security]] += Securing Elasticsearch and Kibana + +:imagesdir: images/security + +[partintro] +-- +{security} enables you to easily secure a cluster. With Security, +you can password-protect your data as well as implement more advanced security +measures such as encrypting communications, role-based access control, +IP filtering, and auditing. This guide describes how to configure the security +features you need, and interact with your secured cluster. + +Security protects Elasticsearch clusters by: + +* <> + with password protection, role-based access control, and IP filtering. +* <> + with message authentication and SSL/TLS encryption. +* <> + so you know who's doing what to your cluster and the data it stores. + +[float] +[[preventing-unauthorized-access]] +=== Preventing Unauthorized Access + +To prevent unauthorized access to your Elasticsearch cluster, you must have a +way to _authenticate_ users. This simply means that you need a way to validate +that a user is who they claim to be. 
For example, you have to make sure only +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack +Security provides a standalone authentication mechanism that enables you to +quickly password-protect your cluster. If you're already using <>, +<>, or <> to manage +users in your organization, {security} is able to integrate with those +systems to perform user authentication. + +In many cases, simply authenticating users isn't enough. You also need a way to +control what data users have access to and what tasks they can perform. {security} +enables you to _authorize_ users by assigning access _privileges_ to _roles_, +and assigning those roles to users. For example, this +<> mechanism (a.k.a RBAC) enables +you to specify that the user `kandorra` can only perform read operations on the +`events` index and can't do anything at all with other indices. + +{security} also supports <>. You can +whitelist and blacklist specific IP addresses or subnets to control network-level +access to a server. + +[float] +[[preserving-data-integrity]] +=== Preserving Data Integrity + +A critical part of security is keeping confidential data confidential. +Elasticsearch has built-in protections against accidental data loss and +corruption. However, there's nothing to stop deliberate tampering or data +interception. {security} preserves the integrity of your data by +<> to and from nodes and +<> to verify that they +have not been tampered with or corrupted in transit during node-to-node +communication. For even greater protection, you can increase the +<> and +<>. + + +[float] +[[maintaining-audit-trail]] +=== Maintaining an Audit Trail + +Keeping a system secure takes vigilance. By using {security} to maintain +an audit trail, you can easily see who is accessing your cluster and what they're +doing. By analyzing access patterns and failed attempts to access your cluster, +you can gain insights into attempted attacks and data breaches. 
Keeping an +auditable log of the activity in your cluster can also help diagnose operational +issues. + +[float] +=== Where to Go Next + +* <> + steps through how to install and start using Security for basic authentication. + +* <> + provides more information about how Security supports user authentication, + authorization, and encryption. + +* <> + shows you how to interact with an Elasticsearch cluster protected by + X-Pack Security. + +* <> + provides detailed information about the access privileges you can grant to + users, the settings you can configure for Security in `elasticsearch.yml`, + and the files where Security configuration information is stored. + +[float] +=== Have Comments, Questions, or Feedback? + +Head over to our {security-forum}[Security Discussion Forum] +to share your experience, questions, and suggestions. +-- + +include::getting-started.asciidoc[] + +include::how-security-works.asciidoc[] + +include::authentication.asciidoc[] + +include::authorization.asciidoc[] + +include::auditing.asciidoc[] + +include::securing-communications.asciidoc[] + +include::using-ip-filtering.asciidoc[] + +include::tribe-clients-integrations.asciidoc[] + +include::reference.asciidoc[] \ No newline at end of file diff --git a/docs/en/security/limitations.asciidoc b/docs/en/security/limitations.asciidoc new file mode 100644 index 00000000000..4882c187fbc --- /dev/null +++ b/docs/en/security/limitations.asciidoc @@ -0,0 +1,87 @@ +[[security-limitations]] +== Security Limitations + +[float] +=== Plugins + +Elasticsearch's plugin infrastructure is extremely flexible in terms of what can +be extended. While it opens up Elasticsearch to a wide variety of (often custom) +additional functionality, when it comes to security, this high extensibility level +comes at a cost. We have no control over the third-party plugins' code (open +source or not) and therefore we cannot guarantee their compliance with {security}. 
+For this reason, third-party plugins are not officially supported on clusters +with {security} enabled. + +[float] +=== Changes in Index Wildcard Behavior + +Elasticsearch clusters with {security} enabled apply the `/_all` wildcard, and +all other wildcards, to the indices that the current user has privileges for, not +the set of all indices on the cluster. +While creating or retrieving aliases by providing wildcard expressions for alias names, if there are no existing authorized aliases +that match the wildcard expression provided an IndexNotFoundException is returned. + +[float] +=== Multi Document APIs + +Multi get and multi term vectors API throw IndexNotFoundException when trying to access non existing indices that the user is +not authorized for. By doing that they leak information regarding the fact that the index doesn't exist, while the user is not +authorized to know anything about those indices. + +[float] +=== Filtered Index Aliases + +Aliases containing filters are not a secure way to restrict access to individual +documents, due to the limitations described in <>. +{security} provides a secure way to restrict access to documents through the +<> feature. + +[float] +=== Field and Document Level Security Limitations + +When a user's role enables document or field level security for an index: + +* The user cannot perform write operations: +** The update API isn't supported. +** Update requests included in bulk requests aren't supported. +* The request cache is disabled for search requests. + +When a user's role enables document level security for an index: + +* Document level security isn't applied for APIs that aren't document based. + An example is the field stats API. +* Document level security doesn't affect global index statistics that relevancy + scoring uses. So this means that scores are computed without taking the role + query into account. Note that documents not matching with the role query are + never returned. 
The `has_child` and `has_parent` queries aren't supported as queries in the
  role definition.
diff --git a/docs/en/security/reference.asciidoc b/docs/en/security/reference.asciidoc new file mode 100644 index 00000000000..926d26bc276 --- /dev/null +++ b/docs/en/security/reference.asciidoc @@ -0,0 +1,12 @@ +[[security-reference]] +== Reference +* <> +* <> +* <> +* <> + +include::reference/privileges.asciidoc[] + +// include::reference/settings.asciidoc[] + +include::reference/files.asciidoc[] diff --git a/docs/en/security/reference/files.asciidoc b/docs/en/security/reference/files.asciidoc new file mode 100644 index 00000000000..d1529a07d69 --- /dev/null +++ b/docs/en/security/reference/files.asciidoc @@ -0,0 +1,41 @@ +[[security-files]] +=== Security Files + +The {security} uses the following files: + +* `CONFIG_DIR/x-pack/roles.yml` defines the roles in use on the cluster + (read more <>). + +* `CONFIG_DIR/x-pack/users` defines the users and their hashed passwords for + the <>. + +* `CONFIG_DIR/x-pack/users_roles` defines the user roles assignment for the + the <>. + +* `CONFIG_DIR/x-pack/role_mapping.yml` defines the role assignments for a + Distinguished Name (DN) to a role. This allows for LDAP and Active Directory + groups and users and PKI users to be mapped to roles (read more + <>). + +* `CONFIG_DIR/x-pack/log4j2.properties` contains audit information (read more + <>). + +* `CONFIG_DIR/x-pack/system_key` holds a cluster secret key that's used to + authenticate messages during node to node communication. For more information, + see <>. + +[[security-files-location]] + +IMPORTANT: Any files that {security} uses must be stored in the Elasticsearch + configuration directory. Elasticsearch runs with restricted permissions + and is only permitted to read from the locations configured in the + directory layout for enhanced security. + +Several of these files are in the YAML format. When you edit these files, be +aware that YAML is indentation-level sensitive and indentation errors can lead +to configuration errors. 
Avoid the tab character to set indentation levels, or +use an editor that automatically expands tabs to spaces. + +Be careful to properly escape YAML constructs such as `:` or leading exclamation +points within quoted strings. Using the `|` or `>` characters to define block +literals instead of escaping the problematic characters can help avoid problems. diff --git a/docs/en/security/reference/privileges.asciidoc b/docs/en/security/reference/privileges.asciidoc new file mode 100644 index 00000000000..a7bd32f5942 --- /dev/null +++ b/docs/en/security/reference/privileges.asciidoc @@ -0,0 +1,93 @@ +[[security-privileges]] +=== Security Privileges + +This section lists the privileges that you can assign to a role. + +[[privileges-list-cluster]] +==== Cluster Privileges + +[horizontal] +`all`:: +All cluster administration operations, like snapshotting, node shutdown/restart, +settings update, rerouting, or managing users and roles. + +`monitor`:: +All cluster read-only operations, like cluster health & state, hot threads, node +info, node & cluster stats, snapshot/restore status, pending cluster tasks. + +`manage`:: +Builds on `monitor` and adds cluster operations that change values in the cluster. +This includes snapshotting,updating settings, and rerouting. This privilege does +not include the ability to manage security. + +`manage_security`:: +All security related operations such as CRUD operations on users and roles and +cache clearing. + +`manage_index_templates`:: +All operations on index templates. + +`manage_pipeline`:: +All operations on ingest pipelines. + +`transport_client`:: +All privileges necessary for a transport client to connect. + +[[privileges-list-indices]] +==== Indices Privileges + +[horizontal] +`all`:: +Any action on an index + +`monitor`:: +All actions that are required for monitoring (recovery, segments info, index stats +& status). 
+ +`manage`:: +All `monitor` privileges plus index administration (aliases, analyze, cache clear, +close, delete, exists, flush, mapping, open, force merge, refresh, settings, +search shards, templates, validate). + +`view_index_metadata`:: +Read-only access to index metadata (aliases, aliases exists, get index, exists, field mappings, +mappings, search shards, type exists, validate, warmers, settings). This +privilege is primarily available for use by <>. + +`read`:: +Read only access to actions (count, explain, get, mget, get indexed scripts, +more like this, multi percolate/search/termvector, percolate, scroll, +clear_scroll, search, tv). Also grants access to the update mapping +action. + +`index`:: +Privilege to index and update documents. Also grants access to the update +mapping action. + +`create`:: +Privilege to index documents. Also grants access to the update mapping +action. + +`delete`:: +Privilege to delete documents. + +`write`:: +Privilege to perform all write operations to documents, which includes the +permission to index, update, and delete documents as well as performing bulk +operations. Also grants access to the update mapping action. + +`delete_index`:: +Privilege to delete an index. + +`create_index`:: +Privilege to create an index. A create index request may contain aliases to be +added to the index once created. In that case the request requires the `manage` +privilege as well, on both the index and the aliases names. + +==== Run As Privilege + +The `run_as` permission enables an authenticated user to submit requests on +behalf of another user. The value can be a user name or a comma-separated list +of user names. (You can also specify users as an array of strings or a YAML +sequence.) For more information, see +<>. 
diff --git a/docs/en/security/release-notes.asciidoc b/docs/en/security/release-notes.asciidoc new file mode 100644 index 00000000000..73af4b2f802 --- /dev/null +++ b/docs/en/security/release-notes.asciidoc @@ -0,0 +1,329 @@ +[[security-release-notes]] +== Shield Release Notes (Pre-5.0) + +[float] +[[update-roles]] +=== Updated Role Definitions +The default role definitions in the `roles.yml` file may need to be changed to ensure proper interoperation with other +applications such as Monitoring and Kibana. Any role changes are stored in `roles.yml.new` when you upgrade. We recommend copying the following changes to your `roles.yml` file. + +* The `kibana4` role now grants access to the Field Stats API. +* The permission on all the roles are updated to the verbose format to make it easier to enable field level and document level security. The `transport_client` role has been updated to work with Elasticsearch 2.0.0. + The `marvel_user` role has been updated to work with Monitoring 2.0 and a `remote_marvel_agent` role has been added. The `kibana3` and `marvel_agent` roles have been removed. +* `kibana` role added that defines the minimum set of permissions necessary for the Kibana 4 server. +* `kibana4` role updated to work with new features in Kibana 4 RC1 + +[float] +[[security-change-list]] +=== Change List + +[float] +==== 2.4.2 +November 22, 2016 + +.Bug Fixes +* Users with `manage` or `manage_security` cluster privileges can now access the `.security` index if they have the appropriate index +privileges. + +.Breaking Changes +* Shield on tribe nodes now requires `tribe.on_conflict` to prefer one of the clusters. + +[float] +==== 2.4.0 +August 31, 2016 + +.Breaking Changes +* The `monitor` cluster privilege now grants access to the GET `/_license` API + + +[float] +==== 2.3.5 +August 3, 2016 + +.Bug Fixes + +* Fixed a license problem that was preventing tribe nodes from working with +Shield. 
+ +[float] +==== 2.3.4 +July 7, 2016 + +.Bug Fixes +* The `default` transport profile SSL settings now override the `shield.ssl.*` +settings properly. +* Fixed a memory leak that occurred when indices were deleted or closed. + +[float] +==== 2.3.3 +May 18, 2016 + +.Bug Fixes +* Fixed the `/_shield/realm/{realms}/_cache/clear` REST endpoint. This endpoint is deprecated and `/_shield/realm/{realms}/_clear_cache` should be used going forward. + +[float] +==== 2.3.2 +April 26, 2016 + +.Bug Fixes +* Date math expressions in index names are now resolved before attempting to authorize access to the indices. +* Fixed an issue where active directory realms did not work unless the url setting was configured. +* Enabled `_cat/indices` to be used when Shield is installed. + +[float] +==== 2.3.1 +April 4, 2016 + +.Bug Fixes +* Fixed an issue that could prevent nodes from joining the cluster. + +[float] +==== 2.3.0 +March 30, 2016 + +.New Features +* <> with support for <>. +* <> have been added. + +.Bug Fixes +* When evaluating permissions for multiple roles that have document level security enabled for the same index, Shield performed an `AND` +on the queries, which is not consistent with how role privileges work in Shield. This has been changed to an `OR` relationship and may +affect the behavior of existing roles; please ensure you are not relying on the `AND` behavior of document level security queries. +* When evaluating permissions for a user that has roles with and without document level security (and/or field level security), the roles that +granted unrestricted access were not being applied properly and the user's access was still being restricted. + +.Enhancements +* Added new <> to simplify access control. + +[float] +==== 2.2.1 +March 15, 2016 + +.Bug Fixes +* Enable <> by default. +* Fix issues with <> on certain JDKs that do not support cloning message +authentication codes. 
+* Built in <> no longer throw an exception if the `Authorization` header does not contain a basic +authentication token. +* Ensure each tribe client node has the same shield configuration as defined in the settings. + +[float] +==== 2.2.0 +February 2, 2016 + +.New Features +* Shield plugin for Kibana: Secures user sessions and enables users to log in and out of Kibana. +For information about installing the Shield plugin, see <>. + +.Bug Fixes +* Update requests (including within bulk requests) are blocked when document +and field level security is enabled. + +[float] +==== 2.1.2 +February 2, 2016 + +.Enhancements +* Adds support for Elasticsearch 2.1.2 + +[float] +==== 2.1.1 +December 17, 2015 + +.Bug Fixes +* Disable the request cache when <> is in use for a search request. +* Fix startup failures when using auditing and <>. +* Updated the `kibana4` role to include the Field Stats API. + +[float] +==== 2.1.0 +November 24, 2015 + +.Breaking Changes +* Same as 2.0.1. <> is now disabled by default. Set `shield.dls_fls.enabled` to `true` in `elasticsearch.yml` to enable it. You cannot submit `_bulk` update requests when document and field level security is enabled. + +.Enhancements +* Adds support for Elasticsearch 2.1.0. + +[float] +==== 2.0.2 +December 16, 2015 + +.Bug Fixes +* Disable the request cache when <> is in use for a search request. + +[float] +==== 2.0.1 +November 24, 2015 + +.Breaking Changes +* <> is now disabled by default. Set `shield.dls_fls.enabled` to `true` in `elasticsearch.yml` to enable it. You cannot submit `_bulk` update requests when document and field level security is enabled. + +.Enhancement +* Adds support for Elasticsearch 2.0.1. + +[float] +==== 2.0.0 +October 28, 2015 + +.Breaking Changes +* All files that Shield uses must be kept in the <> due to the enhanced security of Elasticsearch 2.0. +* The network format has been changed from all previous versions of Shield and a full cluster restart is required to upgrade to Shield 2.0. 
+ +.New Features +* <> support has been added and can be +configured per role. +* Support for <> has been added, allowing Shield to integrate with more authentication sources and methods. +* <> has also been added, which allows a user to send a request to Elasticsearch that will be run +with the specified user's permissions. + +.Bug Fixes +* <> now captures requests from nodes using a different system key as tampered requests. +* The <> stores the type of request when available. +* `esusers` and `syskeygen` work when spaces are in the Elasticsearch installation path. +* Fixed a rare issue where authentication fails even when the username and password are correct. + +[float] +==== 1.3.3 + +.Bug Fixes +* Fixed a rare issue where authentication fails even when the username and password are correct. +* The <> stores the type of request when available. + +.Enhancements +* Tampered requests with a bad header are now audited. + +[float] +==== 1.3.2 +August 10, 2015 + +.Bug Fixes +* When using the <> mechanism, connection errors during startup no longer cause the node to stop. +* The <> no longer generates invalid JSON. +* The <> starts properly when forwarding the audit events to a remote cluster and uses +the correct user to index the audit events. + +[float] +==== 1.3.1 +July 21, 2015 + +.Bug Fixes +* Fixes <> serialization to work with Shield 1.2.1 and earlier. +** NOTE: if you are upgrading from Shield 1.3.0 or Shield 1.2.2 a {ref-17}/setup-upgrade.html#restart-upgrade[cluster restart upgrade] +will be necessary. When upgrading from other versions of Shield, follow the normal upgrade procedure. + +[float] +==== 1.3.0 +June 24, 2015 + +.Breaking Changes +* The `sha2` and `apr1` hashing algorithms have been removed as options for the <>. + If your existing Shield installation uses either of these options, remove the setting and use the default `ssha256` + algorithm. +* The `users` file now only supports `bcrypt` password hashing. 
All existing passwords stored using the `esusers` tool + have been hashed with `bcrypt` and are not affected. + +.New Features +* <>: Adds Public Key Infrastructure (PKI) authentication through the use of X.509 certificates in place of + username and password credentials. +* <>: An index based output has been added for storing audit events in an Elasticsearch index. + +.Enhancements +* TLS 1.2 is now the default protocol. +* Clients that do not support pre-emptive basic authentication can now support both anonymous and authenticated access + by specifying the `shield.authc.anonymous.authz_exception` <> with a value of `false`. +* Reduced logging for common SSL exceptions, such as a client closing the connection during a handshake. + +.Bug Fixes +* The `esusers` and `syskeygen` tools now work correctly with environment variables in the RPM and DEB installation + environment files `/etc/sysconfig/elasticsearch` and `/etc/default/elasticsearch`. +* Default ciphers no longer include `TLS_DHE_RSA_WITH_AES_128_CBC_SHA`. + +[float] +==== 1.2.3 +July 21, 2015 + +.Bug Fixes +* Fixes <> serialization to work with Shield 1.2.1 and earlier. +** NOTE: if you are upgrading from Shield 1.2.2 a {ref-17}/setup-upgrade.html#restart-upgrade[cluster restart upgrade] +will be necessary. When upgrading from other versions of Shield, follow the normal upgrade procedure. + +[float] +==== 1.2.2 +June 24, 2015 + +.Bug Fixes +* The `esusers` tool no longer warns about missing roles that are properly defined in the `roles.yml` file. +* The period character, `.`, is now allowed in usernames and role names. +* The {ref-17}/query-dsl-terms-filter.html#_caching_19[terms filter lookup cache] has been disabled to ensure all requests + are properly authorized. This removes the need to manually disable the terms filter cache. +* For LDAP client connections, only the protocols and ciphers specified in the `shield.ssl.supported_protocols` and + `shield.ssl.ciphers` <> will be used. 
+* The auditing mechanism now logs authentication failed events when a request contains an invalid authentication token. + +[float] +==== 1.2.1 +April 29, 2015 + +.Bug Fixes +* Several bug fixes including a fix to ensure that {ref}/disk-allocator.html[Disk-based Shard Allocation] +works properly with Shield + +[float] +==== 1.2.0 +March 24, 2015 + +.Enhancements +* Adds support for Elasticsearch 1.5 + +[float] +==== 1.1.1 +April 29, 2015 + +.Bug Fixes +* Several bug fixes including a fix to ensure that {ref}/disk-allocator.html[Disk-based Shard Allocation] +works properly with Shield + +[float] +==== 1.1.0 +March 24, 2015 + +.New Features +* LDAP: +** Add the ability to bind as a specific user for LDAP searches, which removes the need to specify `user_dn_templates`. +This mode of operation also makes use of connection pooling for better performance. Please see <> +for more information. +** User distinguished names (DNs) can now be used for <>. +* Authentication: +** <> is now supported (disabled by default). +* IP Filtering: +** IP Filtering settings can now be <> using the {ref}/cluster-update-settings.html[Cluster Update Settings API]. + +.Enhancements +* Significant memory footprint reduction of internal data structures +* Test if SSL/TLS ciphers are supported and warn if any of the specified ciphers are not supported +* Reduce the amount of logging when a non-encrypted connection is opened and `https` is being used +* Added the <>, which is a role that contains the minimum set of permissions required for the Kibana 4 server. 
+* In-memory user credential caching hash algorithm defaults now to salted SHA-256 (see <> + +.Bug Fixes +* Filter out sensitive settings from the settings APIs + +[float] +==== 1.0.2 +March 24, 2015 + +.Bug Fixes +* Filter out sensitive settings from the settings APIs +* Significant memory footprint reduction of internal data structures + +[float] +==== 1.0.1 +February 13, 2015 + +.Bug Fixes +* Fixed dependency issues with Elasticsearch 1.4.3 and (Lucene 4.10.3 that comes with it) +* Fixed bug in how user roles were handled. When multiple roles were defined for a user, and one of the + roles only had cluster permissions, not all privileges were properly evaluated. +* Updated `kibana4` permissions to be compatible with Kibana 4 RC1 +* Ensure the mandatory `base_dn` settings is set in the `ldap` realm configuration diff --git a/docs/en/security/securing-communications.asciidoc b/docs/en/security/securing-communications.asciidoc new file mode 100644 index 00000000000..f28221939d2 --- /dev/null +++ b/docs/en/security/securing-communications.asciidoc @@ -0,0 +1,23 @@ +[[encrypting-communications]] +== Encrypting Communications + +Elasticsearch nodes store data that may be confidential. Attacks on the data may +come from the network. These attacks could include sniffing of the data, +manipulation of the data, and attempts to gain access to the server and thus the +files storing the data. Securing your nodes with the procedures below helps to +reduce risk from network-based attacks. + +This section shows how to: + +* Encrypt traffic to, from and within an Elasticsearch cluster using SSL/TLS, +* Require nodes to authenticate as they join the cluster using SSL certificates, and +* Make it more difficult for remote attackers to issue any commands to Elasticsearch. + +The authentication of new nodes helps prevent a rogue node from joining the +cluster and receiving data through replication. 
+ +include::securing-communications/setting-up-ssl.asciidoc[] + +include::securing-communications/enabling-cipher-suites.asciidoc[] + +include::securing-communications/separating-node-client-traffic.asciidoc[] \ No newline at end of file diff --git a/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc b/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc new file mode 100644 index 00000000000..e7feb6d161d --- /dev/null +++ b/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc @@ -0,0 +1,24 @@ +[[ciphers]] +=== Enabling Cipher Suites for Stronger Encryption + +The TLS and SSL protocols use a cipher suite that determines the strength of +encryption used to protect the data. You may want to increase the strength of +encryption used when using an Oracle JVM; the IcedTea OpenJDK ships without these +restrictions in place. This step is not required to successfully use encrypted +communication. + +The _Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy +Files_ enable the use of additional cipher suites for Java in a separate JAR file +that you need to add to your Java installation. You can download this JAR file +from Oracle's http://www.oracle.com/technetwork/java/javase/downloads/index.html[download page]. +The _JCE Unlimited Strength Jurisdiction Policy Files_ are required for +encryption with key lengths greater than 128 bits, such as 256-bit AES encryption. + +After installation, all cipher suites in the JCE are available for use. To enable +the use of stronger cipher suites with {security}, configure the `cipher_suites` +parameter. See the <> +section of this document for specific parameter information. + +NOTE: The _JCE Unlimited Strength Jurisdiction Policy Files_ must be installed + on all nodes in the cluster to establish an improved level of encryption + strength. 
diff --git a/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc b/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc new file mode 100644 index 00000000000..e02f99d6108 --- /dev/null +++ b/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc @@ -0,0 +1,78 @@ +[[separating-node-client-traffic]] +=== Separating node-to-node and client traffic + +Elasticsearch has the feature of so called {ref}/modules-transport.html#_tcp_transport_profiles[TCP transport profiles] +that allows it to bind to several ports and addresses. {security} extends on this +functionality to enhance the security of the cluster by enabling the separation +of node-to-node transport traffic from client transport traffic. This is important +if the client transport traffic is not trusted and could potentially be malicious. +To separate the node-to-node traffic from the client traffic, add the following +to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client: <1> + port: 9500-9600 <2> + shield: + type: client <3> +-------------------------------------------------- +<1> `client` is the name of this example profile +<2> The port range that will be used by transport clients to communicate with + this cluster +<3> Categorizes the profile as a `client`. This accounts for additional security + filters by denying request attempts on for internal cluster operations + (e.g shard level actions and ping requests) from this profile. 
+ +If supported by your environment, an internal network can be used for node-to-node +traffic and public network can be used for client traffic by adding the following +to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.default.bind_host: 10.0.0.1 <1> +transport.profiles.client.bind_host: 1.1.1.1 <2> +-------------------------------------------------- +<1> The bind address for the network that will be used for node-to-node communication +<2> The bind address for the network used for client communication + +If separate networks are not available, then <> can +be enabled to limit access to the profiles. + +The TCP transport profiles also allow for enabling SSL on a per profile basis. +This is useful if you have a secured network for the node-to-node communication, +but the client is on an unsecured network. To enable SSL on a client profile when +SSL is disabled for node-to-node communication, add the following to +`elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client.xpack.security.ssl.enabled: true <1> +-------------------------------------------------- +<1> This enables SSL on the client profile. The default value for this setting + is the value of `xpack.security.transport.ssl.enabled`. 
+ +When using SSL for transport, a different set of certificates can also be used +for the client traffic by adding the following to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client.xpack.security.ssl.truststore: + path: /path/to/another/truststore + password: changeme + +transport.profiles.client.xpack.security.ssl.keystore: + path: /path/to/another/keystore + password: changeme +-------------------------------------------------- + +To change the default behavior that requires certificates for transport clients, +set the following value in the `elasticsearch.yml` file: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client.xpack.security.ssl.client_authentication: no +-------------------------------------------------- + +This setting keeps certificate authentication active for node-to-node traffic, +but removes the requirement to distribute a signed certificate to transport +clients. Please see the <> section. diff --git a/docs/en/security/securing-communications/setting-up-ssl.asciidoc b/docs/en/security/securing-communications/setting-up-ssl.asciidoc new file mode 100644 index 00000000000..a46e4d4c5bf --- /dev/null +++ b/docs/en/security/securing-communications/setting-up-ssl.asciidoc @@ -0,0 +1,258 @@ +[[ssl-tls]] +=== Setting Up SSL/TLS on a Cluster + +{security} enables you to encrypt traffic to, from and within your Elasticsearch +cluster. Connections are secured using Transport Layer Security (TLS), which is +commonly referred to as "SSL". + +WARNING: Clusters that do not have encryption enabled send all data in plain text +including passwords. + +To enable encryption, you need to perform the following steps on each node in +the cluster: + +. <>. + +. <> to: +.. Identify itself using its signed certificate. +.. Enable SSL on the transport and HTTP layers. + +. Restart Elasticsearch. 
+ +[[installing-node-certificates]] +==== Node Certificates + +TLS requires X.509 certificates to perform encryption and authentication of the application +that is being communicated with. In order for the communication between nodes to be truly +secure, the certificates must be validated. The recommended approach for validating +certificate authenticity in a Elasticsearch cluster is to trust the certificate authority (CA) +that signed the certificate. By doing this, as nodes are added to your cluster they just need +to use a certificate signed by the same CA and the node is automatically allowed to join the +cluster. Additionally, it is recommended that the certificates contain subject alternative +names (SAN) that correspond to the node's ip address and dns name so that hostname verification +can be performed. + +In order to simplify the process of generating certificates for the Elastic Stack, a command +line tool, `certgen` has been included with {xpack}. This tool takes care of the generating +a CA and signing certificates with the CA. `certgen` can be used interactively or in a silent +mode through the use of an input file. The `certgen` tool also supports generation of certificate +signing requests (CSR), so that a commercial or organization specific CA may be used to sign +the certificates. + +NOTE: If you choose not to use the `certgen`, the certificates that you obtain must allow for both +`clientAuth` and `serverAuth` if the extended key usage extension is present. The certificates +need to be in PEM format. Although not required, it is highly recommended that the certificate contain +the dns name(s) and/or ip address(es) of the node so that hostname verification may be used. + +[[generating-signed-certificates]] +===== Generating Certificates with `certgen` + +The `certgen` tool can be used to generate a CA and signed certificates for your nodes. The tool +can be used interactively: + +[listing] +.... 
+bin/x-pack/certgen +This tool assists you in the generation of X.509 certificates and certificate +signing requests for use with SSL in the Elastic stack. Depending on the command +line option specified, you may be prompted for the following: + +* The path to the output file + * The output file is a zip file containing the signed certificates and + private keys for each instance. If a Certificate Authority was generated, + the certificate and private key will also be included in the output file. +* Information about each instance + * An instance is any piece of the Elastic Stack that requires a SSL certificate. + Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats + may all require a certificate and private key. + * The minimum required value for each instance is a name. This can simply be the + hostname, which will be used as the Common Name of the certificate. A full + distinguished name may also be used. + * IP addresses and DNS names are optional. Multiple values can be specified as a + comma separated string. If no IP addresses or DNS names are provided, you may + disable hostname verification in your SSL configuration. +* Certificate Authority private key password + * The password may be left empty if desired. + +Let's get started... + +Please enter the desired output file [/home/es/config/x-pack/certificate-bundle.zip]: +Enter instance name: node01 +Enter name for directories and files [node01]: +Enter IP Addresses for instance (comma-separated if more than one) []: 10.10.0.1 +Enter DNS names for instance (comma-separated if more than one) []: node01.mydomain.com,node01 +Would you like to specify another instance? 
Press 'y' to continue entering instance information: y +Enter instance name: node02 +Enter name for directories and files [node02]: +Enter IP Addresses for instance (comma-separated if more than one) []: 10.10.0.2 +Enter DNS names for instance (comma-separated if more than one) []: node02.mydomain.com +Would you like to specify another instance? Press 'y' to continue entering instance information: +Certificates written to /Users/jmodi/dev/tmp/elasticsearch-5.0.0-alpha5-SNAPSHOT/config/x-pack/certificate-bundle.zip + +This file should be properly secured as it contains the private keys for all +instances and the certificate authority. + +After unzipping the file, there will be a directory for each instance containing +the certificate and private key. Copy the certificate, key, and CA certificate +to the configuration directory of the Elastic product that they will be used for +and follow the SSL configuration instructions in the product guide. + +For client applications, you may only need to copy the CA certificate and +configure the client to trust this certificate. +.... + +The usage of `certgen` above generates a zip file with the CA certificate, private key, two signed certificates and keys +in PEM format for `node01` and `node02`. + +[[generating-csr]] +===== Generating Certificate Signing Requests with `certgen` + +When using a commercial or organization specific CA, the `certgen` tool may be used to generate +certificate signing requests (CSR) for the nodes in your cluster: + +[listing] +.... +bin/x-pack/certgen -csr +This tool assists you in the generation of X.509 certificates and certificate +signing requests for use with SSL in the Elastic stack. Depending on the command +line option specified, you may be prompted for the following: + +* The path to the output file + * The output file is a zip file containing the certificate signing requests + and private keys for each instance. 
+* Information about each instance + * An instance is any piece of the Elastic Stack that requires a SSL certificate. + Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats + may all require a certificate and private key. + * The minimum required value for each instance is a name. This can simply be the + hostname, which will be used as the Common Name of the certificate. A full + distinguished name may also be used. + * IP addresses and DNS names are optional. Multiple values can be specified as a + comma separated string. If no IP addresses or DNS names are provided, you may + disable hostname verification in your SSL configuration. + +Let's get started... + +Please enter the desired output file [/home/es/config/x-pack/csr-bundle.zip]: +Enter instance name: node01 +Enter name for directories and files [node01]: +Enter IP Addresses for instance (comma-separated if more than one) []: 10.10.0.1 +Enter DNS names for instance (comma-separated if more than one) []: node01.mydomain.com,node01 +Would you like to specify another instance? Press 'y' to continue entering instance information: y +Enter instance name: node02 +Enter name for directories and files [node02]: +Enter IP Addresses for instance (comma-separated if more than one) []: 10.10.0.2 +Enter DNS names for instance (comma-separated if more than one) []: node02.mydomain.com +Would you like to specify another instance? Press 'y' to continue entering instance information: +Certificate signing requests written to /Users/jmodi/dev/tmp/elasticsearch-5.0.0-alpha5-SNAPSHOT/config/x-pack/csr-bundle.zip + +This file should be properly secured as it contains the private keys for all +instances. + +After unzipping the file, there will be a directory for each instance containing +the certificate signing request and the private key. Provide the certificate +signing requests to your certificate authority. 
Once you have received the +signed certificate, copy the signed certificate, key, and CA certificate to the +configuration directory of the Elastic product that they will be used for and +follow the SSL configuration instructions in the product guide. +.... + +The usage of `certgen` above generates a zip file with two CSRs and private +keys. The CSRs should be provided to the CA in order to obtain the signed +certificates. The signed certificates will need to be in PEM format in order to +be used. + +===== Using `certgen` in Silent Mode + +`certgen` supports a silent mode of operation to enable easier batch operations. In order +to use this mode, a YAML file containing the information about the instances needs to be +created matching the format shown below: + +[source, yaml] +-------------------------------------------------- +instances: + - name: "node1" <1> + ip: <2> + - "192.0.2.1" + dns: <3> + - "node1.mydomain.com" + - name: "node2" + ip: + - "192.0.2.2" + - "198.51.100.1" + - name: "node3" + - name: "node4" + dns: + - "node4.mydomain.com" + - "node4.internal" + - name: "CN=node5,OU=IT,DC=mydomain,DC=com" + filename: "node5" <4> +-------------------------------------------------- +<1> The name of the instance. This can be a simple string value or can be a Distinguished Name (DN). This is the only required field. +<2> An optional array of strings that represent IP Addresses for this instance. Both IPv4 and IPv6 values are allowed. The values will +be added as Subject Alternative Names. +<3> An optional array of strings that represent DNS names for this instance. The values will be added as Subject Alternative Names. +<4> The filename to use for this instance. This name will be the name of the directory in the zip file that this instance's files will +be stored in and it will be used in the naming of the files within the directory. This filename should not have an extension. 
Note: If +the `name` provided for the instance does not represent a valid filename, then the `filename` field must be present. + +With the YAML file ready, the `certgen` tool can be used to generate certificates or certificate signing requests. Simply pass the file's +path to `certgen` using the `-in` option. For example: + +[source, sh] +-------------------------------------------------- +bin/x-pack/certgen -in instances.yml <1> +-------------------------------------------------- +<1> Generates a CA certificate and private key in addition to certificates and private keys for the instances +contained in the YAML file. The other options to the tool can be specified in addition to the `-in` option. For all of the available +options, run `bin/x-pack/certgen -h`. + +[[enable-ssl]] +==== Enabling SSL in the Node Configuration + +Once you have the signed certificate, private key, and CA certificate you need to +modify the node configuration to enable SSL. + +[[configure-ssl]] +To enable SSL, make the following changes in `elasticsearch.yml`: + +. Specify the location of the node's keystore and the password(s) needed to +access the node's certificate. For example: ++ +-- +[source, yaml] +-------------------------------------------------- +xpack.ssl.key: /home/es/config/x-pack/node01.key <1> +xpack.ssl.certificate: /home/es/config/x-pack/node01.crt <2> +xpack.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3> +-------------------------------------------------- +<1> The full path to the node key file. This must be a location within the + Elasticsearch configuration directory. +<2> The full path to the node certificate. This must be a location within the + Elasticsearch configuration directory. +<3> An array of paths to the CA certificates that should be trusted. These paths + must be a location within the Elasticsearch configuration directory. +-- + +. 
Enable SSL on the transport networking layer to ensure that communication +between nodes is encrypted: ++ +[source, yaml] +-------------------------------------------------- +xpack.security.transport.ssl.enabled: true +-------------------------------------------------- ++ +. Enable SSL on the HTTP layer to ensure that communication between HTTP clients +and the cluster is encrypted: ++ +[source, yaml] +-------------------------------------------------- +xpack.security.http.ssl.enabled: true +-------------------------------------------------- ++ + +. Restart Elasticsearch. + +NOTE: All SSL related node settings are considered to be highly sensitive + and therefore are not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. diff --git a/docs/en/security/tribe-clients-integrations.asciidoc b/docs/en/security/tribe-clients-integrations.asciidoc new file mode 100644 index 00000000000..a059f4da956 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations.asciidoc @@ -0,0 +1,42 @@ +[[tribe-clients-integrations]] +== Tribe, Clients and Integrations + +When using a {ref}/modules-tribe.html[Tribe Node] you need to take extra steps to secure its communication +with the connected clusters. + +* <> + +You will need to update the configuration for several clients to work with a +secured cluster: + +* <> +* <> + + +{security} enables you to secure your Elasticsearch cluster. But Elasticsearch +itself is only one product within the Elastic Stack.
It is often the case that +other products in the stack are connected to the cluster and therefore need to +be secured as well, or at least communicate with the cluster in a secured way: + +* <> +* <> +* <> +* <> + +include::tribe-clients-integrations/tribe.asciidoc[] + +include::tribe-clients-integrations/java.asciidoc[] + +include::tribe-clients-integrations/http.asciidoc[] + +include::tribe-clients-integrations/hadoop.asciidoc[] + +include::tribe-clients-integrations/logstash.asciidoc[] + +include::tribe-clients-integrations/beats.asciidoc[] + +include::tribe-clients-integrations/kibana.asciidoc[] + +include::tribe-clients-integrations/monitoring.asciidoc[] + +include::tribe-clients-integrations/reporting.asciidoc[] diff --git a/docs/en/security/tribe-clients-integrations/beats.asciidoc b/docs/en/security/tribe-clients-integrations/beats.asciidoc new file mode 100644 index 00000000000..58eaa81aa08 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/beats.asciidoc @@ -0,0 +1,187 @@ +[[beats]] +=== Beats and Security + +To send data to a secured cluster through the `elasticsearch` output, +a Beat needs to authenticate as a user who can manage index templates, +monitor the cluster, create indices, and read from and write to the indices +it creates. + +If encryption is enabled on the cluster, you also need to enable HTTPS in the +Beat configuration. + +In addition to configuring authentication credentials for the Beat itself, you +need to grant authorized users permission to access the indices it creates. + +[float] +[[beats-basic-auth]] +==== Configuring Authentication Credentials for a Beat + +When sending data to a secured cluster through the `elasticsearch` +output, a Beat must either provide basic authentication credentials +or present a client certificate. + +To configure authentication credentials for a Beat: + +.
Create a role that has the `manage_index_templates` and +`monitor` cluster privileges, and `read`, `write`, and `create_index` +privileges for the indices the Beat creates. You can create roles from the +**Management / Roles** UI in Kibana or through the `role` API. +For example, the following request creates a `packetbeat_writer` role: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/role/packetbeat_writer +{ + "cluster": ["manage_index_templates", "monitor"], + "indices": [ + { + "names": [ "packetbeat-*" ], <1> + "privileges": ["write","create_index"] + } + ] +} +--------------------------------------------------------------- +<1> If you use a custom Packetbeat index pattern, specify that pattern +instead of the default `packetbeat-*` pattern. + +. Assign the writer role to the user the Beat is going to use to +connect to Elasticsearch: + +.. To authenticate as a native user, create a user for the Beat +to use internally and assign it the writer role. You can create +users from the **Management / Users** UI in Kibana or through the +`user` API. For example, the following request creates a +`packetbeat_internal` user that has the `packetbeat_writer` role: ++ +[source, sh] +--------------------------------------------------------------- +POST /_xpack/security/user/packetbeat_internal +{ + "password" : "changeme", + "roles" : [ "packetbeat_writer"], + "full_name" : "Internal Packetbeat User" +} +--------------------------------------------------------------- + +.. To authenticate using PKI authentication, assign the writer role +to the internal Beat user in the <> +configuration file. Specify the user by the distinguished name that +appears in its certificate. ++ +[source, yaml] +--------------------------------------------------------------- +packetbeat_writer: + - "cn=Internal Packetbeat User,ou=example,o=com" +--------------------------------------------------------------- + +. 
Configure authentication credentials for the `elasticsearch` output +in the Beat configuration file: + +.. To use basic authentication, configure the `username` and `password` +settings. For example, the following Packetbeat output configuration +uses the native `packetbeat_internal` user to connect to Elasticsearch: ++ +[source,js] +-------------------------------------------------- +output.elasticsearch: + hosts: ["localhost:9200"] + index: "packetbeat" + username: "packetbeat_internal" + password: "changeme" +-------------------------------------------------- + +.. To use PKI authentication, configure the `certificate` and +`key` settings: ++ +[source,js] +-------------------------------------------------- +output.elasticsearch: + hosts: ["localhost:9200"] + index: "packetbeat" + ssl.certificate: "/etc/pki/client/cert.pem" <1> + ssl.key: "/etc/pki/client/cert.key" +-------------------------------------------------- +<1> The distinguished name (DN) in the certificate must be mapped to +the writer role in the `role_mapping.yml` configuration file on each +node in the Elasticsearch cluster. + +[float] +[[beats-user-access]] +==== Granting Users Access to Beats Indices + +To enable users to access the indices a Beat creates, grant them `read` and +`view_index_metadata` privileges on the Beat indices: + +. Create a role that has the `read` and `view_index_metadata` +privileges for the Beat indices. You can create roles from the +**Management > Roles** UI in Kibana or through the `role` API. 
+For example, the following request creates a `packetbeat_reader` +role: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/role/packetbeat_reader +{ + "indices": [ + { + "names": [ "packetbeat-*" ], <1> + "privileges": ["read","view_index_metadata"] + } + ] +} +--------------------------------------------------------------- +<1> If you use a custom Packetbeat index pattern, specify that pattern +instead of the default `packetbeat-*` pattern. + +. Assign your users the reader role so they can access the Beat indices: + +.. If you're using the `native` realm, you can assign roles with the +**Management > Users** UI in Kibana or through the `user` API. For +example, the following request grants `packetbeat_user` +the `packetbeat_reader` role: ++ +[source, sh] +--------------------------------------------------------------- +POST /_xpack/security/user/packetbeat_user +{ + "password" : "changeme", + "roles" : [ "packetbeat_reader"], + "full_name" : "Packetbeat User" +} +--------------------------------------------------------------- + +.. If you're using the LDAP, Active Directory, or PKI realms, you +assign the roles in the <> configuration +file. For example, the following snippet grants `Packetbeat User` +the `packetbeat_reader` role: ++ +[source, yaml] +--------------------------------------------------------------- +packetbeat_reader: + - "cn=Packetbeat User,dc=example,dc=com" +--------------------------------------------------------------- + +[float] +[[beats-tls]] +===== Configuring Beats to use Encrypted Connections + +If encryption is enabled on the Elasticsearch cluster, you need to +connect to Elasticsearch via HTTPS. If the CA that signed your node certificates +is not in the host system's trusted certificate authorities list, you also need +to add the path to the `.pem` file that contains your CA's certificate to the +Beat configuration. 
+ +To configure a Beat to connect to Elasticsearch via HTTPS, add the `https` protocol +to all host URLs: + +[source,js] +-------------------------------------------------- +output.elasticsearch: + hosts: ["https://localhost:9200"] <1> + index: "packetbeat" + ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] <2> +-------------------------------------------------- +<1> Specify the `https` protocol to connect to the Elasticsearch cluster. +<2> Specify the path to the local `.pem` file that contains your Certificate +Authority's certificate. This is generally only needed if you use your +own CA to sign your node certificates. \ No newline at end of file diff --git a/docs/en/security/tribe-clients-integrations/hadoop.asciidoc b/docs/en/security/tribe-clients-integrations/hadoop.asciidoc new file mode 100644 index 00000000000..0613f1ef771 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/hadoop.asciidoc @@ -0,0 +1,23 @@ +[[hadoop]] +=== ES-Hadoop and Security + +Elasticsearch for Apache Hadoop ("ES-Hadoop") is capable of using HTTP basic and +PKI authentication and/or TLS/SSL when accessing an Elasticsearch cluster. For +full details please refer to the ES-Hadoop documentation, in particular the +`Security` section. + +For authentication purposes, select the user for your ES-Hadoop client (for +maintenance purposes it is best to create a dedicated user). Then, assign that +user to a role with the privileges required by your Hadoop/Spark/Storm job. +Configure ES-Hadoop to use the user name and password through the +`es.net.http.auth.user` and `es.net.http.auth.pass` properties. + +If PKI authentication is enabled, set up the appropriate `keystore` and `truststore` +instead through `es.net.ssl.keystore.location` and `es.net.ssl.truststore.location` +(and their respective `.pass` properties to specify the password). + +For secured transport, enable SSL/TLS through the `es.net.ssl` property by +setting it to `true`.
Depending on your SSL configuration (keystore, truststore, etc...) +you might need to set other parameters as well - please refer to the +http://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html[ES-Hadoop] documentation, +specifically the `Configuration` and `Security` chapters. diff --git a/docs/en/security/tribe-clients-integrations/http.asciidoc b/docs/en/security/tribe-clients-integrations/http.asciidoc new file mode 100644 index 00000000000..d78c32bc361 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/http.asciidoc @@ -0,0 +1,62 @@ +[[http-clients]] +=== HTTP/REST Clients and Security + +{security} works with standard HTTP {wikipedia}/Basic_access_authentication[basic authentication] +headers to authenticate users. Since Elasticsearch is stateless, this header must +be sent with every request: + +[source,shell] +-------------------------------------------------- +Authorization: Basic <1> +-------------------------------------------------- +<1> The `` is computed as `base64(USERNAME:PASSWORD)` + +[float] +==== Client examples + +This example uses `curl` without basic auth to create an index: + +[source,shell] +------------------------------------------------------------------------------- +curl -XPUT 'localhost:9200/idx' +------------------------------------------------------------------------------- + +[source,js] +------------------------------------------------------------------------------- +{ + "error": "AuthenticationException[Missing authentication token]", + "status": 401 +} +------------------------------------------------------------------------------- + +Since no user is associated with the request above, an authentication error is +returned. 
Now we'll use `curl` with basic auth to create an index as the +`rdeniro` user: + +[source,shell] +--------------------------------------------------------- +curl --user rdeniro:taxidriver -XPUT 'localhost:9200/idx' +--------------------------------------------------------- + +[source,js] +--------------------------------------------------------- +{ + "acknowledged": true +} +--------------------------------------------------------- + +[float] +==== Client Libraries over HTTP + +For more information about how to use {security} with the language specific clients +please refer to +https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication[Ruby], +http://elasticsearch-py.readthedocs.org/en/master/#ssl-and-authentication[Python], +https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl], +http://www.elastic.co/guide/en/elasticsearch/client/php-api/current/_security.html[PHP], +http://nest.azurewebsites.net/elasticsearch-net/security.html[.NET], +http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Javascript] + +//// +Groovy - TODO link +//// diff --git a/docs/en/security/tribe-clients-integrations/java.asciidoc b/docs/en/security/tribe-clients-integrations/java.asciidoc new file mode 100644 index 00000000000..3ceebef6657 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/java.asciidoc @@ -0,0 +1,259 @@ +[[java-clients]] +=== Java Client and Security + +{security} supports the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. +The transport client uses the same transport protocol that the cluster nodes use +for inter-node communication. It is very efficient as it does not have to marshall +and unmarshall JSON requests like a typical REST client. + +NOTE: Using the Java Node Client with secured clusters is not recommended or + supported. 
+ +[float] +[[transport-client]] +==== Configuring the Transport Client to work with a Secured Cluster + +To use the transport client with a secured cluster, you need to: + +[[java-transport-client-role]] +. Configure a user with the privileges required to start the transport client. +A default `transport_client` role is built-in to {xpack} that grants the +appropriate cluster permissions for the transport client to work with the secured +cluster. The transport client uses the _Nodes Info API_ to fetch information about +the nodes in the cluster. + +. Add the {xpack} transport JAR file to your CLASSPATH. You can download the {xpack} +distribution and extract the JAR file manually or you can get it from the +https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.jar[Elasticsearch Maven repository]. ++ +As with any dependency, you will also need its transitive dependencies. Refer to the +https://artifacts.elastic.co/maven/org/elasticsearch/client/x-pack-transport/{version}/x-pack-transport-{version}.pom[X-Pack POM file +for your version] when downloading for offline usage. +-- + +If you are using Maven, you need to add the {xpack} JAR file as a dependency in +your project's `pom.xml` file: + +[source,xml] +-------------------------------------------------------------- + + + + + + elasticsearch-releases + https://artifacts.elastic.co/maven + + true + + + false + + + ... + + ... + + + + + org.elasticsearch.client + x-pack-transport + {version} + + ... + + ... + + +-------------------------------------------------------------- + +If you are using Gradle, you need to add the {xpack} JAR file as a dependency in +your `build.gradle` file: + +[source,groovy] +-------------------------------------------------------------- +repositories { + /* ... Any other repositories ... 
*/ + + // Add the Elasticsearch Maven Repository + maven { + url "https://artifacts.elastic.co/maven" + } +} + +dependencies { + compile "org.elasticsearch.client:x-pack-transport:{version}" + + /* ... */ +} +-------------------------------------------------------------- +-- + +. Set up the transport client. At a minimum, you must configure `xpack.security.user` to +include the name and password of your transport client user in your requests. The +following snippet configures the user credentials globally--every request +submitted with this client includes the `transport_client_user` credentials in +its headers. ++ +[source,java] +------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:changeme") + ... + .build()) + .addTransportAddress(new InetSocketTransportAddress("localhost", 9300)) + .addTransportAddress(new InetSocketTransportAddress("localhost", 9301)); +------------------------------------------------------------------------------------------------- ++ +WARNING: If you configure a transport client without SSL, passwords are sent in + clear text. ++ +You can also add an `Authorization` header to each request. If you've configured +global authorization credentials, the `Authorization` header overrides the global +authentication credentials. This is useful when an application has multiple users +who access Elasticsearch using the same client. You can set the global token to +a user that only has the `transport_client` role, and add the `transport_client` +role to the individual users. 
++ +For example, the following snippet adds the `Authorization` header to a search +request: ++ +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.xpack.security.authc.support.SecuredString; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; + +import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:changeme") + ... + .build()) + .build() + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300)) + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9301)) + +String token = basicAuthHeaderValue("test_user", new SecuredString("changeme".toCharArray())); + +client.filterWithHeader(Collections.singletonMap("Authorization", token)) + .prepareSearch().get(); +-------------------------------------------------------------------------------------------------- + +. Enable SSL to authenticate clients and encrypt communications. To enable SSL, +you need to: + +.. Configure the paths to the client's key and certificate in addition to the certificate authorities. +Client authentication requires every client to have a certificate signed by a trusted CA. ++ +NOTE: Client authentication is enabled by default. For information about + disabling client authentication, see <>. ++ +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +...
+ +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:changeme") + .put("xpack.ssl.key", "/path/to/client.key") + .put("xpack.ssl.certificate", "/path/to/client.crt") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + ... + .build()); +-------------------------------------------------------------------------------------------------- + ++ +.. Enable the SSL transport by setting `xpack.security.transport.ssl.enabled` to `true` in the +client configuration. ++ +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:changeme") + .put("xpack.ssl.key", "/path/to/client.key") + .put("xpack.ssl.certificate", "/path/to/client.crt") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + .put("xpack.security.transport.ssl.enabled", "true") + ... + .build()) + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300)) + .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9301)) +-------------------------------------------------------------------------------------------------- + +[float] +[[disabling-client-auth]] +===== Disabling Client Authentication + +If you want to disable client authentication, you can use a client-specific +transport protocol. For more information see <>. + +If you are not using client authentication and sign the Elasticsearch node +certificates with your own CA, you need to provide the path to the CA +certificate in your client configuration. 
+ +[source,java] +------------------------------------------------------------------------------------------------------ +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "test_user:changeme") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + .put("xpack.security.transport.ssl.enabled", "true") + ... + .build()) + .addTransportAddress(new InetSocketTransportAddress("localhost", 9300)) + .addTransportAddress(new InetSocketTransportAddress("localhost", 9301)); +------------------------------------------------------------------------------------------------------ + +NOTE: If you are using a public CA that is already trusted by the Java runtime, + you do not need to set the `xpack.ssl.certificate_authorities`. + +[float] +[[connecting-anonymously]] +===== Connecting Anonymously + +To enable the transport client to connect anonymously, you must assign the +anonymous user the privileges defined in the <> +role. Anonymous access must also be enabled, of course. For more information, +see <>. + +[float] +[[security-client]] +==== Security Client + +{security} exposes its own API through the `SecurityClient` class. To get a hold +of a `SecurityClient` you'll first need to create the `XPackClient`, which is a +wrapper around the existing Elasticsearch clients (any client class implementing +`org.elasticsearch.client.Client`). + +The following example shows how you can clear {security}'s realm caches using +the `SecurityClient`: + +[source,java] +------------------------------------------------------------------------------------------------------ +Client client = ... 
// create the transport client + +XPackClient xpackClient = new XPackClient(client); +SecurityClient securityClient = xpackClient.security(); +ClearRealmCacheResponse response = securityClient.authc().prepareClearRealmCache() + .realms("ldap1", "ad1") <1> + .usernames("rdeniro") + .get(); +------------------------------------------------------------------------------------------------------ +<1> Clears the `ldap1` and `ad1` realm caches for the `rdeniro` user. diff --git a/docs/en/security/tribe-clients-integrations/kibana.asciidoc b/docs/en/security/tribe-clients-integrations/kibana.asciidoc new file mode 100644 index 00000000000..35df0153fc4 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/kibana.asciidoc @@ -0,0 +1,197 @@ + +[[kibana]] +=== Kibana and Security + +[[using-kibana-with-security]] +Kibana users have to log in when {security} is enabled on your cluster. You +configure {security} roles for your Kibana users to control what data those users +can access. You also need to configure credentials for the +Kibana server so the requests it submits to Elasticsearch on the user's +behalf can be authenticated. + +To prevent user passwords from being sent in the clear, you must configure +Kibana to encrypt communications between the browser and the Kibana server. +If you are encrypting traffic to and from the nodes in your Elasticsearch cluster, +you must also configure Kibana to connect to Elasticsearch via HTTPS. + +With {security} enabled, if you load a Kibana dashboard that accesses data in an +index that you are not authorized to view, you get an error that indicates the +index does not exist. {security} does not currently provide a way to control which +users can load which dashboards. + +IMPORTANT: Support for tribe nodes in Kibana was added in v5.2. + +To use Kibana with {security}: + +. Configure the password for the built-in `kibana` user.
The Kibana server submits +requests as this user to access the cluster monitoring APIs and the `.kibana` index. +The server does _not_ need access to user indices. ++ +By default, the `kibana` user password is set to `changeme`. Change this password +through the reset password API: ++ +[source,shell] +-------------------------------------------------------------------------------- +PUT /_xpack/security/user/kibana/_password +{ + "password" : "s0m3th1ngs3cr3t" +} +-------------------------------------------------------------------------------- +// CONSOLE ++ +Once you change the password, you need to specify it with the `elasticsearch.password` +property in `kibana.yml`: ++ +[source,yaml] +-------------------------------------------------------------------------------- +elasticsearch.password: "s0m3th1ngs3cr3t" +-------------------------------------------------------------------------------- + +[[kibana-roles]] +. Assign the `kibana_user` role to grant Kibana users the privileges they +need to use Kibana. ++ +IMPORTANT: You also need to grant Kibana users access to the +indices that they will be working with in Kibana. ++ +** If you're using the `native` realm, you can assign roles using the +<>. For example, the following +creates a user named `jacknich` and assigns it the `kibana_user` role: ++ +[source,js] +-------------------------------------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "t0pS3cr3t", + "roles" : [ "kibana_user" ] +} +-------------------------------------------------------------------------------- +// CONSOLE + +** If you are using an LDAP or Active Directory realm, you can either assign +roles on a per user basis, or assign roles to groups of users. By default, role +mappings are stored in <>. 
+For example, the following snippet assigns the `kibana_user` role to the +group named `admins` and the user named Jack Nicholson: ++ +[source,yaml] +-------------------------------------------------------------------------------- +kibana_user: + - "cn=admins,dc=example,dc=com" + - "cn=Jack Nicholson,dc=example,dc=com" +-------------------------------------------------------------------------------- + +[[configure-kibana-cert]] +. Configure Kibana to encrypt communications between the browser and the Kibana +server: +.. Generate a server certificate for Kibana. You must either set the certificate's +`subjectAltName` to the hostname, fully-qualified domain name (FQDN), or IP +address of the Kibana server, or set the CN to the Kibana server's hostname +or FQDN. Using the server's IP address as the CN does not work. +.. Set the `server.ssl.key` and `server.ssl.cert` properties in `kibana.yml`: ++ +[source,yaml] +-------------------------------------------------------------------------------- +server.ssl.key: /path/to/your/server.key +server.ssl.cert: /path/to/your/server.crt +-------------------------------------------------------------------------------- ++ +Once you enable SSL encryption between the browser and the Kibana server, +access Kibana via HTTPS. For example, `https://localhost:5601`. ++ +NOTE: You must enable SSL encryption between the browser and the Kibana +server to use Kibana with {security} enabled. If {security} is configured to +encrypt connections to Elasticsearch, you must also <>. + +[[configure-kibana-ssl]] +. If you have enabled SSL encryption in {security}, configure Kibana to connect +to Elasticsearch via HTTPS: + +.. 
Specify the HTTPS protocol in the `elasticsearch.url` setting in the Kibana +configuration file, `kibana.yml`: ++ +[source,yaml] +-------------------------------------------------------------------------------- +elasticsearch.url: "https://.com:9200" +-------------------------------------------------------------------------------- + +.. If you are using your own CA to sign certificates for Elasticsearch, set the +`elasticsearch.ssl.ca` setting in `kibana.yml` to specify the location of the PEM +file. ++ +[source,yaml] +-------------------------------------------------------------------------------- +elasticsearch.ssl.ca: /path/to/your/cacert.pem +-------------------------------------------------------------------------------- + +. Install {xpack} into Kibana to secure user sessions and enable users +to log in and out of Kibana: + +.. Run the following command in your Kibana installation directory. ++ +[source,console] +-------------------------------------------------------------------------------- +bin/kibana-plugin install x-pack +-------------------------------------------------------------------------------- ++ +[NOTE] +============================================================================= +To perform an offline install, download the {xpack} zip file from +https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip[ ++https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip+] +(https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{version}.zip.sha1[sha1]) +and run: + +["source","sh",subs="attributes"] +--------------------------------------------------------- +bin/kibana-plugin install file:///path/to/file/x-pack-{version}.zip +--------------------------------------------------------- +============================================================================= + +.. Set the `xpack.security.encryptionKey` property in the `kibana.yml` configuration file. 
+You can use any text string that is 32 characters or longer as the encryption key. ++ +[source,yaml] +-------------------------------------------------------------------------------- +xpack.security.encryptionKey: "something_at_least_32_characters" +-------------------------------------------------------------------------------- + +.. To change the default session duration, set the `xpack.security.sessionTimeout` property +in the `kibana.yml` configuration file. By default, sessions will stay active until the +browser is closed. The timeout is specified in milliseconds. For example, set the timeout +to 600000 to expire sessions after 10 minutes: ++ +[source,yaml] +-------------------------------------------------------------------------------- +xpack.security.sessionTimeout: 600000 +-------------------------------------------------------------------------------- + +. Restart Kibana and verify that you can log in as a user. If you are running +Kibana locally, go to `https://localhost:5601` and enter the credentials for a +user you've assigned a Kibana user role. For example, you could log in as the +`jacknich` user created above. ++ +image::kibana-login.jpg["Kibana Login",link="images/kibana-login.jpg"] ++ +NOTE: This must be a user who has been assigned the `kibana_user` role. +Kibana server credentials should only be used internally by the +Kibana server. + +[float] +[[security-ui-settings]] +===== Kibana {security} UI Settings +[options="header"] +|====== +| Name | Default | Description +| `xpack.security.encryptionKey` | - | An arbitrary string of 32 characters or more used to encrypt credentials in a + cookie. It is crucial that this key is not exposed to + users of Kibana. Required. +| `xpack.security.sessionTimeout` | `1800000` (30 minutes) | Sets the session duration (in milliseconds). +| `xpack.security.cookieName` | `"sid"` | Sets the name of the cookie used for the session. 
+| `xpack.security.secureCookies` | `false` | Sets the `secure` flag of the session cookie. Is set + to `true` if `server.ssl.cert` and `server.ssl.key` + are set. Set this to `true` if SSL is configured + outside of Kibana (for example, you are routing + requests through a load balancer or proxy). +|====== diff --git a/docs/en/security/tribe-clients-integrations/logstash.asciidoc b/docs/en/security/tribe-clients-integrations/logstash.asciidoc new file mode 100644 index 00000000000..131c03d8574 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/logstash.asciidoc @@ -0,0 +1,219 @@ +[[logstash]] +=== Logstash and Security + +The Logstash Elasticsearch plugins ( +{logstash-ref}/plugins-outputs-elasticsearch.html[output], +{logstash-ref}/plugins-inputs-elasticsearch.html[input], +{logstash-ref}/plugins-filters-elasticsearch.html[filter] +and <>) +support authentication and encryption over HTTP. + +To use Logstash with a secured cluster, you need to configure authentication +credentials for Logstash. Logstash throws an exception and the processing +pipeline is halted if authentication fails. + +If encryption is enabled on the cluster, you also need to enable SSL in the +Logstash configuration. + +If you wish to monitor your logstash instance with x-pack monitoring, and store +the monitoring data in a secured elasticsearch cluster, you must configure Logstash +with a username and password for a user with the appropriate permissions. + +In addition to configuring authentication credentials for Logstash, you need +to grant authorized users permission to access the Logstash indices. + +[float] +[[ls-http-auth-basic]] +==== Configuring Logstash to use Basic Authentication + +Logstash needs to be able to manage index templates, create indices, +and write and delete documents in the indices it creates. + +To set up authentication credentials for Logstash: + +. 
Create a `logstash_writer` role that has the `manage_index_templates` cluster +privilege, and the `write`, `delete`, and `create_index` privileges for the +Logstash indices. You can create roles from the **Management > Roles** UI in +Kibana or through the `role` API: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/role/logstash_writer +{ + "cluster": ["manage_index_templates", "monitor"], + "indices": [ + { + "names": [ "logstash-*" ], <1> + "privileges": ["write","delete","create_index"] + } + ] +} +--------------------------------------------------------------- + +<1> If you use a custom Logstash index pattern, specify that pattern +instead of the default `logstash-*` pattern. + +. Create a `logstash_internal` user and assign it the `logstash_writer` role. +You can create users from the **Management > Users** UI in Kibana or through +the `user` API: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/user/logstash_internal +{ + "password" : "changeme", + "roles" : [ "logstash_writer"], + "full_name" : "Internal Logstash User" +} +--------------------------------------------------------------- + +. Configure Logstash to authenticate as the `logstash_internal` user you just +created. You configure credentials separately for each of the Elasticsearch +plugins in your Logstash `.conf` file. For example: ++ +[source,js] +-------------------------------------------------- +input { + ... + user => logstash_internal + password => changeme + } +filter { + ... + user => logstash_internal + password => changeme + } +output { + elasticsearch { + ... + user => logstash_internal + password => changeme + } +-------------------------------------------------- + +[float] +[[ls-user-access]] +==== Granting Users Access to the Logstash Indices + +To access the indices Logstash creates, users need the `read` and +`view_index_metadata` privileges: + +. 
Create a `logstash_reader` role that has the `read` and `view_index_metadata` +privileges for the Logstash indices. You can create roles from the +**Management > Roles** UI in Kibana or through the `role` API: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/role/logstash_reader +{ + "indices": [ + { + "names": [ "logstash-*" ], <1> + "privileges": ["read","view_index_metadata"] + } + ] +} +--------------------------------------------------------------- + +<1> If you use a custom Logstash index pattern, specify that pattern +instead of the default `logstash-*` pattern. + +. Assign your Logstash users the `logstash_reader` role. You can create +and manage users from the **Management > Users** UI in Kibana or through +the `user` API: ++ +[source, sh] +--------------------------------------------------------------- +POST _xpack/security/user/logstash_user +{ + "password" : "changeme", + "roles" : [ "logstash_reader"], + "full_name" : "Kibana User" +} +--------------------------------------------------------------- + +[float] +[[ls-http-auth-pki]] +===== Configuring the elasticsearch Output to use PKI Authentication + +The `elasticsearch` output supports PKI authentication. To use an X.509 +client-certificate for authentication, you configure the `keystore` and +`keystore_password` options in your Logstash `.conf` file: + +[source,js] +-------------------------------------------------- +output { + elasticsearch { + ... + keystore => /path/to/keystore.jks + keystore_password => realpassword + truststore => /path/to/truststore.jks <1> + truststore_password => realpassword + } +} +-------------------------------------------------- +<1> If you use a separate truststore, the truststore path and password are +also required. 
+ +[float] +[[ls-http-ssl]] +===== Configuring Logstash to use TLS Encryption + +If TLS encryption is enabled on the Elasticsearch cluster, you need to +configure the `ssl` and `cacert` options in your Logstash `.conf` file: + +[source,js] +-------------------------------------------------- +output { + elasticsearch { + ... + ssl => true + cacert => '/path/to/cert.pem' <1> + } +} +-------------------------------------------------- +<1> The path to the local `.pem` file that contains the Certificate + Authority's certificate. + +[float] +[[ls-monitoring-user]] +===== Configuring Logstash Monitoring + +If you wish to ship Logstash <> +data to a secure cluster, Logstash must be configured with a username and password. + +X-Pack security comes preconfigured with a `logstash_system` user for this purpose. +This user has the minimum permissions necessary for the monitoring function, and +_should not_ be used for any other purpose - it is specifically _not intended_ for +use within a Logstash pipeline. + +By default, the `logstash_system` user password is set to `changeme`. +Change this password through the reset password API: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_password +{ + "password": "t0p.s3cr3t" +} +--------------------------------------------------------------------- +// CONSOLE + +Then configure the user and password in your `logstash.yml` configuration file: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.username: logstash_system +xpack.monitoring.elasticsearch.password: t0p.s3cr3t +---------------------------------------------------------- + +If you initially installed an older version of X-Pack, and then upgraded, then +the `logstash_system` user may have defaulted to disabled for security reasons. 
You can enable the user with the following API call: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_enable +--------------------------------------------------------------------- +// CONSOLE + diff --git a/docs/en/security/tribe-clients-integrations/monitoring.asciidoc b/docs/en/security/tribe-clients-integrations/monitoring.asciidoc new file mode 100644 index 00000000000..af58010f8f3 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/monitoring.asciidoc @@ -0,0 +1,181 @@ +[[secure-monitoring]] +=== Monitoring and Security + +<> consists of two components: an agent +that you install on each Elasticsearch and Logstash node, and a Monitoring UI +in Kibana. The monitoring agent collects and indexes metrics from the nodes +and you visualize the data through the Monitoring dashboards in Kibana. The agent +can index data on the same Elasticsearch cluster, or send it to an external +monitoring cluster. + +To use {monitoring} with {security} enabled, you need to +<> and create at least one user +for the Monitoring UI. If you are using an external monitoring cluster, you also +need to configure a user for the monitoring agent and configure the agent to use +the appropriate credentials when communicating with the monitoring cluster. + +[float] +[[monitoring-ui-users]] +==== Setting Up Monitoring UI Users + +When {security} is enabled, Kibana users are prompted to log in when they access +the UI. To use the Monitoring UI, a user must have access to the Kibana indices +and permission to read from the monitoring indices. + +You set up Monitoring UI users on the cluster where the monitoring data is being +stored. To grant all of the necessary permissions, assign the user the +`monitoring_user` and `kibana_user` roles: + +* If you're using the `native` realm, you can assign roles through Kibana or +with the <>. 
For example, the following +command creates a user named `jacknich` and assigns him the `kibana_user` and +`monitoring_user` roles: ++ +[source,js] +-------------------------------------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "t0pS3cr3t", + "roles" : [ "kibana_user", "monitoring_user" ] +} + +-------------------------------------------------------------------------------- + +* If you are using an LDAP or Active Directory realm, you can either assign roles +on a per user basis, or assign roles to groups of users. By default, role mappings +are configured in <>. For example, +the following snippet assigns the user named Jack Nicholson to the `kibana_user` +and `monitoring_user` roles: ++ +[source,yaml] +-------------------------------------------------------------------------------- +kibana_user: + - "cn=Jack Nicholson,dc=example,dc=com" +monitoring_user: + - "cn=Jack Nicholson,dc=example,dc=com" +-------------------------------------------------------------------------------- + +[float] +[[configuring-monitoring-agent-security]] +==== Configuring Monitoring Agent to Communicate with a {security}-Enabled Monitoring Cluster + +To configure the monitoring agent to communicate with a secured monitoring cluster: + +. Configure a user on the monitoring cluster who has the `remote_monitoring_agent` +role, which is <<built-in-roles-remote-monitoring-agent, built-in to {xpack}>>. +For example: ++ +[source,js] +-------------------------------------------------------------------------------- +POST /_xpack/security/user/agent-user +{ + "password" : "t0pS3cr3t", + "roles" : [ "remote_monitoring_agent" ] +} +-------------------------------------------------------------------------------- ++ + +. On each node in the cluster being monitored, configure a Monitoring HTTP exporter +in `elasticsearch.yml` and restart Elasticsearch. In the exporter configuration, +you need to: ++ +-- +.. Set the `type` to `http`. +.. 
Specify the location of the monitoring cluster in the `host` setting. +.. Provide the agent user credentials with the `username` and `password` settings. + +For example: + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["http://es-mon1:9200", "http://es-mon2:9200"] + auth: + username: agent-user + password: password +-------------------------------------------------- + +If SSL/TLS is enabled on the monitoring cluster: + +.. Specify the HTTPS protocol when setting the monitoring server host. +.. Include the CA certificate in each node's trusted certificates in order to verify + the identities of the nodes in the monitoring cluster. + +To add a CA certificate to an Elasticsearch node's trusted certificates, you +can specify the location of the PEM encoded certificate with the +`certificate_authorities` setting: + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["https://es-mon1:9200", "https://es-mon2:9200"] + auth: + username: agent-user + password: password + ssl: + certificate_authorities: [ "/path/to/ca.crt" ] + id2: + type: local +-------------------------------------------------- + +Alternatively, you can configure trusted certificates using a truststore +(a Java Keystore file that contains the certificates): + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["https://es-mon1:9200", "https://es-mon2:9200"] + auth: + username: agent-user + password: password + ssl: + truststore.path: /path/to/file + truststore.password: password + id2: + type: local +-------------------------------------------------- +-- + +. On each Logstash node being monitored, update `logstash.yml` to: ++ +-- +.. 
Specify the location of the monitoring cluster and provide credentials +for the agent user: + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.elasticsearch.url: ["http://es-mon-1:9200", "http://es-mon2:9200"] +xpack.monitoring.elasticsearch.username: "remote_monitor" +xpack.monitoring.elasticsearch.password: "changeme" +-------------------------------------------------- + +.. If SSL/TLS is enabled on the monitoring cluster: + +* Specify the HTTPS protocol when setting the `elasticsearch.url`. +* Include the CA certificate in each node's trusted certificates in order to verify + the identities of the nodes in the monitoring cluster. + +To add a CA certificate to an node's trusted certificates, you +can specify the location of the PEM encoded certificate with the +`xpack.monitoring.elasticsearch.ssl.ca` setting: + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ] +-------------------------------------------------- + +Alternatively, you can configure trusted certificates using a truststore +(a Java Keystore file that contains the certificates): + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.elasticsearch.ssl.truststore.path: /path/to/file +xpack.monitoring.elasticsearch.ssl.truststore.password: changeme +-------------------------------------------------- +-- \ No newline at end of file diff --git a/docs/en/security/tribe-clients-integrations/reporting.asciidoc b/docs/en/security/tribe-clients-integrations/reporting.asciidoc new file mode 100644 index 00000000000..22fa31cb6f5 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/reporting.asciidoc @@ -0,0 +1,44 @@ +[[secure-reporting]] +=== Reporting and Security + +Reporting operates by creating and updating documents in Elasticsearch in +response to user actions in Kibana. + +To use Reporting with {security} enabled, you need to <>. 
If you are automatically generating reports with +<>, you also need to configure {watcher} to trust the +Kibana server's certificate. For more information, see <>. + +[[reporting-app-users]] +To enable users to generate reports, assign them the built in `reporting_user` +and `kibana_user` roles: + +* If you're using the `native` realm, you can assign roles through +**Management / Users** UI in Kibana or with the `user` API. For example, +the following request creates a `reporter` user that has the +`reporting_user` and `kibana_user` roles: ++ +[source, sh] +--------------------------------------------------------------- +POST /_xpack/security/user/reporter +{ + "password" : "changeme", + "roles" : ["kibana_user", "reporting_user"], + "full_name" : "Reporting User" +} +--------------------------------------------------------------- + +* If you are using an LDAP or Active Directory realm, you can either assign +roles on a per user basis, or assign roles to groups of users. By default, role +mappings are configured in <>. +For example, the following snippet assigns the user named Bill Murray the +`kibana_user` and `reporting_user` roles: ++ +[source,yaml] +-------------------------------------------------------------------------------- +kibana_user: + - "cn=Bill Murray,dc=example,dc=com" +reporting_user: + - "cn=Bill Murray,dc=example,dc=com" +-------------------------------------------------------------------------------- \ No newline at end of file diff --git a/docs/en/security/tribe-clients-integrations/tribe.asciidoc b/docs/en/security/tribe-clients-integrations/tribe.asciidoc new file mode 100644 index 00000000000..0d3892a8b08 --- /dev/null +++ b/docs/en/security/tribe-clients-integrations/tribe.asciidoc @@ -0,0 +1,109 @@ +[[tribe-node-configuring]] +=== Tribe Nodes and Security + +{ref}/modules-tribe.html[Tribe nodes] act as a federated client across multiple +clusters. 
When using tribe nodes with secured clusters, all clusters must have +{security} enabled and share the same security configuration (users, roles, +user-role mappings, SSL/TLS CA). The tribe node itself also must be configured +to grant access to actions and indices on all of the connected clusters, as +security checks on incoming requests are primarily done on the tribe node +itself. + +IMPORTANT: Support for tribe nodes in Kibana was added in v5.2. + +To use a tribe node with secured clusters: + +. Install {xpack} on the tribe node and every node in each connected cluster. + +. Enable <> globally. +Generate a system key on one node and copy it to the tribe node and every other +node in each of the connected clusters. ++ +IMPORTANT: For message authentication to work properly across multiple clusters, + the tribe node and all of the connected clusters must share the same + system key. {security} reads the system key from `CONFIG_DIR/x-pack/system_key`. + +. Enable encryption globally. To encrypt communications, you must enable +<> on every node. ++ +TIP: To simplify SSL/TLS configuration, use the same certificate authority to + generate certificates for all connected clusters. + +. Configure the tribe in the tribe node's `elasticsearch.yml` file. You must +specify each cluster that is a part of the tribe and configure discovery and +encryption settings per cluster. 
For example, the following configuration adds +two clusters to the tribe: ++ +[source,yml] +----------------------------------------------------------- +tribe: + on_conflict: prefer_cluster1 <1> + c1: <2> + cluster.name: cluster1 + discovery.zen.ping.unicast.hosts: [ "cluster1-node1:9300", "cluster1-node2:9300"] + xpack.ssl.key: /home/es/config/x-pack/es-tribe-01.key + xpack.ssl.certificate: /home/es/config/x-pack/es-tribe-01.crt + xpack.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] + xpack.security.transport.ssl.enabled: true + xpack.security.http.ssl.enabled: true + c2: + cluster.name: cluster2 + discovery.zen.ping.unicast.hosts: [ "cluster2-node1:9300", "cluster2-node2:9300"] + xpack.ssl.key: /home/es/config/x-pack/es-tribe-01.key + xpack.ssl.certificate: /home/es/config/x-pack/es-tribe-01.crt + xpack.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] + xpack.security.transport.ssl.enabled: true + xpack.security.http.ssl.enabled: true +----------------------------------------------------------- +<1> Results are returned from the preferred cluster if the named index exists + in multiple clusters. A preference is *required* when using {security} on + a tribe node. +<2> An arbitrary name that represents the connection to the cluster. + +. Configure the same index privileges for your users on all nodes, including the +tribe node. The nodes in each cluster must grant access to indices in other +connected clusters as well as their own. ++ +For example, let's assume `cluster1` and `cluster2` each have a indices `index1` +and `index2`. To enable a user to submit a request through the tribe node to +search both clusters: ++ +-- +.. On the tribe node and both clusters, <> +that has read access to `index1` and `index2`: ++ +[source,yaml] +----------------------------------------------------------- +tribe_user: + indices: + 'index*': search +----------------------------------------------------------- + +.. 
Assign the `tribe_user` role to a user on the tribe node and both clusters. +For example, run the following command on each node to create `my_tribe_user` +and assign the `tribe_user` role: ++ +[source,shell] +----------------------------------------------------------- +./bin/shield/users useradd my_tribe_user -p password -r tribe_user +----------------------------------------------------------- ++ +NOTE: Each cluster needs to have its own users with admin privileges. + You cannot perform administration tasks such as create index through + the tribe node, you must send the request directly to the appropriate + cluster. +-- + +. To enable selected users to retrieve merged cluster state information +for the tribe from the tribe node, grant them the cluster +<> on the tribe node. For example, +you could create a `tribe_monitor` role that assigns the `monitor` privilege: ++ +[source,yaml] +----------------------------------------------------------- +tribe_monitor: + cluster: monitor +----------------------------------------------------------- + +. Start the tribe node. If you've made configuration changes to the nodes in the +connected clusters, they also need to be restarted. diff --git a/docs/en/security/troubleshooting.asciidoc b/docs/en/security/troubleshooting.asciidoc new file mode 100644 index 00000000000..e399092c9f7 --- /dev/null +++ b/docs/en/security/troubleshooting.asciidoc @@ -0,0 +1,229 @@ +[[security-troubleshooting]] +== {security} Troubleshooting + +[float] +=== `settings` + +Some settings are not returned via the nodes settings API:: ++ +-- +This is intentional. Some of the settings are considered to be highly +sensitive: all `ssl` settings, ldap `bind_dn`, `bind_password`). +For this reason, we filter these settings and do not expose them via +the nodes info API rest endpoint. You can also define additional +sensitive settings that should be hidden using the +`xpack.security.hide_settings` setting. 
For example, this snippet +hides the `url` settings of the `ldap1` realm and all settings of the +`ad1` realm. + +[source, yaml] +------------------------------------------ +xpack.security.hide_settings: xpack.security.authc.realms.ldap1.url, xpack.security.authc.realms.ad1.* +------------------------------------------ + +-- + +[float] +=== `users` + +I configured the appropriate roles and the users, but I still get an authorization exception:: ++ +-- +Verify that the role names associated with the users match the roles defined in the `roles.yml` file. You +can use the `users` tool to list all the users. Any unknown roles are marked with `*`. + +[source, shell] +------------------------------------------ +bin/xpack/users list +rdeniro : admin +alpacino : power_user +jacknich : monitoring,unknown_role* <1> +------------------------------------------ +<1> `unknown_role` was not found in `roles.yml` +-- + +ERROR: extra arguments [...] were provided:: ++ +-- +This error occurs when the `users` tool is parsing the input and finds unexepected arguments. This can happen when there +are special characters used in some of the arguments. For example, on Windows systems the `,` character is considered +a parameter separator; in other words `-r role1,role2` is translated to `-r role1 role2` and the `users` tool only recognizes +`role1` as an expected parameter. The solution here is to quote the parameter: `-r "role1,role2"`. +-- + +[[trouble-shoot-active-directory]] +[float] +=== Active Directory + +Certain users are being frequently locked out of Active Directory:: ++ +-- +Check your realm configuration; realms are checked serially, one after another. If your Active Directory realm is being checked before other realms and there are usernames +that appear in both Active Directory and another realm, a valid login for one realm may be causing failed login attempts in another realm. 
+ +For example, if `UserA` exists in both Active Directory and a file realm, and the Active Directory realm is checked first and +file is checked second, an attempt to authenticate as `UserA` in the file realm would first attempt to authenticate +against Active Directory and fail, before successfully authenticating against the `file` realm. Because authentication is +verified on each request, the Active Directory realm would be checked - and fail - on each request for `UserA` in the `file` +realm. In this case, while the authentication request completed successfully, the account on Active Directory would have received +several failed login attempts, and that account may become temporarily locked out. Plan the order of your realms accordingly. + +Also note that it is not typically necessary to define multiple Active Directory realms to handle domain controller failures. When using Microsoft DNS, the DNS entry for the domain should always point to an available domain controller. +-- + +[float] +=== LDAP + +I can authenticate to LDAP, but I still get an authorization exception:: ++ +-- +A number of configuration options can cause this error. + +|====================== +|_group identification_ | + +Groups are located by either an LDAP search or by the "memberOf" attribute on +the user. Also, If subtree search is turned off, it will search only one +level deep. See the <> for all the options. +There are many options here and sticking to the defaults will not work for all +scenarios. + +| _group to role mapping_| + +Either the `role_mapping.yml` file or the location for this file could be +misconfigured. See <> for more. + +|_role definition_| + +The role definition may be missing or invalid. 
+ +|====================== + +To help track down these possibilities, add the following lines to the end of the `log4j2.properties` configuration file in the +`CONFIG_DIR`: + +[source,properties] +---------------- +logger.authc.name = org.elasticsearch.xpack.security.authc +logger.authc.level = DEBUG +---------------- + +A successful authentication should produce debug statements that list groups and role mappings. +-- + + +[float] +=== Encryption & Certificates + +`curl` on the Mac returns a certificate verification error even when the `--cacert` option is used:: ++ +-- +Apple's integration of `curl` with their keychain technology disables the `--cacert` option. +See http://curl.haxx.se/mail/archive-2013-10/0036.html for more information. + +You can use another tool, such as `wget`, to test certificates. Alternately, you can add the certificate for the +signing certificate authority MacOS system keychain, using a procedure similar to the one detailed at the +http://support.apple.com/kb/PH14003[Apple knowledge base]. Be sure to add the signing CA's certificate and not the server's certificate. +-- + +[float] +==== SSLHandshakeException causing connections to fail + +A `SSLHandshakeException` will cause a connection to a node to fail and indicates that there is a configuration issue. Some of the +common exceptions are shown below with tips on how to resolve these issues. + +`java.security.cert.CertificateException: No name matching node01.example.com found`:: ++ +-- +Indicates that a client connection was made to `node01.example.com` but the certificate returned did not contain the name `node01.example.com`. +In most cases, the issue can be resolved by ensuring the name is specified during <>. +Another scenario is when the environment does not wish to use DNS names in certificates at all. In this scenario, all settings +in `elasticsearch.yml` should only use IP addresses including the `network.publish_host` setting. 
+-- + +`java.security.cert.CertificateException: No subject alternative names present`:: ++ +-- +Indicates that a client connection was made to an IP address but the returned certificate did not contain any `SubjectAlternativeName` entries. +IP addresses are only used for hostname verification if they are specified as a `SubjectAlternativeName` during +<>. If the intent was to use IP addresses for hostname verification, then the certificate +will need to be regenerated with the appropriate IP address. +-- + +`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`:: ++ +-- +The `SSLHandshakeException` above indicates that a self-signed certificate was returned by the client that is not trusted +as it cannot be found in the `truststore` or `keystore`. The `SSLException` above is seen on the client side of the connection. +-- + +`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`:: ++ +-- +The `SunCertPathBuilderException` above indicates that a certificate was returned during the handshake that is not trusted. +This message is seen on the client side of the connection. The `SSLException` above is seen on the server side of the +connection. The CA certificate that signed the returned certificate was not found in the `keystore` or `truststore` and +needs to be added to trust this certificate. +-- + +[float] +==== Other SSL/TLS related exceptions + +There are other exceptions related to SSL that may be seen in the logs. Below you will find some common exceptions and their +meaning. + +WARN: received plaintext http traffic on a https channel, closing connection:: ++ +-- +Indicates that there was an incoming plaintext http request. This typically occurs when an external application attempts +to make an unencrypted call to the REST interface. 
Please ensure that all applications are using `https` when calling the +REST interface with SSL enabled. +-- + +`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record:`:: ++ +-- +Indicates that there was incoming plaintext traffic on an SSL connection. This typically occurs when a node is not +configured to use encrypted communication and tries to connect to nodes that are using encrypted communication. Please +verify that all nodes are using the same setting for `xpack.security.transport.ssl.enabled`. +-- + +`java.io.StreamCorruptedException: invalid internal transport message format, got`:: ++ +-- +Indicates an issue with data received on the transport interface in an unknown format. This can happen when a node with +encrypted communication enabled connects to a node that has encrypted communication disabled. Please verify that all +nodes are using the same setting for `xpack.security.transport.ssl.enabled`. +-- + +`java.lang.IllegalArgumentException: empty text`:: ++ +-- +The exception is typically seen when a `https` request is made to a node that is not using `https`. If `https` is desired, +please ensure the following setting is in `elasticsearch.yml`: + +[source,yaml] +---------------- +xpack.security.http.ssl.enabled: true +---------------- +-- + +ERROR: unsupported ciphers [...] were requested but cannot be used in this JVM:: ++ +-- +This error occurs when an SSL/TLS cipher suite is specified that cannot be supported by the JVM that Elasticsearch is running +in. Security will try to use the specified cipher suites that are supported by this JVM. This error can occur when using +the Security defaults as some distributions of OpenJDK do not enable the PKCS11 provider by default. In this case, we +recommend consulting your JVM documentation for details on how to enable the PKCS11 provider. 
+ +Another common source of this error is requesting cipher suites that use encryption with a key length greater than 128 bits +when running on an Oracle JDK. In this case, you will need to install the <>. +-- + +[float] +==== Internal Server Error in Kibana + +If the Security plugin is enabled in Elasticsearch but disabled in Kibana, you must +still set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`. +Otherwise, Kibana cannot connect to Elasticsearch. In 5.1.1, this results in an +`UnhandledPromiseRejectionWarning` and Kibana displays an Internal Server Error. \ No newline at end of file diff --git a/docs/en/security/using-ip-filtering.asciidoc b/docs/en/security/using-ip-filtering.asciidoc new file mode 100644 index 00000000000..37beced5a94 --- /dev/null +++ b/docs/en/security/using-ip-filtering.asciidoc @@ -0,0 +1,143 @@ +[[ip-filtering]] +== Restricting Connections with IP Filtering + +You can apply IP filtering to application clients, node clients, or transport +clients, in addition to other nodes that are attempting to join the cluster. + +If a node's IP address is on the blacklist, {security} will still allow the +connection to Elasticsearch, but it will be dropped immediately, and no requests +will be processed. + +NOTE: Elasticsearch installations are not designed to be publicly accessible + over the Internet. IP Filtering and the other security capabilities of + {security} do not change this condition. + +[float] +=== Enabling IP filtering + +{security} features an access control feature that allows or rejects hosts, +domains, or subnets. + +You configure IP filtering by specifying the `xpack.security.transport.filter.allow` and +`xpack.security.transport.filter.deny` settings in `elasticsearch.yml`. Allow rules +take precedence over the deny rules. 
+ +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: "192.168.0.1" +xpack.security.transport.filter.deny: "192.168.0.0/24" +-------------------------------------------------- + +The `_all` keyword can be used to deny all connections that are not explicitly +allowed. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: [ "192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4" ] +xpack.security.transport.filter.deny: _all +-------------------------------------------------- + +IP filtering configuration also supports IPv6 addresses. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: "2001:0db8:1234::/48" +xpack.security.transport.filter.deny: "1234:0db8:85a3:0000:0000:8a2e:0370:7334" +-------------------------------------------------- + +You can also filter by hostnames when DNS lookups are available. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: localhost +xpack.security.transport.filter.deny: '*.google.com' +-------------------------------------------------- + +[float] +=== Disabling IP Filtering + +Disabling IP filtering can slightly improve performance under some conditions. +To disable IP filtering entirely, set the value of the `xpack.security.transport.filter.enabled` +setting in the `elasticsearch.yml` configuration file to `false`. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.enabled: false +-------------------------------------------------- + +You can also disable IP filtering for the transport protocol but enable it for +HTTP only.
+ +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.enabled: false +xpack.security.http.filter.enabled: true +-------------------------------------------------- + +[float] +=== Specifying TCP transport profiles + +{ref}/modules-transport.html#_tcp_transport_profiles[TCP transport profiles] +enable Elasticsearch to bind on multiple hosts. {security} enables you to apply +different IP filtering on different profiles. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: 172.16.0.0/24 +xpack.security.transport.filter.deny: _all +transport.profiles.client.xpack.security.filter.allow: 192.168.0.0/24 +transport.profiles.client.xpack.security.filter.deny: _all +-------------------------------------------------- + +NOTE: When you do not specify a profile, `default` is used automatically. + +[float] +=== HTTP Filtering + +You may want to have different IP filtering for the transport and HTTP protocols. + +[source,yaml] +-------------------------------------------------- +xpack.security.transport.filter.allow: localhost +xpack.security.transport.filter.deny: '*.google.com' +xpack.security.http.filter.allow: 172.16.0.0/16 +xpack.security.http.filter.deny: _all +-------------------------------------------------- + +[float] +[[dynamic-ip-filtering]] +==== Dynamically updating IP filter settings + +If you run in an environment with highly dynamic IP addresses, such as cloud-based +hosting, it is very hard to know the IP addresses upfront when provisioning +a machine. Instead of changing the configuration file and restarting the node, +you can use the _Cluster Update Settings API_.
For example: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "xpack.security.transport.filter.allow" : "172.16.0.0/24" + } +} +-------------------------------------------------- +// CONSOLE + +You can also dynamically disable filtering completely: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "xpack.security.transport.filter.enabled" : false + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: In order to avoid locking yourself out of the cluster, the default bound + transport address will never be denied. This means you can always SSH into + a system and use curl to apply changes.