Merge remote-tracking branch 'remotes/upstream/master' into feature/sql
Original commit: elastic/x-pack-elasticsearch@154da6e26e
This commit is contained in: commit 448b5d8faf
@@ -340,16 +340,13 @@ def wait_for_monitoring_index_to_fill(client, version):
def wait_for_monitoring_to_index(doc_type, count):
    logging.info('Waiting for %s to have count(%s) = %s...' % (monitoring_name, doc_type, count))
    wait_for_search(count, lambda:
        client.search(index=monitoring_name, doc_type=doc_type, body={"query": {"match_all": {}}}))
        client.search(index=monitoring_name, body={"query": {"term": { "type": doc_type }}}))

wait_for_monitoring_to_index('cluster_info', 1)
if parse_version(version) >= parse_version('2.1.0'):
    wait_for_monitoring_to_index('node', 1)
wait_for_monitoring_to_index('index_stats', 10)
wait_for_monitoring_to_index('shards', 10)
wait_for_monitoring_to_index('indices_stats', 3)
wait_for_monitoring_to_index('node_stats', 3)
wait_for_monitoring_to_index('cluster_state', 3)
wait_for_monitoring_to_index('cluster_stats', 3)

wait_for_yellow(version, client, monitoring_name)
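
The search in this hunk changes because monitoring documents now live under a single mapping type: instead of passing `doc_type` to the search call, the document type is matched with a `term` query on the `type` field. A minimal sketch of the new-style count check, assuming an `elasticsearch-py` client and a hypothetical monitoring index pattern:

[source, python]
------------------------------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch()                 # assumed connection settings
monitoring_name = '.monitoring-es-*'     # hypothetical index pattern

def count_monitoring_docs(doc_type):
    # Filter on the 'type' field carried in the document source,
    # rather than the (removed) doc_type search parameter.
    resp = client.search(index=monitoring_name,
                         body={"query": {"term": {"type": doc_type}}})
    return resp['hits']['total']

print(count_monitoring_docs('cluster_stats'))
------------------------------------------------------------
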
@@ -483,12 +483,13 @@ The number of records that have been processed by the job.
Memory status::
The status of the mathematical models. When you create jobs by using the APIs or
by using the advanced options in {kib}, you can specify a `model_memory_limit`.
That value is the maximum amount of memory resources, in MiB, that the
mathematical models can use. Once that limit is approached, data pruning becomes
more aggressive. Upon exceeding that limit, new entities are not modeled.
The default value is `4096`. The memory status field reflects whether you have
reached or exceeded the model memory limit. It can have one of the following
values: +
That value is the maximum amount of memory resources that the mathematical
models can use. Once that limit is approached, data pruning becomes more
aggressive. Upon exceeding that limit, new entities are not modeled. For more
information about this setting, see
{ref}/ml-job-resource.html#ml-apilimits[Analysis Limits]. The memory status
field reflects whether you have reached or exceeded the model memory limit. It
can have one of the following values: +
`ok`::: The models stayed below the configured value.
`soft_limit`::: The models used more than 60% of the configured memory limit
and older unused models will be pruned to free up space.
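
For context, the memory status described above is reported in the job's model size stats. A sketch of reading it with `elasticsearch-py`; the `_xpack/ml` stats path and the job name are assumptions here:

[source, python]
------------------------------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch()
job_id = 'my-job'  # hypothetical

# GET _xpack/ml/anomaly_detectors/<job_id>/_stats
stats = client.transport.perform_request(
    'GET', '/_xpack/ml/anomaly_detectors/%s/_stats' % job_id)
model_size_stats = stats['jobs'][0]['model_size_stats']
# One of: ok, soft_limit, hard_limit
print(model_size_stats['memory_status'])
------------------------------------------------------------
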
@@ -23,14 +23,21 @@ you work with extensively in the beta, make note of all the details so
that you can re-create them successfully.

[float]
=== {xpackml} features do not support cross cluster search
=== {xpackml} features do not yet support cross cluster search

You cannot use cross cluster search in either the {ml} APIs or the {ml}
features in {kib}.
At this time, you cannot use cross cluster search in either the {ml} APIs or the
{ml} features in {kib}.

For more information about cross cluster search,
see {ref}/modules-cross-cluster-search.html[Cross Cluster Search].

[float]
=== {xpackml} features are not supported on tribe nodes

You cannot use {ml} features on tribe nodes. For more information about that
type of node, see
{ref}/modules-tribe.html[Tribe node].

[float]
=== Anomaly Explorer omissions and limitations
//See x-pack-elasticsearch/#844 and x-pack-kibana/#1461
@@ -277,26 +277,33 @@ For more information, see
//<<ml-configuring-categories>>.

`model_memory_limit`::
(long) The approximate maximum amount of memory resources that are required
for analytical processing, in MiB. Once this limit is approached, data pruning
(long or string) The approximate maximum amount of memory resources that are
required for analytical processing. Once this limit is approached, data pruning
becomes more aggressive. Upon exceeding this limit, new entities are not
modeled. The default value is 4096.
modeled. The default value is `4096mb`. If you specify a number instead of a
string, the units are assumed to be MiB. Specifying a string is recommended
for clarity. If you specify a byte size unit of `b` or `kb` and the number
does not equate to a discrete number of megabytes, it is rounded down to the
closest MiB. The minimum valid value is 1 MiB. If you specify a value less
than 1 MiB, an error occurs. For more information about supported byte size
units, see
{ref}/common-options.html#byte-units[Byte size units].
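
A worked sketch of the unit handling described above — a hypothetical helper mirroring the documented rounding rules, not the actual Elasticsearch parser:

[source, python]
------------------------------------------------------------
UNITS = {'b': 1, 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}

def model_memory_limit_mib(value):
    """Bare numbers mean MiB; strings carry a byte-size unit and are
    rounded down to whole MiB, with a 1 MiB minimum."""
    if isinstance(value, int):
        return value  # e.g. 4096 -> 4096 MiB
    number = int(''.join(ch for ch in value if ch.isdigit()))
    unit = value.lstrip('0123456789').strip().lower()
    mib = (number * UNITS[unit]) // (1024 ** 2)  # round down to whole MiB
    if mib < 1:
        raise ValueError('model_memory_limit must be at least 1 MiB')
    return mib

print(model_memory_limit_mib('4096mb'))    # 4096
print(model_memory_limit_mib('2097152b'))  # 2 -- exactly 2 MiB
print(model_memory_limit_mib('1500kb'))    # rounds down to 1
------------------------------------------------------------
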
[float]
[[ml-apimodelplotconfig]]
==== Model Plot Config

This advanced configuration option stores model information along with the
results. It provides a more detailed view into anomaly detection.

WARNING: If you enable model plot it can add considerable overhead to the performance
of the system; it is not feasible for jobs with many entities.

Model plot provides a simplified and indicative view of the model and its bounds.
It does not display complex features such as multivariate correlations or multimodal data.
As such, anomalies may occasionally be reported which cannot be seen in the model plot.

Model plot config can be configured when the job is created or updated later. It must be
disabled if performance issues are experienced.

The `model_plot_config` object has the following properties:
@@ -306,7 +313,7 @@ The `model_plot_config` object has the following properties:
each entity that is being analyzed. By default, this is not enabled.

`terms`::
(string) Limits data collection to this comma separated list of partition or by field values.
If terms are not specified or it is an empty string, no filtering is applied.
For example, "CPU,NetworkIn,DiskWrites". This is experimental. Only the specified `terms` can
be viewed when using the Single Metric Viewer.
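
Tying the last two hunks together, a sketch of a job body that sets an explicit memory limit and enables model plot for a few terms; all field and term names are illustrative:

[source, python]
------------------------------------------------------------
job_config = {
    "analysis_config": {
        "bucket_span": "5m",
        "detectors": [{"function": "mean", "field_name": "responsetime"}]
    },
    "analysis_limits": {
        # string form with an explicit unit, per the recommendation above
        "model_memory_limit": "4096mb"
    },
    "model_plot_config": {
        "enabled": True,
        # restrict plotting to these partition/by field values
        "terms": "CPU,NetworkIn,DiskWrites"
    },
    "data_description": {"time_field": "timestamp"}
}
------------------------------------------------------------
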
@@ -23,8 +23,7 @@ The following properties can be updated after the job is created:
|Name |Description |Requires Restart

|`analysis_limits`: `model_memory_limit` |The approximate maximum amount of
memory resources required for analytical processing, in MiB.
See <<ml-apilimits>>. | Yes
memory resources required for analytical processing. See <<ml-apilimits>>. | Yes

|`background_persist_interval` |Advanced configuration option. The time between
each periodic persistence of the model. See <<ml-job-resource>>. | Yes
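
A sketch of updating the limit on an existing job, assuming the `_xpack/ml` update endpoint of this release line and a hypothetical job name; per the table above, the change requires a job restart to take effect:

[source, python]
------------------------------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch()
job_id = 'my-job'  # hypothetical

# POST _xpack/ml/anomaly_detectors/<job_id>/_update
client.transport.perform_request(
    'POST', '/_xpack/ml/anomaly_detectors/%s/_update' % job_id,
    body={"analysis_limits": {"model_memory_limit": "8192mb"}})
------------------------------------------------------------
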
@@ -33,6 +33,12 @@ NOTE: When you use Active Directory for authentication, the username entered by
the user is expected to match the `sAMAccountName` or `userPrincipalName`,
not the common name.

The Active Directory realm authenticates users using an LDAP bind request. After
authenticating the user, the realm then searches to find the user's entry in
Active Directory. Once the user has been found, the Active Directory realm then
retrieves the user's group memberships from the `tokenGroups` attribute on the
user's entry in Active Directory.

To configure an `active_directory` realm:

. Add a realm configuration of type `active_directory` to `elasticsearch.yml`
@@ -63,13 +69,10 @@ xpack:
          order: 0 <1>
          domain_name: ad.example.com
          url: ldaps://ad.example.com:636 <2>
          unmapped_groups_as_roles: true <3>
------------------------------------------------------------
<1> The realm order controls the order in which the configured realms are checked
when authenticating a user.
<2> If you don't specify the URL, it defaults to `ldap://<domain_name>:389`.
<3> When this option is enabled, Active Directory groups are automatically mapped
to roles of the same name.
+
IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
realms you specify are used for authentication. If you also want to use the
@@ -77,6 +80,42 @@ realms you specify are used for authentication. If you also want to use the

. Restart Elasticsearch.

===== Configuring a Bind User
By default, all of the LDAP operations are run by the user that {security} is
authenticating. In some cases, regular users may not be able to access all of the
necessary items within Active Directory and a _bind user_ is needed. A bind user
can be configured and will be used to perform all operations other than the LDAP
bind request, which is required to authenticate the credentials provided by the user.

The use of a bind user enables the <<run-as-privilege,run as feature>> to be
used with the Active Directory realm and the ability to maintain a set of pooled
connections to Active Directory. These pooled connections reduce the number of
resources that must be created and destroyed with every user authentication.

The following example shows the configuration of a bind user through the use of the
`bind_dn` and `bind_password` settings.

[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        active_directory:
          type: active_directory
          order: 0
          domain_name: ad.example.com
          url: ldaps://ad.example.com:636
          bind_dn: es_svc_user@ad.example.com <1>
          bind_password: es_svc_user_password
------------------------------------------------------------
<1> This is the user that all Active Directory search requests are executed as.
Without a bind user configured, all requests run as the user that is authenticating
with Elasticsearch.

When a bind user is configured, connection pooling is enabled by default.
Connection pooling can be disabled using the `user_search.pool.enabled` setting.

===== Multiple Domain Support
When authenticating users across multiple domains in a forest, there are a few minor
differences in the configuration and the way that users will authenticate. The `domain_name`
@@ -176,6 +215,14 @@ operation are supported: failover and load balancing
assuming an unencrypted connection to port 389. For example,
`ldap://<domain_name>:389`. This setting is required when
connecting using SSL/TLS or via a custom port.
| `bind_dn` | no | The DN of the user that is used to bind to Active Directory
and perform searches. Due to its potential security
impact, `bind_dn` is not exposed via the
{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
| `bind_password` | no | The password for the user that is used to bind to
Active Directory. Due to its potential security impact,
`bind_password` is not exposed via the
{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
| `load_balance.type` | no | The behavior to use when there are multiple LDAP URLs defined.
For supported values see <<ad-load-balancing>>.
| `load_balance.cache_ttl` | no | When using `dns_failover` or `dns_round_robin` as the load
@@ -195,12 +242,10 @@ operation are supported: failover and load balancing
`(&(objectClass=user)(sAMAccountName={0}))`. For more
information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
| `user_search.upn_filter` | no | Specifies a filter to use to lookup a user given a user principal name.
The default filter looks up `user` objects with either
a matching `userPrincipalName` or a `sAMAccountName` matching the account
portion of the user principal name. If specified, this
The default filter looks up `user` objects with
a matching `userPrincipalName`. If specified, this
must be a valid LDAP user search filter, for example
`(&(objectClass=user)(sAMAccountName={0}))`. `{0}` is the value
preceding the `@` sign in the user principal name and `{1}` is
`(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is
the full user principal name provided by the user. For more
information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
| `user_search.down_level_filter` | no | Specifies a filter to use to lookup a user given a down level logon name (DOMAIN\user).
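
To make the placeholder semantics concrete: for a UPN login such as `jdoe@ad.example.com`, `{0}` is the account portion before the `@` and `{1}` is the full UPN. An illustrative expansion in Python, not the realm's actual code:

[source, python]
------------------------------------------------------------
def expand_upn_filter(template, upn):
    # {0}: the value preceding the '@' sign; {1}: the full UPN
    account = upn.partition('@')[0]
    return template.replace('{0}', account).replace('{1}', upn)

default_filter = '(&(objectClass=user)(userPrincipalName={1}))'
print(expand_upn_filter(default_filter, 'jdoe@ad.example.com'))
# -> (&(objectClass=user)(userPrincipalName=jdoe@ad.example.com))
------------------------------------------------------------
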
@@ -209,6 +254,22 @@ operation are supported: failover and load balancing
must be a valid LDAP user search filter, for example
`(&(objectClass=user)(sAMAccountName={0}))`. For more
information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax].
| `user_search.pool.enabled` | no | Enables or disables connection pooling for user search. When
disabled a new connection is created for every search. The
default is `true` when `bind_dn` is provided.
| `user_search.pool.size` | no | Specifies the maximum number of connections to Active Directory
server to allow in the connection pool. Defaults to `20`.
| `user_search.pool.initial_size` | no | The initial number of connections to create to Active Directory
server on startup. Defaults to `0`. Values greater than `0`
could cause startup failures if the LDAP server is down.
| `user_search.pool.health_check.enabled` | no | Enables or disables a health check on Active Directory connections in
the connection pool. Connections are checked in the
background at the specified interval. Defaults to `true`.
| `user_search.pool.health_check.dn` | no | Specifies the distinguished name to retrieve as part of
the health check. Defaults to the value of `bind_dn` if present, and if
not falls back to `user_search.base_dn`.
| `user_search.pool.health_check.interval` | no | How often to perform background checks of connections in
the pool. Defaults to `60s`.
| `group_search.base_dn` | no | Specifies the context to search for groups in which the user
has membership. Defaults to the root of the Active Directory
domain.
@@ -85,7 +85,7 @@ users, you can use User DN templates to configure the realm. The advantage of
this method is that a search does not have to be performed to find the user DN.
However, multiple bind operations might be needed to find the correct user DN.

To configure an `ldap` Realm with User Search:
To configure an `ldap` Realm with User DN templates:

. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the
`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
@@ -119,6 +119,9 @@ xpack:

. Restart Elasticsearch

IMPORTANT: The `bind_dn` setting is not used in template mode.
All LDAP operations will execute as the authenticating user.


[[ldap-load-balancing]]
===== Load Balancing and Failover
@@ -288,7 +291,11 @@ failover and load balancing modes of operation.
all objects contained under `base_dn`. `base` specifies
that the `base_dn` is the user object, and that it is the
only user considered. Defaults to `sub_tree`.
| `user_search.attribute` | no | Specifies the attribute to match with the username presented
| `user_search.filter` | no | Specifies the filter used to search the directory in attempt to match
an entry with the username provided by the user. Defaults to `(uid={0})`.
`{0}` is substituted with the username provided when searching.
| `user_search.attribute` | no | This setting is deprecated; use `user_search.filter` instead.
Specifies the attribute to match with the username presented
to. Defaults to `uid`.
| `user_search.pool.enabled` | no | Enables or disables connection pooling for user search. When
disabled a new connection is created for every search. The
@@ -8,10 +8,10 @@ users, you can use the _run as_ mechanism to restrict data access according to

To "run as" (impersonate) another user, you must be able to retrieve the user from
the realm you use to authenticate. Both the internal `native` and `file` realms
support this out of the box. The LDAP realm however must be configured to run in
_user search_ mode. For more information, see
<<ldap-user-search, Configuring an LDAP Realm with User Search>>.
The Active Directory and PKI realms do not support "run as".
support this out of the box. The LDAP realm must be configured to run in
<<ldap-user-search, _user search_ mode>>. The Active Directory realm must be
<<ad-settings,configured with a `bind_dn` and `bind_password`>> to support _run as_.
The PKI realm does not support _run as_.

To submit requests on behalf of other users, you need to have the `run_as`
permission. For example, the following role grants permission to submit request
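
For reference, impersonation is requested per call via a header. A sketch using the Python `requests` library; the `es-security-runas-user` header name is the one {security} uses for _run as_, and the host and user names are hypothetical:

[source, python]
------------------------------------------------------------
import requests

# Authenticate as a user that holds the run_as privilege, then ask
# the security layer to execute this one request as another user.
resp = requests.get(
    'http://localhost:9200/logs-*/_search',
    auth=('service_user', 'service_password'),
    headers={'es-security-runas-user': 'jdoe'})
print(resp.json()['hits']['total'])
------------------------------------------------------------
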
@@ -70,9 +70,7 @@ When a user's role enables document level security for an index:

Calling certain Elasticsearch APIs on an alias can potentially leak information
about indices that the user isn't authorized to access. For example, when you get
the mappings for an alias with the `_mapping` API, the response includes the
index name and mappings for each index that the alias applies to. Similarly, the
response to a `_field_stats` request includes the name of the underlying index,
rather than the alias name.
index name and mappings for each index that the alias applies to.

Until this limitation is addressed, avoid index and field names that contain
confidential or sensitive information.
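
To illustrate the `_mapping` leak described above, with hypothetical alias and index names:

[source, python]
------------------------------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch()

# The user only knows the alias name...
mappings = client.indices.get_mapping(index='sales-alias')

# ...but the response is keyed by the concrete indices behind the alias,
# e.g. ['sales-2017-q1-confidential', 'sales-2017-q2']
print(list(mappings.keys()))
------------------------------------------------------------
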
@@ -15,6 +15,12 @@ roles are passed to the remote clusters. A remote cluster checks the user's
roles against its local role definitions to determine which indices the user
is allowed to access.

[WARNING]
This feature was added as Beta in Elasticsearch `v5.3` with further
improvements made in 5.4 and 5.5. It requires gateway eligible nodes to be on
`v5.5` onwards.

To use cross cluster search with secured clusters:

* Install {xpack} on every node in each connected cluster.
@@ -155,6 +155,7 @@ to `1h`.

`bind_dn`::
The DN of the user that will be used to bind to the LDAP and perform searches.
Only applicable in {xpack-ref}/ldap-realm.html#ldap-user-search[user search mode].
If this is not specified, an anonymous bind will be attempted.
Defaults to Empty.
@@ -187,13 +188,19 @@ The scope of the user search. Valid values are `sub_tree`, `one_level` or
`base` specifies that the `base_dn` is the user object, and that it is
the only user considered. Defaults to `sub_tree`.

`user_search.filter`::
Specifies the filter used to search the directory in attempt to match
an entry with the username provided by the user. Defaults to `(uid={0})`.
`{0}` is substituted with the username provided when searching.

`user_search.attribute`::
This setting is deprecated; use `user_search.filter` instead.
The attribute to match with the username presented to. Defaults to `uid`.

`user_search.pool.enabled`::
Enables or disables connection pooling for user search. When
disabled a new connection is created for every search. The
default is `true`.
default is `true` when `bind_dn` is provided.

`user_search.pool.size`::
The maximum number of connections to the LDAP server to allow in the
@@ -201,7 +208,7 @@ connection pool. Defaults to `20`.

`user_search.pool.initial_size`::
The initial number of connections to create to the LDAP server on startup.
Defaults to `5`.
Defaults to `0`.

`user_search.pool.health_check.enabled`::
Flag to enable or disable a health check on LDAP connections in the connection
@@ -210,12 +217,13 @@ Defaults to `true`.

`user_search.pool.health_check.dn`::
The distinguished name to be retrieved as part of the health check.
Defaults to the value of `bind_dn`. Required if `bind_dn` is not
specified.
Defaults to the value of `bind_dn` if present, and if
not falls back to `user_search.base_dn`.

`user_search.pool.health_check.interval`::
The interval to perform background checks of connections in the pool.
Defaults to `60s`.

`group_search.base_dn`::
The container DN to search for groups in which the user has membership. When
this element is absent, Security searches for the attribute specified by
@@ -353,6 +361,14 @@ The domain name of Active Directory. The cluster can derive the URL and
`user_search_dn` fields from values in this element if those fields are not
otherwise specified. Required.

`bind_dn`::
The DN of the user that will be used to bind to Active Directory and perform searches.
Defaults to Empty.

`bind_password`::
The password for the user that will be used to bind to Active Directory.
Defaults to Empty.

`unmapped_groups_as_roles`::
Takes a boolean variable. When this element is set to `true`, the names of any
LDAP groups that are not referenced in a role-mapping _file_ are used as role
@@ -380,12 +396,10 @@ filter looks up `user` objects with either `sAMAccountName` or

`user_search.upn_filter`::
Specifies a filter to use to lookup a user given a user principal name.
The default filter looks up `user` objects with either
a matching `userPrincipalName` or a `sAMAccountName` matching the account
portion of the user principal name. If specified, this
The default filter looks up `user` objects with
a matching `userPrincipalName`. If specified, this
must be a valid LDAP user search filter, for example
`(&(objectClass=user)(sAMAccountName={0}))`. `{0}` is the value preceding the
`@` sign in the user principal name and `{1}` is the full user principal name
`(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is the full user principal name
provided by the user.

`user_search.down_level_filter`::
@@ -395,6 +409,32 @@ Specifies a filter to use to lookup a user given a down level logon name
must be a valid LDAP user search filter, for example
`(&(objectClass=user)(sAMAccountName={0}))`.

`user_search.pool.enabled`::
Enables or disables connection pooling for user search. When
disabled a new connection is created for every search. The
default is `true` when `bind_dn` is provided.

`user_search.pool.size`::
The maximum number of connections to the Active Directory server to allow in the
connection pool. Defaults to `20`.

`user_search.pool.initial_size`::
The initial number of connections to create to the Active Directory server on startup.
Defaults to `0`.

`user_search.pool.health_check.enabled`::
Flag to enable or disable a health check on Active Directory connections in the connection
pool. Connections are checked in the background at the specified interval.
Defaults to `true`.

`user_search.pool.health_check.dn`::
The distinguished name to be retrieved as part of the health check.
Defaults to the value of `bind_dn` if it is a distinguished name.

`user_search.pool.health_check.interval`::
The interval to perform background checks of connections in the pool.
Defaults to `60s`.

`group_search.base_dn`::
The context to search for groups in which the user has membership. Defaults
to the root of the Active Directory domain.
@@ -2,7 +2,7 @@
=== Watching Event Data

If you are indexing event data, such as log messages, network traffic, or a web feed, you can create a watch that emails notifications when certain events occur.
For example, if you index a feed of RSPVs for meetup events happening around the world, you can create a watch that alerts you to interesting events.
For example, if you index a feed of RSVPs for meetup events happening around the world, you can create a watch that alerts you to interesting events.

To index the meetup data, you can use https://www.elastic.co/products/logstash[Logstash] to ingest live data from the Meetup.com streaming API, `http://stream.meetup.com/2/rsvps`.
@@ -28,13 +28,12 @@ dependencyLicenses {
  mapping from: /bc.*/, to: 'bouncycastle'
  mapping from: /owasp-java-html-sanitizer.*/, to: 'owasp-java-html-sanitizer'
  mapping from: /transport-netty.*/, to: 'elasticsearch'
  mapping from: /rest.*/, to: 'elasticsearch'
  mapping from: /elasticsearch-rest-client.*/, to: 'elasticsearch'
  mapping from: /http.*/, to: 'httpclient' // pulled in by rest client
  mapping from: /commons-.*/, to: 'commons' // pulled in by rest client
  mapping from: /sniffer.*/, to: 'elasticsearch'
  ignoreSha 'rest'
  ignoreSha 'elasticsearch-rest-client'
  ignoreSha 'transport-netty4'
  ignoreSha 'sniffer'
  ignoreSha 'elasticsearch-rest-client-sniffer'
}

licenseHeaders {
@@ -75,8 +74,8 @@ dependencies {
  testCompile 'com.google.code.findbugs:jsr305:3.0.1'

  // monitoring deps
  compile "org.elasticsearch.client:rest:${version}"
  compile "org.elasticsearch.client:sniffer:${version}"
  compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
  compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}"

  // ml deps
  compile 'net.sf.supercsv:super-csv:2.4.0'
@@ -206,54 +205,21 @@ integTestCluster {
  setting 'xpack.monitoring.exporters._local.type', 'local'
  setting 'xpack.monitoring.exporters._local.enabled', 'false'
  setting 'xpack.monitoring.collection.interval', '-1'
  keystoreSetting 'bootstrap.password', 'x-pack-test-password'
  distribution = 'zip' // this is important since we use the reindex module in ML

  setupCommand 'setupTestUser', 'bin/x-pack/users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser'

  waitCondition = { NodeInfo node, AntBuilder ant ->
    File tmpFile = new File(node.cwd, 'wait.success')

    for (int i = 0; i < 10; i++) {
      HttpURLConnection httpURLConnection = null;
      try {
        httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_xpack/security/user/elastic/_password")
            .openConnection();
        httpURLConnection.setRequestProperty("Authorization", "Basic " +
            Base64.getEncoder().encodeToString("elastic:".getBytes(StandardCharsets.UTF_8)));
        httpURLConnection.setRequestMethod("PUT");
        httpURLConnection.setDoOutput(true);
        httpURLConnection.setRequestProperty("Content-Type", "application/json; charset=UTF-8");

        httpURLConnection.connect();
        OutputStream out = httpURLConnection.getOutputStream();
        out.write("{\"password\": \"x-pack-test-password\"}".getBytes(StandardCharsets.UTF_8));
        out.close()

        if (httpURLConnection.getResponseCode() == 200) {
          break
        }
      } catch (Exception e) {
        httpURLConnection.disconnect()
        if (i == 9) {
          logger.error("final attempt to set elastic password", e)
        } else {
          logger.debug("failed to set elastic password", e)
        }
      } finally {
        if (httpURLConnection != null) {
          httpURLConnection.disconnect();
        }
      }

      // did not start, so wait a bit before trying again
      Thread.sleep(500L);
    }

    for (int i = 0; i < 10; i++) {
      // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned
      HttpURLConnection httpURLConnection = null;
      try {
        httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection();
        httpURLConnection.setRequestProperty("Authorization", "Basic " +
            Base64.getEncoder().encodeToString("elastic:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
            Base64.getEncoder().encodeToString("x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)));
        httpURLConnection.setRequestMethod("GET");
        httpURLConnection.connect();
        if (httpURLConnection.getResponseCode() == 200) {
@@ -363,4 +329,5 @@ run {
  setting 'xpack.security.enabled', 'true'
  setting 'xpack.monitoring.enabled', 'true'
  setting 'xpack.watcher.enabled', 'true'
  keystoreSetting 'bootstrap.password', 'password'
}
@@ -47,7 +47,7 @@ public class TextTemplate implements ToXContent {
        if (params == null) {
            params = new HashMap<>();
        }
        this.script = new Script(type, Script.DEFAULT_TEMPLATE_LANG, template, options, params);
        this.script = new Script(type, type == ScriptType.STORED ? null : Script.DEFAULT_TEMPLATE_LANG, template, options, params);
        this.inlineTemplate = null;
    }
@@ -116,13 +116,6 @@ public class TextTemplate implements ToXContent {
            return new TextTemplate(parser.text());
        } else {
            Script template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG);

            // for deprecation of stored script namespaces the default lang is ignored,
            // so the template lang must be set for a stored script
            if (template.getType() == ScriptType.STORED) {
                template = new Script(ScriptType.STORED, Script.DEFAULT_TEMPLATE_LANG, template.getIdOrCode(), template.getParams());
            }

            return new TextTemplate(template);
        }
    }
@@ -51,7 +51,8 @@ public class TextTemplateEngine extends AbstractComponent {

            options.put(Script.CONTENT_TYPE_OPTION, mediaType);
        }
        Script script = new Script(textTemplate.getType(), "mustache", template, options, mergedModel);
        Script script = new Script(textTemplate.getType(),
                textTemplate.getType() == ScriptType.STORED ? null : "mustache", template, options, mergedModel);
        TemplateScript.Factory compiledTemplate = service.compile(script, Watcher.SCRIPT_TEMPLATE_CONTEXT);
        return compiledTemplate.newInstance(model).execute();
    }
@@ -6,15 +6,13 @@
package org.elasticsearch.xpack.deprecation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -35,39 +33,27 @@ public class DeprecationChecks {
    private DeprecationChecks() {
    }

    static List<BiFunction<List<NodeInfo>, ClusterState, DeprecationIssue>> CLUSTER_SETTINGS_CHECKS =
    static List<Function<ClusterState, DeprecationIssue>> CLUSTER_SETTINGS_CHECKS =
        Collections.unmodifiableList(Arrays.asList(
            // STUB: TODO(talevy): add checks
            // STUB
        ));

    static List<BiFunction<List<NodeInfo>, ClusterState, DeprecationIssue>> NODE_SETTINGS_CHECKS =
    static List<BiFunction<List<NodeInfo>, List<NodeStats>, DeprecationIssue>> NODE_SETTINGS_CHECKS =
        Collections.unmodifiableList(Arrays.asList(
            // STUB: TODO(talevy): add checks
            // STUB
        ));

    @SuppressWarnings("unchecked")
    static List<Function<IndexMetaData, DeprecationIssue>> INDEX_SETTINGS_CHECKS =
        Collections.unmodifiableList(Arrays.asList(
            indexMetaData -> {
                List<String> issues = new ArrayList<>();
                if (indexMetaData.getCreationVersion().onOrBefore(Version.V_5_6_0)) {
                    for (ObjectCursor<MappingMetaData> mappingMetaData : indexMetaData.getMappings().values()) {
                        Map<String, Object> sourceAsMap = mappingMetaData.value.sourceAsMap();
                        ((Map<String, Object>) sourceAsMap.getOrDefault("properties", Collections.emptyMap()))
                            .forEach((key, value) -> {
                                Map<String, Object> valueMap = ((Map<String, Object>) value);
                                if ("boolean".equals(valueMap.get("type"))) {
                                    issues.add("type: " + mappingMetaData.value.type() + ", field: " + key);
                                }
                            });
                    }
                }
                return new DeprecationIssue(DeprecationIssue.Level.INFO, "Coercion of boolean fields",
                    "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                    "breaking_60_mappings_changes.html#_coercion_of_boolean_fields",
                    Arrays.toString(issues.toArray()));
            }
        ));
            IndexDeprecationChecks::allMetaFieldIsDisabledByDefaultCheck,
            IndexDeprecationChecks::baseSimilarityDefinedCheck,
            IndexDeprecationChecks::coercionCheck,
            IndexDeprecationChecks::dynamicTemplateWithMatchMappingTypeCheck,
            IndexDeprecationChecks::includeInAllCheck,
            IndexDeprecationChecks::indexSharedFileSystemCheck,
            IndexDeprecationChecks::indexStoreTypeCheck,
            IndexDeprecationChecks::storeThrottleSettingsCheck));

    /**
     * helper utility function to reduce repeat of running a specific {@link Set} of checks.
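
These check lists back the deprecation info API. A sketch of querying it from Python, assuming the `_xpack/migration/deprecations` endpoint of this release line and a hypothetical index name:

[source, python]
------------------------------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch()

# GET /_xpack/migration/deprecations returns the cluster-, node- and
# index-scoped issues produced by the check lists above.
report = client.transport.perform_request(
    'GET', '/_xpack/migration/deprecations')
for issue in report.get('index_settings', {}).get('my-index', []):
    print(issue['level'], issue['message'], issue['url'])
------------------------------------------------------------
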
@@ -6,12 +6,17 @@
package org.elasticsearch.xpack.deprecation;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
@@ -34,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.node.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackPlugin;
@@ -153,6 +159,7 @@ public class DeprecationInfoAction extends Action<DeprecationInfoAction.Request,
     * cluster.
     *
     * @param nodesInfo The list of {@link NodeInfo} metadata objects for retrieving node-level information
     * @param nodesStats The list of {@link NodeStats} metadata objects for retrieving node-level information
     * @param state The cluster state
     * @param indexNameExpressionResolver Used to resolve indices into their concrete names
     * @param indices The list of index expressions to evaluate using `indexNameExpressionResolver`
@@ -163,16 +170,16 @@ public class DeprecationInfoAction extends Action<DeprecationInfoAction.Request,
     * concrete indices
     * @return The list of deprecation issues found in the cluster
     */
    static DeprecationInfoAction.Response from(List<NodeInfo> nodesInfo, ClusterState state,
    static DeprecationInfoAction.Response from(List<NodeInfo> nodesInfo, List<NodeStats> nodesStats, ClusterState state,
                                               IndexNameExpressionResolver indexNameExpressionResolver,
                                               String[] indices, IndicesOptions indicesOptions,
                                               List<BiFunction<List<NodeInfo>, ClusterState, DeprecationIssue>> clusterSettingsChecks,
                                               List<BiFunction<List<NodeInfo>, ClusterState, DeprecationIssue>> nodeSettingsChecks,
                                               List<Function<ClusterState, DeprecationIssue>> clusterSettingsChecks,
                                               List<BiFunction<List<NodeInfo>, List<NodeStats>, DeprecationIssue>> nodeSettingsChecks,
                                               List<Function<IndexMetaData, DeprecationIssue>> indexSettingsChecks) {
        List<DeprecationIssue> clusterSettingsIssues = filterChecks(clusterSettingsChecks,
            (c) -> c.apply(nodesInfo, state));
            (c) -> c.apply(state));
        List<DeprecationIssue> nodeSettingsIssues = filterChecks(nodeSettingsChecks,
            (c) -> c.apply(nodesInfo, state));
            (c) -> c.apply(nodesInfo, nodesStats));

        String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, indices);
@@ -305,17 +312,24 @@ public class DeprecationInfoAction extends Action<DeprecationInfoAction.Request,
    protected final void masterOperation(final Request request, ClusterState state, final ActionListener<Response> listener) {
        if (licenseState.isDeprecationAllowed()) {
            NodesInfoRequest nodesInfoRequest = new NodesInfoRequest("_local").settings(true).plugins(true);
            client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodesInfoResponse -> {
                // if there's a failure, then we failed to work with the
                // _local node (guaranteed a single exception)
                if (nodesInfoResponse.hasFailures()) {
                    throw nodesInfoResponse.failures().get(0);
                }
            NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("_local").fs(true);

                listener.onResponse(Response.from(nodesInfoResponse.getNodes(), state,
                    indexNameExpressionResolver, request.indices(), request.indicesOptions(),
                    CLUSTER_SETTINGS_CHECKS, NODE_SETTINGS_CHECKS, INDEX_SETTINGS_CHECKS));
            }, listener::onFailure));
            client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(
                nodesInfoResponse -> {
                    if (nodesInfoResponse.hasFailures()) {
                        throw nodesInfoResponse.failures().get(0);
                    }
                    client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.wrap(
                        nodesStatsResponse -> {
                            if (nodesStatsResponse.hasFailures()) {
                                throw nodesStatsResponse.failures().get(0);
                            }
                            listener.onResponse(Response.from(nodesInfoResponse.getNodes(),
                                nodesStatsResponse.getNodes(), state, indexNameExpressionResolver,
                                request.indices(), request.indicesOptions(), CLUSTER_SETTINGS_CHECKS,
                                NODE_SETTINGS_CHECKS, INDEX_SETTINGS_CHECKS));
                        }, listener::onFailure));
                }, listener::onFailure));
        } else {
            listener.onFailure(LicenseUtils.newComplianceException(XPackPlugin.DEPRECATION));
        }
@@ -0,0 +1,241 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.deprecation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.index.mapper.DynamicTemplate;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;

/**
 * Index-specific deprecation checks
 */
public class IndexDeprecationChecks {

    private static void fieldLevelMappingIssue(IndexMetaData indexMetaData, BiConsumer<MappingMetaData, Map<String, Object>> checker) {
        for (ObjectCursor<MappingMetaData> mappingMetaData : indexMetaData.getMappings().values()) {
            Map<String, Object> sourceAsMap = mappingMetaData.value.sourceAsMap();
            checker.accept(mappingMetaData.value, sourceAsMap);
        }
    }

    /**
     * iterates through the "properties" field of mappings and returns any predicates that match in the
     * form of issue-strings.
     *
     * @param type the document type
     * @param parentMap the mapping to read properties from
     * @param predicate the predicate to check against for issues, issue is returned if predicate evaluates to true
     * @return a list of issues found in fields
     */
    @SuppressWarnings("unchecked")
    private static List<String> findInPropertiesRecursively(String type, Map<String, Object> parentMap,
                                                            Function<Map<?,?>, Boolean> predicate) {
        List<String> issues = new ArrayList<>();
        Map<?, ?> properties = (Map<?, ?>) parentMap.get("properties");
        if (properties == null) {
            return issues;
        }
        for (Map.Entry<?, ?> entry : properties.entrySet()) {
            Map<String, Object> valueMap = (Map<String, Object>) entry.getValue();
            if (predicate.apply(valueMap)) {
                issues.add("[type: " + type + ", field: " + entry.getKey() + "]");
            }

            Map<?, ?> values = (Map<?, ?>) valueMap.get("fields");
            if (values != null) {
                for (Map.Entry<?, ?> multifieldEntry : values.entrySet()) {
                    Map<String, Object> multifieldValueMap = (Map<String, Object>) multifieldEntry.getValue();
                    if (predicate.apply(multifieldValueMap)) {
                        issues.add("[type: " + type + ", field: " + entry.getKey() + ", multifield: " + multifieldEntry.getKey() + "]");
                    }
                    if (multifieldValueMap.containsKey("properties")) {
                        issues.addAll(findInPropertiesRecursively(type, multifieldValueMap, predicate));
                    }
                }
            }
            if (valueMap.containsKey("properties")) {
                issues.addAll(findInPropertiesRecursively(type, valueMap, predicate));
            }
        }

        return issues;
    }

    static DeprecationIssue coercionCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            List<String> issues = new ArrayList<>();
            fieldLevelMappingIssue(indexMetaData, (mappingMetaData, sourceAsMap) -> {
                issues.addAll(findInPropertiesRecursively(mappingMetaData.type(), sourceAsMap,
                        property -> "boolean".equals(property.get("type"))));
            });
            if (issues.size() > 0) {
                return new DeprecationIssue(DeprecationIssue.Level.INFO, "Coercion of boolean fields",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_mappings_changes.html#_coercion_of_boolean_fields",
                        issues.toString());
            }
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    static DeprecationIssue allMetaFieldIsDisabledByDefaultCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            List<String> issues = new ArrayList<>();
            fieldLevelMappingIssue(indexMetaData, (mappingMetaData, sourceAsMap) -> {
                Map<String, Object> allMetaData = (Map<String, Object>) sourceAsMap.getOrDefault("_all", Collections.emptyMap());
                Object enabledObj = allMetaData.get("enabled");
                if (enabledObj != null) {
                    enabledObj = Booleans.parseBooleanLenient(enabledObj.toString(),
                            AllFieldMapper.Defaults.ENABLED.enabled);
                }
                if (Boolean.TRUE.equals(enabledObj)) {
                    issues.add(mappingMetaData.type());
                }
            });
            if (issues.size() > 0) {
                return new DeprecationIssue(DeprecationIssue.Level.INFO,
                        "The _all meta field is disabled by default on indices created in 6.0",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_mappings_changes.html#_the_literal__all_literal_meta_field_is_now_disabled_by_default",
                        "types: " + issues.toString());
            }
        }
        return null;
    }

    static DeprecationIssue includeInAllCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            List<String> issues = new ArrayList<>();
            fieldLevelMappingIssue(indexMetaData, (mappingMetaData, sourceAsMap) -> {
                issues.addAll(findInPropertiesRecursively(mappingMetaData.type(), sourceAsMap,
                        property -> property.containsKey("include_in_all")));
            });
            if (issues.size() > 0) {
                return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
                        "The [include_in_all] mapping parameter is now disallowed",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_mappings_changes.html#_the_literal_include_in_all_literal_mapping_parameter_is_now_disallowed",
                        issues.toString());
            }
        }
        return null;
    }

    static DeprecationIssue dynamicTemplateWithMatchMappingTypeCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            List<String> issues = new ArrayList<>();
            fieldLevelMappingIssue(indexMetaData, (mappingMetaData, sourceAsMap) -> {
                List<?> dynamicTemplates = (List<?>) mappingMetaData
                        .getSourceAsMap().getOrDefault("dynamic_templates", Collections.emptyList());
                for (Object template : dynamicTemplates) {
                    for (Map.Entry<?, ?> prop : ((Map<?, ?>) template).entrySet()) {
                        Map<?, ?> val = (Map<?, ?>) prop.getValue();
                        if (val.containsKey("match_mapping_type")) {
                            Object mappingMatchType = val.get("match_mapping_type");
                            boolean isValidMatchType = Arrays.stream(DynamicTemplate.XContentFieldType.values())
                                    .anyMatch(v -> v.toString().equals(mappingMatchType));
                            if (isValidMatchType == false) {
                                issues.add("type: " + mappingMetaData.type() + ", dynamicFieldDefinition "
                                        + prop.getKey() + ", unknown match_mapping_type[" + mappingMatchType + "]");
                            }
                        }
                    }
                }
            });
            if (issues.size() > 0) {
                return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
                        "Unrecognized match_mapping_type options not silently ignored",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_mappings_changes.html" +
                        "#_unrecognized_literal_match_mapping_type_literal_options_not_silently_ignored",
                        issues.toString());
            }
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    static DeprecationIssue baseSimilarityDefinedCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            Settings settings = indexMetaData.getSettings().getAsSettings("index.similarity.base");
            if (settings.size() > 0) {
                return new DeprecationIssue(DeprecationIssue.Level.WARNING,
                        "The base similarity is now ignored as coords and query normalization have been removed." +
                        " If provided, this setting will be ignored and issue a deprecation warning",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_settings_changes.html#_similarity_settings", null);
            }
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    static DeprecationIssue indexStoreTypeCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1) &&
                indexMetaData.getSettings().get("index.store.type") != null) {
            return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
                    "The default index.store.type has been removed. If you were using it, " +
                    "we advise that you simply remove it from your index settings and Elasticsearch " +
                    "will use the best store implementation for your operating system.",
                    "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                    "breaking_60_settings_changes.html#_store_settings", null);
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    static DeprecationIssue storeThrottleSettingsCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) {
            Settings settings = indexMetaData.getSettings();
            Settings throttleSettings = settings.getAsSettings("index.store.throttle");
            ArrayList<String> foundSettings = new ArrayList<>();
            if (throttleSettings.get("max_bytes_per_sec") != null) {
                foundSettings.add("index.store.throttle.max_bytes_per_sec");
            }
            if (throttleSettings.get("type") != null) {
                foundSettings.add("index.store.throttle.type");
            }

            if (foundSettings.isEmpty() == false) {
                return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
                        "index.store.throttle settings are no longer recognized. these settings should be removed",
                        "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                        "breaking_60_settings_changes.html#_store_throttling_settings", "present settings: " + foundSettings);
            }
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    static DeprecationIssue indexSharedFileSystemCheck(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1) &&
                indexMetaData.getSettings().get("index.shared_filesystem") != null) {
            return new DeprecationIssue(DeprecationIssue.Level.CRITICAL,
                    "[index.shared_filesystem] setting should be removed",
                    "https://www.elastic.co/guide/en/elasticsearch/reference/master/" +
                    "breaking_60_settings_changes.html#_shadow_replicas_have_been_removed", null);
        }
        return null;
    }
}
@@ -25,16 +25,16 @@ import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.Sampler;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms.Bucket;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.ml.action;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ResourceAlreadyExistsException;
@@ -27,12 +28,14 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.AliasOrIndex;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedSupplier;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -491,9 +494,12 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
        }
        String[] concreteIndices = aliasOrIndex.getIndices().stream().map(IndexMetaData::getIndex).map(Index::getName)
                .toArray(String[]::new);
        if (state.metaData().findMappings(concreteIndices, new String[] {ElasticsearchMappings.DOC_TYPE}).isEmpty()) {

        String[] indicesThatRequireAnUpdate = mappingRequiresUpdate(state, concreteIndices, Version.CURRENT, logger);

        if (indicesThatRequireAnUpdate.length > 0) {
            try (XContentBuilder mapping = mappingSupplier.get()) {
                PutMappingRequest putMappingRequest = new PutMappingRequest(concreteIndices);
                PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate);
                putMappingRequest.type(ElasticsearchMappings.DOC_TYPE);
                putMappingRequest.source(mapping);
                client.execute(PutMappingAction.INSTANCE, putMappingRequest, ActionListener.wrap(
@@ -502,13 +508,14 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
                        listener.onResponse(true);
                    } else {
                        listener.onFailure(new ElasticsearchException("Attempt to put missing mapping in indices "
                                + Arrays.toString(concreteIndices) + " was not acknowledged"));
                                + Arrays.toString(indicesThatRequireAnUpdate) + " was not acknowledged"));
                    }
                }, listener::onFailure));
            } catch (IOException e) {
                listener.onFailure(e);
            }
        } else {
            logger.trace("Mappings are up to date.");
            listener.onResponse(true);
        }
    }
@ -747,4 +754,51 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
|
|||
static boolean nodeSupportsJobVersion(Version nodeVersion, Version jobVersion) {
|
||||
return nodeVersion.onOrAfter(Version.V_5_5_0);
|
||||
}
|
||||
|
||||
static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion, Logger logger) {
|
||||
List<String> indicesToUpdate = new ArrayList<>();
|
||||
|
||||
ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> currentMapping = state.metaData().findMappings(concreteIndices,
|
||||
new String[] { ElasticsearchMappings.DOC_TYPE });
|
||||
|
||||
for (String index : concreteIndices) {
|
||||
ImmutableOpenMap<String, MappingMetaData> innerMap = currentMapping.get(index);
|
||||
if (innerMap != null) {
|
||||
MappingMetaData metaData = innerMap.get(ElasticsearchMappings.DOC_TYPE);
|
||||
try {
|
||||
Map<String, Object> meta = (Map<String, Object>) metaData.sourceAsMap().get("_meta");
|
||||
if (meta != null) {
|
||||
String versionString = (String) meta.get("version");
|
||||
if (versionString == null) {
|
||||
logger.info("Version of mappings for [{}] not found, recreating", index);
|
||||
indicesToUpdate.add(index);
|
||||
continue;
|
||||
}
|
||||
|
||||
Version mappingVersion = Version.fromString(versionString);
|
||||
|
||||
if (mappingVersion.onOrAfter(minVersion)) {
|
||||
continue;
|
||||
} else {
|
||||
logger.info("Mappings for [{}] are outdated [{}], updating it[{}].", index, mappingVersion, Version.CURRENT);
|
||||
indicesToUpdate.add(index);
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
logger.info("Version of mappings for [{}] not found, recreating", index);
|
||||
indicesToUpdate.add(index);
|
||||
continue;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error(new ParameterizedMessage("Failed to retrieve mapping version for [{}], recreating", index), e);
|
||||
indicesToUpdate.add(index);
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
logger.info("No mappings found for [{}], recreating", index);
|
||||
indicesToUpdate.add(index);
|
||||
}
|
||||
}
|
||||
return indicesToUpdate.toArray(new String[indicesToUpdate.size()]);
|
||||
}
|
||||
}
|
||||
|
|
|
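The version gate above hinges on a "_meta": {"version": ...} block stored alongside each index mapping. As a minimal standalone sketch of that decision logic - plain Java, with a simplified string comparison standing in for Version.fromString(...).onOrAfter(...):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Decides which indices need their mappings re-put, based on the
// "_meta": {"version": "..."} block stored alongside each mapping.
final class MappingVersionCheck {
    // Hypothetical stand-in for Version.fromString(candidate).onOrAfter(min).
    static boolean onOrAfter(String candidate, String min) {
        return candidate.compareTo(min) >= 0; // lexicographic here; the real Version compares numerically
    }

    static List<String> indicesRequiringUpdate(Map<String, Map<String, Object>> metaByIndex, String minVersion) {
        List<String> toUpdate = new ArrayList<>();
        for (Map.Entry<String, Map<String, Object>> e : metaByIndex.entrySet()) {
            Map<String, Object> meta = e.getValue();
            Object version = meta == null ? null : meta.get("version");
            // Missing _meta or missing version means the mapping predates versioning: recreate it.
            if (!(version instanceof String) || !onOrAfter((String) version, minVersion)) {
                toUpdate.add(e.getKey());
            }
        }
        return toUpdate;
    }
}
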
@@ -43,6 +43,8 @@ import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

/**

@@ -191,13 +193,17 @@ public class JobManager extends AbstractComponent {

@Override
public void onFailure(Exception e) {
if (e instanceof IllegalArgumentException
&& e.getMessage().matches("mapper \\[.*\\] of different type, current_type \\[.*\\], merged_type \\[.*\\]")) {
actionListener.onFailure(ExceptionsHelper.badRequestException(Messages.JOB_CONFIG_MAPPING_TYPE_CLASH, e));
} else {
actionListener.onFailure(e);
if (e instanceof IllegalArgumentException) {
// the underlying error differs depending on which way around the clashing fields are seen
Matcher matcher = Pattern.compile("(?:mapper|Can't merge a non object mapping) \\[(.*)\\] (?:of different type, " +
"current_type \\[.*\\], merged_type|with an object mapping) \\[.*\\]").matcher(e.getMessage());
if (matcher.matches()) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_MAPPING_TYPE_CLASH, matcher.group(1));
actionListener.onFailure(ExceptionsHelper.badRequestException(msg, e));
return;
}
}

actionListener.onFailure(e);
}
};

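A quick way to sanity-check the widened pattern is to run it against both failure-message shapes it is meant to capture; a small self-contained sketch (the sample messages are illustrative, not verbatim engine output):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MappingClashPatternDemo {
    private static final Pattern CLASH = Pattern.compile(
            "(?:mapper|Can't merge a non object mapping) \\[(.*)\\] (?:of different type, " +
            "current_type \\[.*\\], merged_type|with an object mapping) \\[.*\\]");

    public static void main(String[] args) {
        // One message per direction of the clash; both should yield the field name.
        String[] samples = {
            "mapper [foo] of different type, current_type [keyword], merged_type [long]",
            "Can't merge a non object mapping [foo] with an object mapping [foo]"
        };
        for (String s : samples) {
            Matcher m = CLASH.matcher(s);
            System.out.println(m.matches() ? "clashing field: " + m.group(1) : "no match: " + s);
        }
    }
}
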
@@ -601,8 +601,8 @@ public class AnalysisConfig implements ToXContentObject, Writeable {
String prevTermField = null;
for (String termField : termFields) {
if (prevTermField != null && termField.startsWith(prevTermField + ".")) {
throw ExceptionsHelper.badRequestException("Fields " + prevTermField + " and " + termField +
" cannot both be used in the same analysis_config");
throw ExceptionsHelper.badRequestException("Fields [" + prevTermField + "] and [" + termField +
"] cannot both be used in the same analysis_config");
}
prevTermField = termField;
}

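The loop relies on the term fields being sorted, so a field and any of its dotted sub-fields end up adjacent; a self-contained sketch of the same check:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class TermFieldClashCheck {
    // Throws if any field is a dotted sub-field of the field preceding it in sorted order.
    static void checkNoClash(List<String> termFields) {
        List<String> sorted = termFields.stream().sorted().collect(Collectors.toList());
        String prev = null;
        for (String field : sorted) {
            if (prev != null && field.startsWith(prev + ".")) {
                throw new IllegalArgumentException(
                        "Fields [" + prev + "] and [" + field + "] cannot both be used in the same analysis_config");
            }
            prev = field;
        }
    }

    public static void main(String[] args) {
        checkNoClash(Arrays.asList("airline", "airline.code")); // throws: airline.code is a sub-field of airline
    }
}
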
@@ -135,8 +135,7 @@ public final class Messages {
public static final String JOB_CONFIG_DETECTOR_OVER_DISALLOWED =
"''over'' is not a permitted value for {0}";
public static final String JOB_CONFIG_MAPPING_TYPE_CLASH =
"A field has a different mapping type to an existing field with the same name. " +
"Use the 'results_index_name' setting to assign the job to another index";
"This job would cause a mapping clash with existing field [{0}] - avoid the clash by assigning a dedicated results index";
public static final String JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG =
"data_description.time_field may not be used in the analysis_config";

@@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.ml.job.persistence;

import org.elasticsearch.Version;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.job.config.Detector;
import org.elasticsearch.xpack.ml.job.config.Job;

@@ -111,10 +112,24 @@ public class ElasticsearchMappings {
.endArray();
}

/**
* Inserts "_meta" containing useful information like the version into the mapping
* template.
*
* @param builder The builder for the mappings
* @throws IOException On write error
*/
public static void addMetaInformation(XContentBuilder builder) throws IOException {
builder.startObject("_meta")
.field("version", Version.CURRENT)
.endObject();
}

public static XContentBuilder docMapping() throws IOException {
XContentBuilder builder = jsonBuilder();
builder.startObject();
builder.startObject(DOC_TYPE);
addMetaInformation(builder);
addDefaultMapping(builder);
builder.startObject(PROPERTIES);

@@ -523,12 +538,15 @@ public class ElasticsearchMappings {
* by knowing the ID of a particular document.
*/
public static XContentBuilder stateMapping() throws IOException {
return jsonBuilder()
.startObject()
.startObject(DOC_TYPE)
.field(ENABLED, false)
.endObject()
.endObject();
XContentBuilder builder = jsonBuilder();
builder.startObject();
builder.startObject(DOC_TYPE);
addMetaInformation(builder);
builder.field(ENABLED, false);
builder.endObject();
builder.endObject();

return builder;
}

/**

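After this change every generated mapping carries a version stamp that the OpenJobAction check above can read back. The resulting JSON looks roughly like the following (illustrative values only):

public class MetaMappingExample {
    public static void main(String[] args) {
        // Approximate shape of the "doc" mapping once addMetaInformation() has run;
        // the version value tracks Version.CURRENT at the time the mapping was written.
        String mapping =
            "{\n" +
            "  \"doc\": {\n" +
            "    \"_meta\": { \"version\": \"6.0.0-beta1\" },\n" +
            "    \"properties\": { /* field mappings */ }\n" +
            "  }\n" +
            "}";
        System.out.println(mapping);
    }
}
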
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchRequest;

@@ -24,7 +25,6 @@ import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;

@@ -33,7 +33,10 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.state.CategorizerState;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;

public class JobStorageDeletionTask extends Task {

@@ -176,26 +179,31 @@ public class JobStorageDeletionTask extends Task {
private void deleteAliases(String jobId, Client client, ActionListener<Boolean> finishedHandler) {
final String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId);
final String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId);
final String indexPattern = AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*";

IndicesAliasesRequest request = new IndicesAliasesRequest().addAliasAction(
IndicesAliasesRequest.AliasActions.remove().aliases(readAliasName, writeAliasName).indices(indexPattern));
client.admin().indices().aliases(request, ActionListener.wrap(
response -> finishedHandler.onResponse(true),
e -> {
if (e instanceof AliasesNotFoundException) {
logger.warn("[{}] Aliases {} not found. Continuing to delete job.", jobId,
((AliasesNotFoundException) e).getResourceId());
// first find the concrete indices associated with the aliases
GetAliasesRequest aliasesRequest = new GetAliasesRequest().aliases(readAliasName, writeAliasName)
.indicesOptions(IndicesOptions.lenientExpandOpen());
client.admin().indices().getAliases(aliasesRequest, ActionListener.wrap(
getAliasesResponse -> {
Set<String> aliases = new HashSet<>();
getAliasesResponse.getAliases().valuesIt().forEachRemaining(
metaDataList -> metaDataList.forEach(metadata -> aliases.add(metadata.getAlias())));
if (aliases.isEmpty()) {
// don't error if the job's aliases have already been deleted - carry on and delete the rest of the job's data
finishedHandler.onResponse(true);
} else if (e instanceof IndexNotFoundException) {
logger.warn("[{}] Index [{}] referenced by alias not found. Continuing to delete job.", jobId,
((IndexNotFoundException) e).getIndex().getName());
finishedHandler.onResponse(true);
} else {
// all other exceptions should die
logger.error("[" + jobId + "] Failed to delete aliases [" + readAliasName + ", " + writeAliasName + "].", e);
finishedHandler.onFailure(e);
return;
}
}));
List<String> indices = new ArrayList<>();
getAliasesResponse.getAliases().keysIt().forEachRemaining(indices::add);
// remove the aliases from the concrete indices found in the first step
IndicesAliasesRequest removeRequest = new IndicesAliasesRequest().addAliasAction(
IndicesAliasesRequest.AliasActions.remove()
.aliases(aliases.toArray(new String[aliases.size()]))
.indices(indices.toArray(new String[indices.size()])));
client.admin().indices().aliases(removeRequest, ActionListener.wrap(
removeResponse -> finishedHandler.onResponse(true),
finishedHandler::onFailure));
},
finishedHandler::onFailure));
}
}

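The rewritten deleteAliases is a resolve-then-remove flow: first look up which concrete indices still carry the job's aliases, then issue one removal request scoped to exactly those indices, treating "nothing found" as success. A plain-Java sketch of that control flow, with hypothetical lookupAliases/removeAliases callbacks standing in for the async client calls:

import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

final class TwoStepAliasDelete {
    // lookupAliases: alias names -> (index -> aliases present on it), delivered asynchronously.
    static void deleteAliases(List<String> aliasNames,
                              BiConsumer<List<String>, Consumer<Map<String, List<String>>>> lookupAliases,
                              BiConsumer<Map<String, List<String>>, Runnable> removeAliases,
                              Runnable onDone) {
        lookupAliases.accept(aliasNames, found -> {
            if (found.isEmpty()) {
                onDone.run(); // aliases already gone: nothing to remove, and not an error
            } else {
                removeAliases.accept(found, onDone); // remove only from indices that still have them
            }
        });
    }
}
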
@@ -50,7 +50,6 @@ public class NativeController {

private final CppLogMessageHandler cppLogHandler;
private final OutputStream commandStream;
private Thread logTailThread;

NativeController(Environment env, NamedPipeHelper namedPipeHelper) throws IOException {
ProcessPipes processPipes = new ProcessPipes(env, namedPipeHelper, ProcessCtrl.CONTROLLER, null,

@@ -61,15 +60,22 @@ public class NativeController {
}

void tailLogsInThread() {
logTailThread = new Thread(() -> {
try {
cppLogHandler.tailStream();
cppLogHandler.close();
} catch (IOException e) {
LOGGER.error("Error tailing C++ controller logs", e);
}
LOGGER.info("Native controller process has stopped - no new native processes can be started");
});
final Thread logTailThread = new Thread(
() -> {
try {
cppLogHandler.tailStream();
cppLogHandler.close();
} catch (IOException e) {
LOGGER.error("Error tailing C++ controller logs", e);
}
LOGGER.info("Native controller process has stopped - no new native processes can be started");
},
"ml-cpp-log-tail-thread");
/*
* This thread is created on the main thread so would default to being a user thread which could prevent the JVM from exiting if
* this thread were to still be running during shutdown. As such, we mark it as a daemon thread.
*/
logTailThread.setDaemon(true);
logTailThread.start();
}

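The daemon/user-thread distinction the comment describes is easy to demonstrate in isolation: the JVM exits once all non-daemon threads finish, so a long-lived log tailer must be marked daemon before start(). A minimal sketch:

public class DaemonTailerDemo {
    public static void main(String[] args) {
        Thread tailer = new Thread(() -> {
            while (true) { // stands in for blocking on a log stream forever
                try {
                    Thread.sleep(1_000);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
        }, "log-tail-thread");
        tailer.setDaemon(true); // without this, main() returning would not end the process
        tailer.start();
        System.out.println("main exiting; the daemon tailer will not keep the JVM alive");
    }
}
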
@@ -49,7 +49,7 @@ public class ClusterAlertsUtil {
* The last time that all watches were updated. For now, all watches have been updated in the same version and should all be replaced
* together.
*/
public static final int LAST_UPDATED_VERSION = Version.V_6_0_0_alpha2.id;
public static final int LAST_UPDATED_VERSION = Version.V_6_0_0_beta1.id;

/**
* An unsorted list of Watch IDs representing resource files for Monitoring Cluster Alerts.

@@ -24,7 +24,7 @@ public final class MonitoringTemplateUtils {
* <p>
* It may be possible for this to diverge between templates and pipelines, but for now they're the same.
*/
public static final int LAST_UPDATED_VERSION = Version.V_6_0_0_alpha2.id;
public static final int LAST_UPDATED_VERSION = Version.V_6_0_0_beta1.id;

/**
* Current version of templates used in their name to differentiate from breaking changes (separate from product version).

@@ -5,6 +5,25 @@
*/
package org.elasticsearch.xpack.security;

import java.io.IOException;
import java.security.GeneralSecurityException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.ActionListener;

@@ -35,6 +54,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;

@@ -128,8 +148,8 @@ import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.security.bootstrap.BootstrapElasticPassword;
import org.elasticsearch.xpack.security.bootstrap.ContainerPasswordBootstrapCheck;
import org.elasticsearch.xpack.security.crypto.CryptoService;
import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction;

@@ -159,28 +179,8 @@ import org.elasticsearch.xpack.ssl.SSLService;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.security.GeneralSecurityException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.common.settings.Setting.groupSetting;
import static org.elasticsearch.xpack.XPackSettings.HTTP_SSL_ENABLED;

public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {

@@ -323,8 +323,8 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore,
anonymousUser, securityLifecycleService, threadPool.getThreadContext());
Map<String, Realm.Factory> realmFactories = new HashMap<>();
realmFactories.putAll(InternalRealms.getFactories(threadPool, resourceWatcherService,
sslService, nativeUsersStore, nativeRoleMappingStore));
realmFactories.putAll(InternalRealms.getFactories(threadPool, resourceWatcherService, sslService, nativeUsersStore,
nativeRoleMappingStore, securityLifecycleService));
for (XPackExtension extension : extensions) {
Map<String, Realm.Factory> newRealms = extension.getRealms(resourceWatcherService);
for (Map.Entry<String, Realm.Factory> entry : newRealms.entrySet()) {

@@ -369,6 +369,7 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
}
final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore,
reservedRolesStore, rolesProviders, threadPool.getThreadContext(), licenseState);
securityLifecycleService.addSecurityIndexHealthChangeListener(allRolesStore::onSecurityIndexHealthChange);
// to keep things simple, just invalidate all cached entries on license change. this happens so rarely that the impact should be
// minimal
licenseState.addListener(allRolesStore::invalidateAll);

@@ -386,6 +387,11 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings());
securityInterceptor.set(new SecurityServerTransportInterceptor(settings, threadPool, authcService.get(), authzService, licenseState,
sslService, securityContext.get(), destructiveOperations));

BootstrapElasticPassword bootstrapElasticPassword = new BootstrapElasticPassword(settings, logger, clusterService, reservedRealm,
securityLifecycleService);
bootstrapElasticPassword.initiatePasswordBootstrap();

return components;
}

@@ -492,13 +498,15 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {

public List<BootstrapCheck> getBootstrapChecks() {
if (enabled) {
return Arrays.asList(
new SSLBootstrapCheck(sslService, settings, env),
new TokenPassphraseBootstrapCheck(settings),
new TokenSSLBootstrapCheck(settings),
new PkiRealmBootstrapCheck(settings, sslService),
new ContainerPasswordBootstrapCheck()
final ArrayList<BootstrapCheck> checks = CollectionUtils.arrayAsArrayList(
new SSLBootstrapCheck(sslService, settings, env),
new TokenPassphraseBootstrapCheck(settings),
new TokenSSLBootstrapCheck(settings),
new PkiRealmBootstrapCheck(settings, sslService),
new ContainerPasswordBootstrapCheck()
);
checks.addAll(InternalRealms.getBootstrapChecks(settings));
return checks;
} else {
return Collections.emptyList();
}

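The getBootstrapChecks change swaps a fixed Arrays.asList for a mutable list so realm-derived checks can be appended afterwards; CollectionUtils.arrayAsArrayList plays the role that new ArrayList<>(Arrays.asList(...)) plays in this plain-Java reduction (BootstrapCheck here is a hypothetical single-method stand-in):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

interface BootstrapCheck {
    boolean check(); // true means the check failed, in the spirit of a startup validation
}

final class BootstrapChecksDemo {
    static List<BootstrapCheck> getBootstrapChecks(List<BootstrapCheck> realmChecks) {
        // Arrays.asList returns a fixed-size list, so copy into an ArrayList before appending.
        List<BootstrapCheck> checks = new ArrayList<>(Arrays.asList(
                () -> false,  // stand-in for SSLBootstrapCheck
                () -> false   // stand-in for TokenSSLBootstrapCheck, etc.
        ));
        checks.addAll(realmChecks); // dynamically derived per-realm checks
        return checks;
    }
}
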
@@ -183,7 +183,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
sslUsage = in.readMap();
auditUsage = in.readMap();
ipFilterUsage = in.readMap();
if (in.getVersion().before(Version.V_6_0_0_alpha3)) {
if (in.getVersion().before(Version.V_6_0_0_beta1)) {
// system key has been removed but older versions still send its usage, so read the map and ignore it
in.readMap();
}

@@ -213,7 +213,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
out.writeMap(sslUsage);
out.writeMap(auditUsage);
out.writeMap(ipFilterUsage);
if (out.getVersion().before(Version.V_6_0_0_alpha3)) {
if (out.getVersion().before(Version.V_6_0_0_beta1)) {
// system key has been removed but older versions still expected it so send an empty map
out.writeMap(Collections.emptyMap());
}

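Version-gated reads and writes like these must mirror each other exactly, or the wire stream desynchronizes; a stripped-down sketch of the pattern using plain DataInput/DataOutput (the version gate is the point here, not the Elasticsearch stream classes):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class VersionGatedWire {
    static final int LEGACY_VERSION = 1; // peers at or below this still expect the removed field

    static void write(DataOutputStream out, int peerVersion, String payload) throws IOException {
        out.writeUTF(payload);
        if (peerVersion <= LEGACY_VERSION) {
            out.writeUTF(""); // removed field: send an empty placeholder so old peers can still parse
        }
    }

    static String read(DataInputStream in, int peerVersion) throws IOException {
        String payload = in.readUTF();
        if (peerVersion <= LEGACY_VERSION) {
            in.readUTF(); // old peers send the removed field: read it and ignore it
        }
        return payload;
    }
}
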
@@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.component.LifecycleListener;

@@ -26,6 +27,7 @@ import org.elasticsearch.xpack.security.support.IndexLifecycleManager;

import java.util.Collections;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

/**

@@ -147,6 +149,15 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
return securityIndex.checkMappingVersion(requiredVersion);
}

/**
* Adds a listener which will be notified when the security index health changes. The previous and
* current health will be provided to the listener so that the listener can determine if any action
* needs to be taken.
*/
public void addSecurityIndexHealthChangeListener(BiConsumer<ClusterIndexHealth, ClusterIndexHealth> listener) {
securityIndex.addIndexHealthChangeListener(listener);
}

// this is called in a lifecycle listener beforeStop on the cluster service
private void close() {
if (indexAuditTrail != null) {

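The listener contract here is (previousHealth, currentHealth), letting each subscriber decide for itself what a transition means. A minimal plain-Java sketch of such a publisher (IndexHealth is a stand-in enum; null means the index does not exist):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;

enum IndexHealth { RED, YELLOW, GREEN }

final class HealthChangePublisher {
    private final List<BiConsumer<IndexHealth, IndexHealth>> listeners = new CopyOnWriteArrayList<>();
    private IndexHealth current; // null means the index does not exist yet

    void addIndexHealthChangeListener(BiConsumer<IndexHealth, IndexHealth> listener) {
        listeners.add(listener);
    }

    void onNewHealth(IndexHealth next) {
        IndexHealth previous = current;
        current = next;
        if (previous != next) {
            // Each listener sees both sides of the transition and decides what to do.
            listeners.forEach(l -> l.accept(previous, next));
        }
    }
}
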
@@ -10,7 +10,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.action.filter.SecurityActionFilter;
import org.elasticsearch.xpack.security.action.interceptor.BulkShardRequestInterceptor;
import org.elasticsearch.xpack.security.action.interceptor.FieldStatsRequestInterceptor;
import org.elasticsearch.xpack.security.action.interceptor.RequestInterceptor;
import org.elasticsearch.xpack.security.action.interceptor.SearchRequestInterceptor;
import org.elasticsearch.xpack.security.action.interceptor.UpdateRequestInterceptor;

@@ -33,7 +32,6 @@ public class SecurityActionModule extends AbstractSecurityModule.Node {
multibinder.addBinding().to(SearchRequestInterceptor.class);
multibinder.addBinding().to(UpdateRequestInterceptor.class);
multibinder.addBinding().to(BulkShardRequestInterceptor.class);
multibinder.addBinding().to(FieldStatsRequestInterceptor.class);
}
}
}

@@ -1,35 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.action.interceptor;

import org.elasticsearch.action.fieldstats.FieldStatsRequest;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequest;

/**
* Intercepts requests to shards to field level stats and strips fields that the user is not allowed to access from the response.
*/
public class FieldStatsRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor<FieldStatsRequest> {
@Inject
public FieldStatsRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) {
super(settings, threadPool.getThreadContext(), licenseState);
}

@Override
public boolean supports(TransportRequest request) {
return request instanceof FieldStatsRequest;
}

@Override
protected void disableFeatures(FieldStatsRequest request, boolean fieldLevelSecurityEnabled, boolean documentLevelSecurityEnabled) {
if (fieldLevelSecurityEnabled) {
request.setUseCache(false);
}
}
}

@@ -39,18 +39,23 @@ public class ChangePasswordRequestBuilder
return this;
}

public static char[] validateAndHashPassword(SecureString password) {
Validation.Error error = Validation.Users.validatePassword(password.getChars());
if (error != null) {
ValidationException validationException = new ValidationException();
validationException.addValidationError(error.toString());
throw validationException;
}
return Hasher.BCRYPT.hash(password);
}

/**
* Sets the password. Note: the char[] passed to this method will be cleared.
*/
public ChangePasswordRequestBuilder password(char[] password) {
try (SecureString secureString = new SecureString(password)) {
Validation.Error error = Validation.Users.validatePassword(password);
if (error != null) {
ValidationException validationException = new ValidationException();
validationException.addValidationError(error.toString());
throw validationException;
}
request.passwordHash(Hasher.BCRYPT.hash(secureString));
char[] hash = validateAndHashPassword(secureString);
request.passwordHash(hash);
}
return this;
}

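Extracting validateAndHashPassword makes the validate-then-hash step reusable outside the builder. A self-contained sketch of the same shape - the length rule and the fake "hash" are illustrative only; the real code delegates to Validation.Users and BCrypt:

import java.util.Base64;

final class PasswordUtil {
    static char[] validateAndHash(char[] password) {
        // Validate first: reject before any hashing work happens.
        if (password.length < 6) {
            throw new IllegalArgumentException("passwords must be at least 6 characters");
        }
        // Illustrative "hash" only; the real implementation uses BCrypt.
        String encoded = Base64.getEncoder().encodeToString(new String(password).getBytes());
        return encoded.toCharArray();
    }

    public static void main(String[] args) {
        char[] hash = validateAndHash("s3cr3t-pass".toCharArray());
        System.out.println(new String(hash));
    }
}
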
@@ -0,0 +1,130 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc;

import java.util.Objects;

import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.xpack.security.user.User;

/**
* Represents the result of an authentication attempt.
* This allows a {@link Realm} to respond in 3 different ways (without needing to
* resort to {@link org.elasticsearch.action.ActionListener#onFailure(Exception)})
* <ol>
* <li>Successful authentication of a user</li>
* <li>Unable to authenticate user, try another realm (optionally with a diagnostic message)</li>
* <li>Unable to authenticate user, terminate authentication (with an error message)</li>
* </ol>
*/
public final class AuthenticationResult {
private static final AuthenticationResult NOT_HANDLED = new AuthenticationResult(Status.CONTINUE, null, null, null);

public enum Status {
SUCCESS,
CONTINUE,
TERMINATE,
}

private final Status status;
private final User user;
private final String message;
private final Exception exception;

private AuthenticationResult(Status status, @Nullable User user, @Nullable String message, @Nullable Exception exception) {
this.status = status;
this.user = user;
this.message = message;
this.exception = exception;
}

public Status getStatus() {
return status;
}

public User getUser() {
return user;
}

public String getMessage() {
return message;
}

public Exception getException() {
return exception;
}

/**
* Creates an {@code AuthenticationResult} that indicates that the supplied {@link User}
* has been successfully authenticated.
* <p>
* The {@link #getStatus() status} is set to {@link Status#SUCCESS}.
* </p><p>
* Neither the {@link #getMessage() message} nor {@link #getException() exception} are populated.
* </p>
* @param user The user that was authenticated. Cannot be {@code null}.
*/
public static AuthenticationResult success(User user) {
Objects.requireNonNull(user);
return new AuthenticationResult(Status.SUCCESS, user, null, null);
}

/**
* Creates an {@code AuthenticationResult} that indicates that the realm did not handle the
* authentication request in any way, and has no failure messages.
* <p>
* The {@link #getStatus() status} is set to {@link Status#CONTINUE}.
* </p><p>
* The {@link #getMessage() message}, {@link #getException() exception}, and {@link #getUser() user} are all set to {@code null}.
* </p>
*/
public static AuthenticationResult notHandled() {
return NOT_HANDLED;
}

/**
* Creates an {@code AuthenticationResult} that indicates that the realm attempted to handle the authentication request but was
* unsuccessful. The reason for the failure is given in the supplied message and optional exception.
* <p>
* The {@link #getStatus() status} is set to {@link Status#CONTINUE}.
* </p><p>
* The {@link #getUser() user} is not populated.
* </p>
*/
public static AuthenticationResult unsuccessful(String message, @Nullable Exception cause) {
Objects.requireNonNull(message);
return new AuthenticationResult(Status.CONTINUE, null, message, cause);
}

/**
* Creates an {@code AuthenticationResult} that indicates that the realm attempted to handle the authentication request, was
* unsuccessful and wants to terminate this authentication request.
* The reason for the failure is given in the supplied message and optional exception.
* <p>
* The {@link #getStatus() status} is set to {@link Status#TERMINATE}.
* </p><p>
* The {@link #getUser() user} is not populated.
* </p>
*/
public static AuthenticationResult terminate(String message, @Nullable Exception cause) {
return new AuthenticationResult(Status.TERMINATE, null, message, cause);
}

public boolean isAuthenticated() {
return status == Status.SUCCESS;
}

@Override
public String toString() {
return "AuthenticationResult{" +
"status=" + status +
", user=" + user +
", message=" + message +
", exception=" + exception +
'}';
}

}

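A realm built against this type maps its outcomes onto the three statuses. A compact sketch of a hypothetical password-checking realm (the in-memory user map is invented for illustration; the comments name the corresponding factory methods):

import java.util.HashMap;
import java.util.Map;

final class ToyRealm {
    enum Status { SUCCESS, CONTINUE, TERMINATE }

    private final Map<String, String> passwords = new HashMap<>(); // username -> password, illustration only

    ToyRealm withUser(String user, String password) {
        passwords.put(user, password);
        return this;
    }

    Status authenticate(String user, String password) {
        if (!passwords.containsKey(user)) {
            return Status.CONTINUE; // notHandled(): unknown user, no diagnostic message
        }
        if (passwords.get(user).equals(password)) {
            return Status.SUCCESS;  // success(user): iteration over realms stops here
        }
        // unsuccessful("failed to authenticate", ...): stays CONTINUE but carries a diagnostic;
        // a realm claiming exclusive rights over this user would return TERMINATE instead.
        return Status.CONTINUE;
    }
}
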
@@ -5,6 +5,13 @@
*/
package org.elasticsearch.xpack.security.authc;

import java.net.InetSocketAddress;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchSecurityException;

@@ -27,15 +34,10 @@ import org.elasticsearch.xpack.security.audit.AuditTrail;
import org.elasticsearch.xpack.security.audit.AuditTrailService;
import org.elasticsearch.xpack.security.authc.Authentication.RealmRef;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.support.Exceptions;
import org.elasticsearch.xpack.security.user.AnonymousUser;
import org.elasticsearch.xpack.security.user.User;

import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

import static org.elasticsearch.xpack.security.Security.setting;

/**

@@ -170,7 +172,7 @@ public class AuthenticationService extends AbstractComponent {
* <li>look for a user token</li>
* <li>token extraction {@link #extractToken(Consumer)}</li>
* <li>token authentication {@link #consumeToken(AuthenticationToken)}</li>
* <li>user lookup for run as if necessary {@link #consumeUser(User)} and
* <li>user lookup for run as if necessary {@link #consumeUser(User, Map)} and
* {@link #lookupRunAsUser(User, String, Consumer)}</li>
* <li>write authentication into the context {@link #finishAuthentication(User)}</li>
* </ol>

@@ -255,8 +257,8 @@ public class AuthenticationService extends AbstractComponent {
/**
* Consumes the {@link AuthenticationToken} provided by the caller. In the case of a {@code null} token, {@link #handleNullToken()}
* is called. In the case of a {@code non-null} token, the realms are iterated over and the first realm that returns a non-null
* {@link User} is the authenticating realm and iteration is stopped. This user is then passed to {@link #consumeUser(User)} if no
* exception was caught while trying to authenticate the token
* {@link User} is the authenticating realm and iteration is stopped. This user is then passed to {@link #consumeUser(User, Map)}
* if no exception was caught while trying to authenticate the token
*/
private void consumeToken(AuthenticationToken token) {
if (token == null) {

@@ -264,30 +266,42 @@ public class AuthenticationService extends AbstractComponent {
} else {
authenticationToken = token;
final List<Realm> realmsList = realms.asList();
final Map<Realm, Tuple<String, Exception>> messages = new LinkedHashMap<>();
final BiConsumer<Realm, ActionListener<User>> realmAuthenticatingConsumer = (realm, userListener) -> {
if (realm.supports(authenticationToken)) {
realm.authenticate(authenticationToken, ActionListener.wrap((user) -> {
if (user == null) {
// the user was not authenticated, call this so we can audit the correct event
request.realmAuthenticationFailed(authenticationToken, realm.name());
} else {
realm.authenticate(authenticationToken, ActionListener.wrap((result) -> {
assert result != null : "Realm " + realm + " produced a null authentication result";
if (result.getStatus() == AuthenticationResult.Status.SUCCESS) {
// user was authenticated, populate the authenticated by information
authenticatedBy = new RealmRef(realm.name(), realm.type(), nodeName);
userListener.onResponse(result.getUser());
} else {
// the user was not authenticated, call this so we can audit the correct event
request.realmAuthenticationFailed(authenticationToken, realm.name());
if (result.getStatus() == AuthenticationResult.Status.TERMINATE) {
logger.info("Authentication of [{}] was terminated by realm [{}] - {}",
authenticationToken.principal(), realm.name(), result.getMessage());
userListener.onFailure(Exceptions.authenticationError(result.getMessage(), result.getException()));
} else {
if (result.getMessage() != null) {
messages.put(realm, new Tuple<>(result.getMessage(), result.getException()));
}
userListener.onResponse(null);
}
}
userListener.onResponse(user);
}, (ex) -> {
logger.warn(
"An error occurred while attempting to authenticate [{}] against realm [{}] - {}",
authenticationToken.principal(), realm.name(), ex);
logger.debug("Authentication failed due to exception", ex);
logger.warn(new ParameterizedMessage(
"An error occurred while attempting to authenticate [{}] against realm [{}]",
authenticationToken.principal(), realm.name()), ex);
userListener.onFailure(ex);
}), request);
}));
} else {
userListener.onResponse(null);
}
};
final IteratingActionListener<User, Realm> authenticatingListener =
new IteratingActionListener<>(ActionListener.wrap(this::consumeUser,
new IteratingActionListener<>(ActionListener.wrap(
(user) -> consumeUser(user, messages),
(e) -> listener.onFailure(request.exceptionProcessingRequest(e, token))),
realmAuthenticatingConsumer, realmsList, threadContext);
try {

@@ -342,10 +356,9 @@ public class AuthenticationService extends AbstractComponent {
* functionality is in use. When run as is not in use, {@link #finishAuthentication(User)} is called, otherwise we try to lookup
* the run as user in {@link #lookupRunAsUser(User, String, Consumer)}
*/
private void consumeUser(User user) {
private void consumeUser(User user, Map<Realm, Tuple<String, Exception>> messages) {
if (user == null) {
final Map<Realm, Tuple<String, Exception>> failureDetails = Realm.getAuthenticationFailureDetails(threadContext);
failureDetails.forEach((realm, tuple) -> {
messages.forEach((realm, tuple) -> {
final String message = tuple.v1();
final String cause = tuple.v2() == null ? "" : " (Caused by " + tuple.v2() + ")";
logger.warn("Authentication to realm {} failed - {}{}", realm.name(), message, cause);

@@ -438,21 +451,16 @@ public class AuthenticationService extends AbstractComponent {
}
}

abstract static class AuditableRequest implements IncomingRequest {
abstract static class AuditableRequest {

final AuditTrail auditTrail;
final AuthenticationFailureHandler failureHandler;
final ThreadContext threadContext;
private final InetSocketAddress remoteAddress;
private final RequestType requestType;

AuditableRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext,
RequestType requestType, InetSocketAddress remoteAddress) {
AuditableRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext) {
this.auditTrail = auditTrail;
this.failureHandler = failureHandler;
this.threadContext = threadContext;
this.remoteAddress = remoteAddress;
this.requestType = requestType;
}

abstract void realmAuthenticationFailed(AuthenticationToken token, String realm);

@@ -469,13 +477,6 @@ public class AuthenticationService extends AbstractComponent {

abstract void authenticationSuccess(String realm, User user);

public InetSocketAddress getRemoteAddress() {
return remoteAddress;
}

public RequestType getType() {
return requestType;
}
}

static class AuditableTransportRequest extends AuditableRequest {

@@ -485,7 +486,7 @@ public class AuthenticationService extends AbstractComponent {

AuditableTransportRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext,
String action, TransportMessage message) {
super(auditTrail, failureHandler, threadContext, getType(message), getRemoteAddress(message));
super(auditTrail, failureHandler, threadContext);
this.action = action;
this.message = message;
}

@@ -539,14 +540,6 @@ public class AuthenticationService extends AbstractComponent {
return "transport request action [" + action + "]";
}

private static RequestType getType(TransportMessage message) {
return message.remoteAddress() == null ? RequestType.LOCAL_NODE : RequestType.REMOTE_NODE;
}

private static InetSocketAddress getRemoteAddress(TransportMessage message) {
TransportAddress transportAddress = message.remoteAddress();
return transportAddress == null ? null : transportAddress.address();
}
}

static class AuditableRestRequest extends AuditableRequest {

@@ -556,7 +549,7 @@ public class AuthenticationService extends AbstractComponent {
@SuppressWarnings("unchecked")
AuditableRestRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext,
RestRequest request) {
super(auditTrail, failureHandler, threadContext, RequestType.REST, (InetSocketAddress) request.getRemoteAddress());
super(auditTrail, failureHandler, threadContext);
this.request = request;
}

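The consumeToken rewrite threads per-realm diagnostics through a local LinkedHashMap instead of the ThreadContext. In outline, the chain walk looks like the following synchronous simplification (the real code iterates asynchronously via IteratingActionListener; all types here are local stand-ins):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class RealmChainWalk {
    enum Status { SUCCESS, CONTINUE, TERMINATE }

    static final class Result {
        final Status status; final String user; final String message;
        Result(Status status, String user, String message) {
            this.status = status; this.user = user; this.message = message;
        }
    }

    interface Realm {
        String name();
        Result authenticate(String token);
    }

    static String authenticate(List<Realm> realms, String token) {
        Map<String, String> messages = new LinkedHashMap<>(); // insertion-ordered, like the real code
        for (Realm realm : realms) {
            Result result = realm.authenticate(token);
            if (result.status == Status.SUCCESS) {
                return result.user;                          // first success wins; iteration stops
            }
            if (result.status == Status.TERMINATE) {
                throw new SecurityException(result.message); // realm vetoes the whole chain
            }
            if (result.message != null) {
                messages.put(realm.name(), result.message);  // keep the diagnostic, try the next realm
            }
        }
        // nothing authenticated: surface every realm's diagnostic in chain order
        messages.forEach((name, msg) -> System.err.println("Authentication to realm " + name + " failed - " + msg));
        return null;
    }
}
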
@@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc;

import java.net.InetSocketAddress;

/**
* This represents an incoming request that needs to be authenticated
*/
public interface IncomingRequest {

/**
* This method returns the remote address for the request. It will be null if the request is a
* local transport request.
*
* @return the remote socket address
*/
InetSocketAddress getRemoteAddress();

/**
* This returns the type of request that is incoming. It can be a rest request, a remote
* transport request, or a local transport request.
*
* @return the request type
*/
RequestType getType();

enum RequestType {
REST,
REMOTE_NODE,
LOCAL_NODE
}
}

@@ -5,25 +5,31 @@
*/
package org.elasticsearch.xpack.security.authc;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.authc.esnative.NativeRealm;
import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
import org.elasticsearch.xpack.security.authc.file.FileRealm;
import org.elasticsearch.xpack.security.authc.ldap.LdapRealm;
import org.elasticsearch.xpack.security.authc.pki.PkiRealm;
import org.elasticsearch.xpack.security.authc.support.RoleMappingFileBootstrapCheck;
import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore;
import org.elasticsearch.xpack.ssl.SSLService;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
* Provides a single entry point into dealing with all standard XPack security {@link Realm realms}.
* This class does not handle extensions.

@@ -57,14 +63,18 @@ public class InternalRealms {
* This excludes the {@link ReservedRealm}, as it cannot be created dynamically.
* @return A map from <em>realm-type</em> to <code>Factory</code>
*/
public static Map<String, Realm.Factory> getFactories(
ThreadPool threadPool, ResourceWatcherService resourceWatcherService,
SSLService sslService, NativeUsersStore nativeUsersStore,
NativeRoleMappingStore nativeRoleMappingStore) {
public static Map<String, Realm.Factory> getFactories(ThreadPool threadPool, ResourceWatcherService resourceWatcherService,
SSLService sslService, NativeUsersStore nativeUsersStore,
NativeRoleMappingStore nativeRoleMappingStore,
SecurityLifecycleService securityLifecycleService) {

Map<String, Realm.Factory> map = new HashMap<>();
map.put(FileRealm.TYPE, config -> new FileRealm(config, resourceWatcherService));
map.put(NativeRealm.TYPE, config -> new NativeRealm(config, nativeUsersStore));
map.put(NativeRealm.TYPE, config -> {
final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore);
securityLifecycleService.addSecurityIndexHealthChangeListener(nativeRealm::onSecurityIndexHealthChange);
return nativeRealm;
});
map.put(LdapRealm.AD_TYPE, config -> new LdapRealm(LdapRealm.AD_TYPE, config, sslService,
resourceWatcherService, nativeRoleMappingStore, threadPool));
map.put(LdapRealm.LDAP_TYPE, config -> new LdapRealm(LdapRealm.LDAP_TYPE, config,

@@ -78,7 +88,7 @@ public class InternalRealms {
* This excludes the {@link ReservedRealm}, as it cannot be configured dynamically.
* @return A map from <em>realm-type</em> to a collection of <code>Setting</code> objects.
*/
public static Map<String,Set<Setting<?>>> getSettings() {
public static Map<String, Set<Setting<?>>> getSettings() {
Map<String, Set<Setting<?>>> map = new HashMap<>();
map.put(FileRealm.TYPE, FileRealm.getSettings());
map.put(NativeRealm.TYPE, NativeRealm.getSettings());

@@ -91,4 +101,21 @@ public class InternalRealms {
private InternalRealms() {
}

public static List<BootstrapCheck> getBootstrapChecks(final Settings globalSettings) {
final List<BootstrapCheck> checks = new ArrayList<>();
final Map<String, Settings> settingsByRealm = RealmSettings.getRealmSettings(globalSettings);
settingsByRealm.forEach((name, settings) -> {
final RealmConfig realmConfig = new RealmConfig(name, settings, globalSettings, null);
switch (realmConfig.type()) {
case LdapRealm.AD_TYPE:
case LdapRealm.LDAP_TYPE:
case PkiRealm.TYPE:
final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(realmConfig);
if (check != null) {
checks.add(check);
}
}
});
return checks;
}
}

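getBootstrapChecks fans out over the per-realm settings, and only some realm types contribute a check. Reduced to plain Java, with invented realm-type strings and a printed "check" standing in for RoleMappingFileBootstrapCheck:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class RealmBootstrapChecks {
    interface Check { void validate(); }

    // Only LDAP/AD/PKI-style realms contribute a role-mapping-file check in this sketch.
    static List<Check> derive(Map<String, String> realmNameToType) {
        List<Check> checks = new ArrayList<>();
        realmNameToType.forEach((name, type) -> {
            switch (type) {
                case "ldap":
                case "active_directory":
                case "pki":
                    checks.add(() -> System.out.println("validating role mapping file for realm " + name));
                    break;
                default:
                    break; // file/native realms contribute no startup check
            }
        });
        return checks;
    }

    public static void main(String[] args) {
        Map<String, String> realms = new HashMap<>();
        realms.put("corp_ldap", "ldap");
        realms.put("local", "native");
        derive(realms).forEach(Check::validate);
    }
}
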
@@ -5,18 +5,14 @@
*/
package org.elasticsearch.xpack.security.authc;

import java.util.HashMap;
import java.util.Map;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.security.user.User;

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

/**
* An authentication mechanism to which the default authentication {@link org.elasticsearch.xpack.security.authc.AuthenticationService
* service } delegates the authentication process. Different realms may be defined, each may be based on different

@@ -24,8 +20,6 @@ import java.util.Map;
*/
public abstract class Realm implements Comparable<Realm> {

private static final String AUTHENTICATION_FAILURES_KEY = "_xpack_security_auth_failures";

protected final Logger logger;
protected final String type;
protected RealmConfig config;

@@ -37,21 +31,21 @@ public abstract class Realm implements Comparable<Realm> {
}

/**
* @return The type of this realm
* @return The type of this realm
*/
public String type() {
return type;
}

/**
* @return The name of this realm.
* @return The name of this realm.
*/
public String name() {
return config.name;
}

/**
* @return The order of this realm within the executing realm chain.
* @return The order of this realm within the executing realm chain.
*/
public int order() {
return config.order;

@@ -63,7 +57,7 @@ public abstract class Realm implements Comparable<Realm> {
}

/**
* @return {@code true} if this realm supports the given authentication token, {@code false} otherwise.
* @return {@code true} if this realm supports the given authentication token, {@code false} otherwise.
*/
public abstract boolean supports(AuthenticationToken token);

@@ -71,22 +65,39 @@ public abstract class Realm implements Comparable<Realm> {
* Attempts to extract an authentication token from the given context. If an appropriate token
* is found it's returned, otherwise {@code null} is returned.
*
* @param context The context that will provide information about the incoming request
* @return The authentication token or {@code null} if not found
* @param context The context that will provide information about the incoming request
* @return The authentication token or {@code null} if not found
*/
public abstract AuthenticationToken token(ThreadContext context);

/**
* Authenticates the given token in an asynchronous fashion. A successful authentication will call the
* {@link ActionListener#onResponse} with the User associated with the given token. An unsuccessful authentication calls
* with {@code null} on the argument.
* Authenticates the given token in an asynchronous fashion.
* <p>
* A successful authentication will call {@link ActionListener#onResponse} with a
* {@link AuthenticationResult#success successful} result, which includes the user associated with the given token.
* <br>
* If the realm does not support, or cannot handle the token, it will call {@link ActionListener#onResponse} with a
* {@link AuthenticationResult#notHandled not-handled} result.
* This can include cases where the token identifies a user that is not known by this realm.
* <br>
* If the realm can handle the token, but authentication failed it will typically call {@link ActionListener#onResponse} with a
* {@link AuthenticationResult#unsuccessful failure} result, which includes a diagnostic message regarding the failure.
* This can include cases where the token identifies a valid user, but has an invalid password.
* <br>
* If the realm wishes to assert that it has the exclusive right to handle the provided token, but authentication was not successful
* it will typically call {@link ActionListener#onResponse} with a
* {@link AuthenticationResult#terminate termination} result, which includes a diagnostic message regarding the failure.
* This can include cases where the token identifies a valid user, but has an invalid password and no other realm is allowed to
* authenticate that user.
* </p>
* <p>
* The remote address should be {@code null} if the request initiated from the local node.
* </p>
*
* The remote address should be null if the request initiated from the local node.
* @param token The authentication token
* @param listener The listener to pass the authentication result to
* @param incomingRequest the request that is being authenticated
* @param token The authentication token
* @param listener The listener to pass the authentication result to
*/
public abstract void authenticate(AuthenticationToken token, ActionListener<User> listener, IncomingRequest incomingRequest);
public abstract void authenticate(AuthenticationToken token, ActionListener<AuthenticationResult> listener);

/**
* Looks up the user identified by the String identifier. A successful lookup will call the {@link ActionListener#onResponse}

@@ -117,35 +128,11 @@ public abstract class Realm implements Comparable<Realm> {

/**
* Constructs a realm which will be used for authentication.
*
* @param config The configuration for the realm
* @throws Exception an exception may be thrown if there was an error during realm creation
*/
Realm create(RealmConfig config) throws Exception;
}

/**
* Provides a mechanism for a realm to report errors that were handled within a realm, but may
* provide useful diagnostics about why authentication failed.
*/
protected final void setFailedAuthenticationDetails(String message, @Nullable Exception cause) {
final ThreadContext threadContext = config.threadContext();
Map<Realm, Tuple<String, Exception>> failures = threadContext.getTransient(AUTHENTICATION_FAILURES_KEY);
if (failures == null) {
failures = new LinkedHashMap<>();
threadContext.putTransient(AUTHENTICATION_FAILURES_KEY, failures);
}
failures.put(this, new Tuple<>(message, cause));
}

/**
* Retrieves any authentication failures messages that were set using {@link #setFailedAuthenticationDetails(String, Exception)}
*/
static Map<Realm, Tuple<String, Exception>> getAuthenticationFailureDetails(ThreadContext threadContext) {
final Map<Realm, Tuple<String, Exception>> failures = threadContext.getTransient(AUTHENTICATION_FAILURES_KEY);
if (failures == null) {
return Collections.emptyMap();
}
return failures;
}

}

@@ -16,6 +16,7 @@ public class RealmConfig {
final String name;
final boolean enabled;
final int order;
private final String type;
final Settings settings;

private final Environment env;

@@ -35,6 +36,7 @@ public class RealmConfig {
this.env = env;
enabled = RealmSettings.ENABLED_SETTING.get(settings);
order = RealmSettings.ORDER_SETTING.get(settings);
type = RealmSettings.TYPE_SETTING.get(settings);
this.threadContext = threadContext;
}

@@ -50,6 +52,10 @@ public class RealmConfig {
return order;
}

public String type() {
return type;
}

public Settings settings() {
return settings;
}

@ -5,11 +5,6 @@

 */
package org.elasticsearch.xpack.security.authc;

import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.extensions.XPackExtension;

import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;

@ -17,8 +12,14 @@ import java.util.List;

import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.extensions.XPackExtension;

import static org.elasticsearch.common.Strings.isNullOrEmpty;
import static org.elasticsearch.xpack.security.Security.setting;

@ -71,6 +72,16 @@ public class RealmSettings {

        return settings.getByPrefix(RealmSettings.PREFIX);
    }

    /**
     * Extracts the realm settings from a global settings object.
     * Returns a Map of realm-name to realm-settings.
     */
    public static Map<String, Settings> getRealmSettings(Settings globalSettings) {
        Settings realmsSettings = RealmSettings.get(globalSettings);
        return realmsSettings.names().stream()
                .collect(Collectors.toMap(Function.identity(), realmsSettings::getAsSettings));
    }

    /**
     * Convert the child {@link Setting} for the provided realm into a fully scoped key for use in an error message.
     * @see #PREFIX

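getRealmSettings groups every flat key under the realms prefix into one settings object per realm name. A standalone sketch of that grouping with plain maps standing in for the Settings class, assuming the prefix is `xpack.security.authc.realms.` (the actual value of PREFIX is not shown in this hunk):

import java.util.LinkedHashMap;
import java.util.Map;

public class RealmSettingsSketch {
    public static void main(String[] args) {
        Map<String, String> global = new LinkedHashMap<>();
        global.put("xpack.security.authc.realms.file1.type", "file");
        global.put("xpack.security.authc.realms.file1.order", "0");
        global.put("xpack.security.authc.realms.ldap1.type", "ldap");

        String prefix = "xpack.security.authc.realms.";
        Map<String, Map<String, String>> byRealm = new LinkedHashMap<>();
        for (Map.Entry<String, String> e : global.entrySet()) {
            if (e.getKey().startsWith(prefix)) {
                // "file1.type" -> realm "file1", child key "type"
                String rest = e.getKey().substring(prefix.length());
                String realm = rest.substring(0, rest.indexOf('.'));
                byRealm.computeIfAbsent(realm, r -> new LinkedHashMap<>())
                        .put(rest.substring(realm.length() + 1), e.getValue());
            }
        }
        System.out.println(byRealm); // {file1={type=file, order=0}, ldap1={type=ldap}}
    }
}
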
@ -6,8 +6,10 @@

package org.elasticsearch.xpack.security.authc.esnative;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.xpack.security.authc.IncomingRequest;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;

@ -35,10 +37,25 @@ public class NativeRealm extends CachingUsernamePasswordRealm {

    }

    @Override
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
        userStore.verifyPassword(token.principal(), token.credentials(), listener);
    }

    public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
        final boolean movedFromRedToNonRed = (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED)
                && currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED;
        final boolean indexDeleted = previousHealth != null && currentHealth == null;

        if (movedFromRedToNonRed || indexDeleted) {
            clearCache();
        }
    }

    // method is used for testing to verify cache expiration since expireAll is final
    void clearCache() {
        expireAll();
    }

    /**
     * @return The {@link Setting setting configuration} for this realm type
     */

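The health-change handler clears the realm cache on exactly two transitions: the security index leaving RED (or appearing for the first time) and the index being deleted outright. A standalone sketch of that predicate, with a plain enum standing in for ClusterIndexHealth:

public class HealthChangeSketch {
    enum Health { RED, YELLOW, GREEN }

    static boolean shouldClearCache(Health previous, Health current) {
        // recovered: previously RED (or unknown) and now exists in a non-RED state
        boolean movedFromRedToNonRed = (previous == null || previous == Health.RED)
                && current != null && current != Health.RED;
        // deleted: index existed before and no longer does
        boolean indexDeleted = previous != null && current == null;
        return movedFromRedToNonRed || indexDeleted;
    }

    public static void main(String[] args) {
        System.out.println(shouldClearCache(Health.RED, Health.YELLOW));  // true  (recovered)
        System.out.println(shouldClearCache(Health.GREEN, null));         // true  (deleted)
        System.out.println(shouldClearCache(Health.GREEN, Health.GREEN)); // false (no transition)
    }
}
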
@ -42,6 +42,7 @@ import org.elasticsearch.xpack.security.action.realm.ClearRealmCacheResponse;

import org.elasticsearch.xpack.security.action.user.ChangePasswordRequest;
import org.elasticsearch.xpack.security.action.user.DeleteUserRequest;
import org.elasticsearch.xpack.security.action.user.PutUserRequest;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.ContainerSettings;
import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.client.SecurityClient;

@ -512,14 +513,14 @@ public class NativeUsersStore extends AbstractComponent {

     * @param username username to look up the user by
     * @param password the plaintext password to verify
     */
    void verifyPassword(String username, final SecureString password, ActionListener<User> listener) {
    void verifyPassword(String username, final SecureString password, ActionListener<AuthenticationResult> listener) {
        getUserAndPassword(username, ActionListener.wrap((userAndPassword) -> {
            if (userAndPassword == null || userAndPassword.passwordHash() == null) {
                listener.onResponse(null);
            } else if (hasher.verify(password, userAndPassword.passwordHash())) {
                listener.onResponse(userAndPassword.user());
                listener.onResponse(AuthenticationResult.success(userAndPassword.user()));
            } else {
                listener.onResponse(null);
                listener.onResponse(AuthenticationResult.unsuccessful("Password authentication failed for " + username, null));
            }
        }, listener::onFailure));
    }

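verifyPassword now distinguishes three outcomes rather than signalling everything with a bare user-or-null. A standalone sketch of that mapping; AuthenticationResult is modeled here as a plain string status, since the real class is not shown in this hunk:

import java.util.Map;

public class VerifyPasswordSketch {
    static String verify(String username, String password, Map<String, String> store) {
        String expected = store.get(username);
        if (expected == null) {
            return "notHandled";                                  // unknown user: let another realm try
        } else if (expected.equals(password)) {
            return "success(" + username + ")";                   // verified: carry the user forward
        } else {
            return "unsuccessful(Password authentication failed for " + username + ")";
        }
    }

    public static void main(String[] args) {
        Map<String, String> store = Map.of("kimchy", "s3cret");
        System.out.println(verify("kimchy", "s3cret", store));
        System.out.println(verify("kimchy", "wrong", store));
        System.out.println(verify("nobody", "x", store));
    }
}
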
@ -9,6 +9,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;

import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

@ -17,7 +18,8 @@ import org.elasticsearch.env.Environment;

import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.Security;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.authc.IncomingRequest;
import org.elasticsearch.xpack.security.action.user.ChangePasswordRequest;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;

@ -31,9 +33,6 @@ import org.elasticsearch.xpack.security.user.KibanaUser;

import org.elasticsearch.xpack.security.user.LogstashSystemUser;
import org.elasticsearch.xpack.security.user.User;

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

@ -50,7 +49,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

    public static final SecureString EMPTY_PASSWORD_TEXT = new SecureString("".toCharArray());
    static final char[] EMPTY_PASSWORD_HASH = Hasher.BCRYPT.hash(EMPTY_PASSWORD_TEXT);
    static final char[] OLD_DEFAULT_PASSWORD_HASH = Hasher.BCRYPT.hash(new SecureString("changeme".toCharArray()));

    private static final ReservedUserInfo DEFAULT_USER_INFO = new ReservedUserInfo(EMPTY_PASSWORD_HASH, true, true);
    private static final ReservedUserInfo DISABLED_USER_INFO = new ReservedUserInfo(EMPTY_PASSWORD_HASH, false, true);

@ -58,6 +56,7 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

    public static final Setting<Boolean> ACCEPT_DEFAULT_PASSWORD_SETTING = Setting.boolSetting(
            Security.setting("authc.accept_default_password"), true, Setting.Property.NodeScope, Setting.Property.Filtered,
            Setting.Property.Deprecated);
    public static final Setting<SecureString> BOOTSTRAP_ELASTIC_PASSWORD = SecureSetting.secureString("bootstrap.password", null);

    private final NativeUsersStore nativeUsersStore;
    private final AnonymousUser anonymousUser;

@ -76,73 +75,38 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

    }

    @Override
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
        if (incomingRequest.getType() != IncomingRequest.RequestType.REST) {
            doAuthenticate(token, listener, false);
        } else {
            InetAddress address = incomingRequest.getRemoteAddress().getAddress();

            try {
                // This checks if the address is the loopback address or if it is bound to one of this machine's
                // network interfaces. This is because we want to allow requests that originate from this machine.
                final boolean isLocalMachine = address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null;
                doAuthenticate(token, listener, isLocalMachine);
            } catch (SocketException e) {
                listener.onFailure(Exceptions.authenticationError("failed to authenticate user [{}]", e, token.principal()));
            }
        }
    }

    private void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, boolean acceptEmptyPassword) {
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
        if (realmEnabled == false) {
            listener.onResponse(null);
            listener.onResponse(AuthenticationResult.notHandled());
        } else if (isReserved(token.principal(), config.globalSettings()) == false) {
            listener.onResponse(null);
            listener.onResponse(AuthenticationResult.notHandled());
        } else {
            getUserInfo(token.principal(), ActionListener.wrap((userInfo) -> {
                Runnable action;
                AuthenticationResult result;
                if (userInfo != null) {
                    try {
                        if (userInfo.hasEmptyPassword) {
                            // norelease
                            // Accepting the OLD_DEFAULT_PASSWORD_HASH is a transition step. We do not want to support
                            // this in a release.
                            if (isSetupMode(token.principal(), acceptEmptyPassword) == false) {
                                action = () -> listener.onFailure(Exceptions.authenticationError("failed to authenticate user [{}]",
                                        token.principal()));
                            } else if (verifyPassword(userInfo, token)
                                    || Hasher.BCRYPT.verify(token.credentials(), OLD_DEFAULT_PASSWORD_HASH)) {
                                action = () -> listener.onResponse(getUser(token.principal(), userInfo));
                            } else {
                                action = () -> listener.onFailure(Exceptions.authenticationError("failed to authenticate user [{}]",
                                        token.principal()));
                            }
                            result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null);
                        } else if (verifyPassword(userInfo, token)) {
                            final User user = getUser(token.principal(), userInfo);
                            action = () -> listener.onResponse(user);
                            result = AuthenticationResult.success(user);
                        } else {
                            action = () -> listener.onFailure(Exceptions.authenticationError("failed to authenticate user [{}]",
                                    token.principal()));
                            result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null);
                        }
                    } finally {
                        if (userInfo.passwordHash != EMPTY_PASSWORD_HASH && userInfo.passwordHash != OLD_DEFAULT_PASSWORD_HASH) {
                        if (userInfo.passwordHash != EMPTY_PASSWORD_HASH) {
                            Arrays.fill(userInfo.passwordHash, (char) 0);
                        }
                    }
                } else {
                    action = () -> listener.onFailure(Exceptions.authenticationError("failed to authenticate user [{}]",
                            token.principal()));
                    result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null);
                }
                // we want the finally block to clear out the chars before we proceed further so we execute the action here
                action.run();
                // we want the finally block to clear out the chars before we proceed further so we handle the result here
                listener.onResponse(result);
            }, listener::onFailure));
        }
    }

    private boolean isSetupMode(String userName, boolean acceptEmptyPassword) {
        return ElasticUser.NAME.equals(userName) && acceptEmptyPassword;
    }

    private boolean verifyPassword(ReservedUserInfo userInfo, UsernamePasswordToken token) {
        if (Hasher.BCRYPT.verify(token.credentials(), userInfo.passwordHash)) {
            return true;

@ -186,11 +150,36 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

        }
    }

    public synchronized void bootstrapElasticUserCredentials(SecureString passwordHash, ActionListener<Boolean> listener) {
        getUserInfo(ElasticUser.NAME, new ActionListener<ReservedUserInfo>() {
            @Override
            public void onResponse(ReservedUserInfo reservedUserInfo) {
                if (reservedUserInfo == null) {
                    listener.onFailure(new IllegalStateException("unexpected state: ReservedUserInfo was null"));
                } else if (reservedUserInfo.hasEmptyPassword) {
                    ChangePasswordRequest changePasswordRequest = new ChangePasswordRequest();
                    changePasswordRequest.username(ElasticUser.NAME);
                    changePasswordRequest.passwordHash(passwordHash.getChars());
                    nativeUsersStore.changePassword(changePasswordRequest,
                            ActionListener.wrap(v -> listener.onResponse(true), listener::onFailure));

                } else {
                    listener.onResponse(false);
                }
            }

            @Override
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }
        });
    }

    private User getUser(String username, ReservedUserInfo userInfo) {
        assert username != null;
        switch (username) {
            case ElasticUser.NAME:
                return new ElasticUser(userInfo.enabled, userInfo.hasEmptyPassword);
                return new ElasticUser(userInfo.enabled);
            case KibanaUser.NAME:
                return new KibanaUser(userInfo.enabled);
            case LogstashSystemUser.NAME:

@ -214,8 +203,7 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

        List<User> users = new ArrayList<>(4);

        ReservedUserInfo userInfo = reservedUserInfos.get(ElasticUser.NAME);
        users.add(new ElasticUser(userInfo == null || userInfo.enabled,
                userInfo == null || userInfo.hasEmptyPassword));
        users.add(new ElasticUser(userInfo == null || userInfo.enabled));

        userInfo = reservedUserInfos.get(KibanaUser.NAME);
        users.add(new KibanaUser(userInfo == null || userInfo.enabled));

@ -277,5 +265,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {

    public static void addSettings(List<Setting<?>> settingsList) {
        settingsList.add(ACCEPT_DEFAULT_PASSWORD_SETTING);
        settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD);
    }
}

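bootstrapElasticUserCredentials applies the keystore's bootstrap.password only while the elastic user still has an empty password; once a real password exists, the bootstrap value is ignored. A minimal sketch of that decision, with a Runnable standing in for the native-users-store password change:

public class BootstrapSketch {
    static boolean bootstrap(boolean hasEmptyPassword, Runnable changePassword) {
        if (hasEmptyPassword) {
            changePassword.run(); // persist the bootstrap hash, as the real code does via the users store
            return true;
        }
        return false;             // a password was already set; leave it alone
    }

    public static void main(String[] args) {
        System.out.println(bootstrap(true, () -> System.out.println("password changed")));  // changed, true
        System.out.println(bootstrap(false, () -> System.out.println("password changed"))); // untouched, false
    }
}
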
@ -14,6 +14,7 @@ import org.elasticsearch.cli.Terminal;

import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.settings.KeyStoreWrapper;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

@ -37,22 +38,32 @@ import java.util.function.Function;

 */
public class SetupPasswordTool extends MultiCommand {

    private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" +
    private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" +
            "~!@#$%^&*-_=+?").toCharArray();
    private static final String[] USERS = new String[]{ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME};

    private final Function<Environment, CommandLineHttpClient> clientFunction;
    private final CheckedFunction<Environment, KeyStoreWrapper, Exception> keyStoreFunction;
    private CommandLineHttpClient client;

    SetupPasswordTool() {
        this((environment) -> new CommandLineHttpClient(environment.settings(), environment));
        this((environment) -> new CommandLineHttpClient(environment.settings(), environment),
                (environment) -> {
                    KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.load(environment.configFile());
                    if (keyStoreWrapper == null) {
                        throw new UserException(ExitCodes.CONFIG, "Keystore does not exist");
                    }
                    return keyStoreWrapper;
                });
    }

    SetupPasswordTool(Function<Environment, CommandLineHttpClient> clientFunction) {
    SetupPasswordTool(Function<Environment, CommandLineHttpClient> clientFunction,
                      CheckedFunction<Environment, KeyStoreWrapper, Exception> keyStoreFunction) {
        super("Sets the passwords for reserved users");
        subcommands.put("auto", new AutoSetup());
        subcommands.put("interactive", new InteractiveSetup());
        this.clientFunction = clientFunction;
        this.keyStoreFunction = keyStoreFunction;
    }

    public static void main(String[] args) throws Exception {

@ -135,7 +146,7 @@ public class SetupPasswordTool extends MultiCommand {

        try (SecureString password2 = new SecureString(terminal.readSecret("Reenter password for [" + user + "]: "))) {
            if (password1.equals(password2) == false) {
                password1.close();
                throw new UserException(ExitCodes.USAGE, "Passwords for user [" + user+ "] do not match");
                throw new UserException(ExitCodes.USAGE, "Passwords for user [" + user + "] do not match");
            }
        }
        return password1;

@ -157,7 +168,7 @@ public class SetupPasswordTool extends MultiCommand {

        private OptionSpec<String> noPromptOption;

        private String elasticUser = ElasticUser.NAME;
        private SecureString elasticUserPassword = ReservedRealm.EMPTY_PASSWORD_TEXT;
        private SecureString elasticUserPassword;
        private String url;

        SetupCommand(String description) {

@ -165,11 +176,17 @@ public class SetupPasswordTool extends MultiCommand {

            setParser();
        }

        void setupOptions(OptionSet options, Environment env) {
        void setupOptions(OptionSet options, Environment env) throws Exception {
            client = clientFunction.apply(env);
            KeyStoreWrapper keyStore = keyStoreFunction.apply(env);
            String providedUrl = urlOption.value(options);
            url = providedUrl == null ? "http://localhost:9200" : providedUrl;
            setShouldPrompt(options);

            // TODO: We currently do not support keystore passwords
            keyStore.decrypt(new char[0]);

            elasticUserPassword = keyStore.getString(ReservedRealm.BOOTSTRAP_ELASTIC_PASSWORD.getKey());
        }

        private void setParser() {

@ -199,6 +216,7 @@ public class SetupPasswordTool extends MultiCommand {

                BiConsumer<String, SecureString> callback) throws Exception {
            boolean isSuperUser = user.equals(elasticUser);
            SecureString password = passwordFn.apply(user);

            try {
                String route = url + "/_xpack/security/user/" + user + "/_password";
                String response = client.postURL("PUT", route, elasticUser, elasticUserPassword, buildPayload(password));

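The tool now takes the keystore loader as a second constructor argument so tests can substitute a fake instead of reading a file from disk. A standalone sketch of that injection pattern; the types below are stand-ins, not the real CLI classes:

import java.util.function.Function;

public class InjectionSketch {
    interface CheckedFunction<T, R> { R apply(T t) throws Exception; }

    static class Tool {
        final Function<String, String> clientFactory;
        final CheckedFunction<String, String> keyStoreLoader;

        Tool(Function<String, String> clientFactory, CheckedFunction<String, String> keyStoreLoader) {
            this.clientFactory = clientFactory;
            this.keyStoreLoader = keyStoreLoader;
        }
    }

    public static void main(String[] args) throws Exception {
        // production wiring would load the real keystore; a test passes an in-memory fake:
        Tool tool = new Tool(env -> "client-for-" + env, env -> "fake-keystore");
        System.out.println(tool.keyStoreLoader.apply("test-env"));
    }
}
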
@ -5,18 +5,18 @@

 */
package org.elasticsearch.xpack.security.authc.file;

import java.util.Map;
import java.util.Set;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.security.authc.IncomingRequest;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.user.User;

import java.util.Map;
import java.util.Set;

public class FileRealm extends CachingUsernamePasswordRealm {

    public static final String TYPE = "file";

@ -38,13 +38,12 @@ public class FileRealm extends CachingUsernamePasswordRealm {

    }

    @Override
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
        if (userPasswdStore.verifyPassword(token.principal(), token.credentials())) {
    protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
        final AuthenticationResult result = userPasswdStore.verifyPassword(token.principal(), token.credentials(), () -> {
            String[] roles = userRolesStore.roles(token.principal());
            listener.onResponse(new User(token.principal(), roles));
        } else {
            listener.onResponse(null);
        }
            return new User(token.principal(), roles);
        });
        listener.onResponse(result);
    }

    @Override

@ -18,11 +18,13 @@ import org.elasticsearch.watcher.FileWatcher;

import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.support.NoOpLogger;
import org.elasticsearch.xpack.security.support.Validation;
import org.elasticsearch.xpack.security.support.Validation.Users;
import org.elasticsearch.xpack.security.user.User;

import java.io.IOException;
import java.io.PrintWriter;

@ -78,16 +80,15 @@ public class FileUserPasswdStore {

        return users.size();
    }

    public boolean verifyPassword(String username, SecureString password) {
    public AuthenticationResult verifyPassword(String username, SecureString password, java.util.function.Supplier<User> user) {
        char[] hash = users.get(username);
        if (hash == null) {
            return false;
            return AuthenticationResult.notHandled();
        }
        if (hasher.verify(password, hash) == false) {
            logger.debug("User [{}] exists in file but authentication failed", username);
            return false;
            return AuthenticationResult.unsuccessful("Password authentication failed for " + username, null);
        }
        return true;
        return AuthenticationResult.success(user.get());
    }

    public boolean userExists(String username) {

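Passing a Supplier<User> means the store only constructs the user, and performs the role lookup behind it, on the success path; a missing user or a failed hash check never pays for role resolution. A standalone sketch of that lazy construction with string placeholders for the store and result types:

import java.util.Map;
import java.util.function.Supplier;

public class LazyUserSketch {
    static String verifyPassword(String username, String password,
                                 Map<String, String> users, Supplier<String> userSupplier) {
        String stored = users.get(username);
        if (stored == null) {
            return "not-handled";
        }
        if (stored.equals(password) == false) {
            return "unsuccessful";
        }
        return "success: " + userSupplier.get(); // the supplier is invoked only here
    }

    public static void main(String[] args) {
        Map<String, String> users = Map.of("kimchy", "s3cret");
        System.out.println(verifyPassword("kimchy", "s3cret", users, () -> {
            System.out.println("resolving roles...");           // runs once, on success only
            return "kimchy[admin]";
        }));
        System.out.println(verifyPassword("kimchy", "wrong", users, () -> "never built"));
    }
}
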
@ -24,9 +24,13 @@ import java.util.Collections;

import java.util.List;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.AD_DOMAIN_NAME_SETTING;
import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.buildDnFromDomain;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry;
import static org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory.IGNORE_REFERRAL_ERRORS_SETTING;


class ActiveDirectoryGroupsResolver implements GroupsResolver {


@ -35,11 +39,10 @@ class ActiveDirectoryGroupsResolver implements GroupsResolver {

    private final LdapSearchScope scope;
    private final boolean ignoreReferralErrors;

    ActiveDirectoryGroupsResolver(Settings settings, String baseDnDefault,
                                  boolean ignoreReferralErrors) {
        this.baseDn = settings.get("base_dn", baseDnDefault);
        this.scope = LdapSearchScope.resolve(settings.get("scope"), LdapSearchScope.SUB_TREE);
        this.ignoreReferralErrors = ignoreReferralErrors;
    ActiveDirectoryGroupsResolver(Settings settings) {
        this.baseDn = settings.get("group_search.base_dn", buildDnFromDomain(settings.get(AD_DOMAIN_NAME_SETTING)));
        this.scope = LdapSearchScope.resolve(settings.get("group_search.scope"), LdapSearchScope.SUB_TREE);
        this.ignoreReferralErrors = IGNORE_REFERRAL_ERRORS_SETTING.get(settings);
    }

    @Override

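The resolver now reads fully qualified keys ("group_search.base_dn", "group_search.scope") from the realm's own settings and derives the default base DN from domain_name, instead of receiving a pre-sliced "group_search" sub-object. A sketch of that fallback with plain maps standing in for the Settings class; the sample values are invented:

import java.util.Map;

public class GroupSearchSettingsSketch {
    public static void main(String[] args) {
        Map<String, String> realmSettings = Map.of(
                "domain_name", "ad.example.com",
                "group_search.scope", "one_level");
        // base_dn falls back to the DN derived from domain_name when unset:
        String baseDn = realmSettings.getOrDefault("group_search.base_dn",
                "DC=" + realmSettings.get("domain_name").replace(".", ",DC="));
        System.out.println(baseDn);                                  // DC=ad,DC=example,DC=com
        System.out.println(realmSettings.get("group_search.scope")); // one_level
    }
}
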
@ -8,25 +8,32 @@ package org.elasticsearch.xpack.security.authc.ldap;

import com.unboundid.ldap.sdk.Filter;
import com.unboundid.ldap.sdk.LDAPConnection;
import com.unboundid.ldap.sdk.LDAPConnectionOptions;
import com.unboundid.ldap.sdk.LDAPConnectionPool;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.LDAPInterface;
import com.unboundid.ldap.sdk.SearchResultEntry;
import com.unboundid.ldap.sdk.SimpleBindRequest;
import com.unboundid.ldap.sdk.controls.AuthorizationIdentityRequestControl;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.RealmSettings;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSearchScope;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils;
import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory;
import org.elasticsearch.xpack.security.authc.support.CharArrays;
import org.elasticsearch.xpack.ssl.SSLService;

import java.util.HashSet;

@ -46,7 +53,7 @@ import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.sear

 * user entry in Active Directory that matches the user name). This eliminates the need for user templates, and simplifies
 * the configuration for windows admins that may not be familiar with LDAP concepts.
 */
class ActiveDirectorySessionFactory extends SessionFactory {
class ActiveDirectorySessionFactory extends PoolingSessionFactory {

    static final String AD_DOMAIN_NAME_SETTING = "domain_name";

@ -58,29 +65,42 @@ class ActiveDirectorySessionFactory extends SessionFactory {

    static final String AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING = "user_search.down_level_filter";
    static final String AD_USER_SEARCH_SCOPE_SETTING = "user_search.scope";
    private static final String NETBIOS_NAME_FILTER_TEMPLATE = "(netbiosname={0})";
    private static final Setting<Boolean> POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled",
            settings -> Boolean.toString(PoolingSessionFactory.BIND_DN.exists(settings)), Setting.Property.NodeScope);

    final DefaultADAuthenticator defaultADAuthenticator;
    final DownLevelADAuthenticator downLevelADAuthenticator;
    final UpnADAuthenticator upnADAuthenticator;

    ActiveDirectorySessionFactory(RealmConfig config, SSLService sslService) {
        super(config, sslService);
    ActiveDirectorySessionFactory(RealmConfig config, SSLService sslService) throws LDAPException {
        super(config, sslService, new ActiveDirectoryGroupsResolver(config.settings()), POOL_ENABLED, () -> {
            if (BIND_DN.exists(config.settings())) {
                return new SimpleBindRequest(getBindDN(config.settings()), BIND_PASSWORD.get(config.settings()));
            } else {
                return new SimpleBindRequest();
            }
        }, () -> {
            if (BIND_DN.exists(config.settings())) {
                final String healthCheckDn = BIND_DN.get(config.settings());
                if (healthCheckDn.isEmpty() && healthCheckDn.indexOf('=') > 0) {
                    return healthCheckDn;
                }
            }
            return config.settings().get(AD_USER_SEARCH_BASEDN_SETTING, config.settings().get(AD_DOMAIN_NAME_SETTING));
        });
        Settings settings = config.settings();
        String domainName = settings.get(AD_DOMAIN_NAME_SETTING);
        if (domainName == null) {
            throw new IllegalArgumentException("missing [" + AD_DOMAIN_NAME_SETTING +
                    "] setting for active directory");
            throw new IllegalArgumentException("missing [" + AD_DOMAIN_NAME_SETTING + "] setting for active directory");
        }
        String domainDN = buildDnFromDomain(domainName);
        GroupsResolver groupResolver = new ActiveDirectoryGroupsResolver(settings.getAsSettings("group_search"), domainDN,
                ignoreReferralErrors);
        LdapMetaDataResolver metaDataResolver = new LdapMetaDataResolver(config.settings(), ignoreReferralErrors);
        defaultADAuthenticator = new DefaultADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver,
        defaultADAuthenticator = new DefaultADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver,
                metaDataResolver, domainDN);
        downLevelADAuthenticator = new DownLevelADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver,
                metaDataResolver, domainDN, sslService);
        upnADAuthenticator = new UpnADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver,
                metaDataResolver, domainDN);

    }

    @Override

@ -88,30 +108,78 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        return new String[] {"ldap://" + settings.get(AD_DOMAIN_NAME_SETTING) + ":389"};
    }

    /**
     * This is an active directory bind that looks up the user DN after binding with a windows principal.
     *
     * @param username name of the windows user without the domain
     */
    @Override
    public void session(String username, SecureString password, ActionListener<LdapSession> listener) {
    void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password, ActionListener<LdapSession> listener) {
        getADAuthenticator(user).authenticate(connectionPool, user, password, listener);
    }

    @Override
    void getSessionWithoutPool(String username, SecureString password, ActionListener<LdapSession> listener) {
        // the runnable action here allows us to make the control/flow logic simpler to understand. If we got a connection then let's
        // authenticate. If there was a failure, pass it back using the listener
        Runnable runnable;
        try {
            final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection);
            runnable = () -> getADAuthenticator(username).authenticate(connection, username, password,
                        ActionListener.wrap(listener::onResponse,
                            (e) -> {
                                IOUtils.closeWhileHandlingException(connection);
                                listener.onFailure(e);
                            }));
                    ActionListener.wrap(listener::onResponse,
                            (e) -> {
                                IOUtils.closeWhileHandlingException(connection);
                                listener.onFailure(e);
                            }));
        } catch (LDAPException e) {
            runnable = () -> listener.onFailure(e);
        }
        runnable.run();
    }

    @Override
    void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener<LdapSession> listener) {
        getADAuthenticator(user).searchForDN(connectionPool, user, null, Math.toIntExact(timeout.seconds()), ActionListener.wrap(entry -> {
            if (entry == null) {
                listener.onResponse(null);
            } else {
                final String dn = entry.getDN();
                listener.onResponse(new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout, null));
            }
        }, listener::onFailure));
    }

    @Override
    void getUnauthenticatedSessionWithoutPool(String user, ActionListener<LdapSession> listener) {
        if (BIND_DN.exists(config.settings())) {
            LDAPConnection connection = null;
            boolean startedSearching = false;
            try {
                connection = LdapUtils.privilegedConnect(serverSet::getConnection);
                connection.bind(new SimpleBindRequest(getBindDN(config.settings()), BIND_PASSWORD.get(config.settings())));
                final LDAPConnection finalConnection = connection;
                getADAuthenticator(user).searchForDN(finalConnection, user, null, Math.toIntExact(timeout.getSeconds()),
                        ActionListener.wrap(entry -> {
                            if (entry == null) {
                                IOUtils.closeWhileHandlingException(finalConnection);
                                listener.onResponse(null);
                            } else {
                                final String dn = entry.getDN();
                                listener.onResponse(new LdapSession(logger, config, finalConnection, dn, groupResolver, metaDataResolver,
                                        timeout, null));
                            }
                        }, e -> {
                            IOUtils.closeWhileHandlingException(finalConnection);
                            listener.onFailure(e);
                        }));
                startedSearching = true;
            } catch (LDAPException e) {
                listener.onFailure(e);
            } finally {
                if (connection != null && startedSearching == false) {
                    IOUtils.closeWhileHandlingException(connection);
                }
            }
        } else {
            listener.onResponse(null);
        }
    }

    /**
     * @param domain active directory domain name
     * @return LDAP DN, distinguished name, of the root of the domain

@ -120,6 +188,14 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        return "DC=" + domain.replace(".", ",DC=");
    }

    static String getBindDN(Settings settings) {
        String bindDN = BIND_DN.get(settings);
        if (bindDN.isEmpty() == false && bindDN.indexOf('\\') < 0 && bindDN.indexOf('@') < 0 && bindDN.indexOf('=') < 0) {
            bindDN = bindDN + "@" + settings.get(AD_DOMAIN_NAME_SETTING);
        }
        return bindDN;
    }

    public static Set<Setting<?>> getSettings() {
        Set<Setting<?>> settings = new HashSet<>();
        settings.addAll(SessionFactory.getSettings());

@ -131,6 +207,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        settings.add(Setting.simpleString(AD_UPN_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope));
        settings.add(Setting.simpleString(AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope));
        settings.add(Setting.simpleString(AD_USER_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope));
        settings.addAll(PoolingSessionFactory.getSettings());
        return settings;
    }

@ -154,6 +231,8 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        final String userSearchDN;
        final LdapSearchScope userSearchScope;
        final String userSearchFilter;
        final String bindDN;
        final String bindPassword; // TODO this needs to be a setting in the secure settings store!

        ADAuthenticator(RealmConfig realm, TimeValue timeout, boolean ignoreReferralErrors, Logger logger,
                        GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN,

@ -165,6 +244,8 @@ class ActiveDirectorySessionFactory extends SessionFactory {

            this.groupsResolver = groupsResolver;
            this.metaDataResolver = metaDataResolver;
            final Settings settings = realm.settings();
            this.bindDN = getBindDN(settings);
            this.bindPassword = BIND_PASSWORD.get(settings);
            userSearchDN = settings.get(AD_USER_SEARCH_BASEDN_SETTING, domainDN);
            userSearchScope = LdapSearchScope.resolve(settings.get(AD_USER_SEARCH_SCOPE_SETTING), LdapSearchScope.SUB_TREE);
            userSearchFilter = settings.get(userSearchFilterSetting, defaultUserSearchFilter);

@ -174,7 +255,11 @@ class ActiveDirectorySessionFactory extends SessionFactory {

                          ActionListener<LdapSession> listener) {
            boolean success = false;
            try {
                connection.bind(bindUsername(username), new String(password.getChars()));
                connection.bind(new SimpleBindRequest(bindUsername(username), CharArrays.toUtf8Bytes(password.getChars()),
                        new AuthorizationIdentityRequestControl()));
                if (bindDN.isEmpty() == false) {
                    connection.bind(new SimpleBindRequest(bindDN, bindPassword));
                }
                searchForDN(connection, username, password, Math.toIntExact(timeout.seconds()), ActionListener.wrap((entry) -> {
                    if (entry == null) {
                        IOUtils.close(connection);

@ -200,6 +285,28 @@ class ActiveDirectorySessionFactory extends SessionFactory {

            }
        }

        final void authenticate(LDAPConnectionPool pool, String username, SecureString password,
                                ActionListener<LdapSession> listener) {
            try {
                LdapUtils.privilegedConnect(() -> {
                    SimpleBindRequest request = new SimpleBindRequest(bindUsername(username), CharArrays.toUtf8Bytes(password.getChars()));
                    return pool.bindAndRevertAuthentication(request);
                });
                searchForDN(pool, username, password, Math.toIntExact(timeout.seconds()), ActionListener.wrap((entry) -> {
                    if (entry == null) {
                        // we did not find the user, cannot authenticate in this realm
                        listener.onFailure(new ElasticsearchSecurityException("search for user [" + username
                                + "] by principle name yielded no results"));
                    } else {
                        final String dn = entry.getDN();
                        listener.onResponse(new LdapSession(logger, realm, pool, dn, groupsResolver, metaDataResolver, timeout, null));
                    }
                }, listener::onFailure));
            } catch (LDAPException e) {
                listener.onFailure(e);
            }
        }

        String bindUsername(String username) {
            return username;
        }

@ -209,7 +316,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {

            return userSearchFilter;
        }

        abstract void searchForDN(LDAPConnection connection, String username, SecureString password, int timeLimitSeconds,
        abstract void searchForDN(LDAPInterface connection, String username, SecureString password, int timeLimitSeconds,
                                  ActionListener<SearchResultEntry> listener);
    }

@ -233,7 +340,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        }

        @Override
        void searchForDN(LDAPConnection connection, String username, SecureString password,
        void searchForDN(LDAPInterface connection, String username, SecureString password,
                         int timeLimitSeconds, ActionListener<SearchResultEntry> listener) {
            try {
                searchForEntry(connection, userSearchDN, userSearchScope.scope(),

@ -276,7 +383,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {

        }

        @Override
        void searchForDN(LDAPConnection connection, String username, SecureString password, int timeLimitSeconds,
        void searchForDN(LDAPInterface connection, String username, SecureString password, int timeLimitSeconds,
                         ActionListener<SearchResultEntry> listener) {
            String[] parts = username.split("\\\\");
            assert parts.length == 2;

@ -285,7 +392,6 @@ class ActiveDirectorySessionFactory extends SessionFactory {

            netBiosDomainNameToDn(connection, netBiosDomainName, username, password, timeLimitSeconds, ActionListener.wrap((domainDN) -> {
                if (domainDN == null) {
                    IOUtils.close(connection);
                    listener.onResponse(null);
                } else {
                    try {

@ -294,75 +400,75 @@ class ActiveDirectorySessionFactory extends SessionFactory {

                                accountName), timeLimitSeconds, ignoreReferralErrors,
                                listener, attributesToSearchFor(groupsResolver.attributes()));
                    } catch (LDAPException e) {
                        IOUtils.closeWhileHandlingException(connection);
                        listener.onFailure(e);
                    }
                }
            }, (e) -> {
                IOUtils.closeWhileHandlingException(connection);
                listener.onFailure(e);
            }));
            }, listener::onFailure));
        }

        void netBiosDomainNameToDn(LDAPConnection connection, String netBiosDomainName, String username, SecureString password,
        void netBiosDomainNameToDn(LDAPInterface ldapInterface, String netBiosDomainName, String username, SecureString password,
                                   int timeLimitSeconds, ActionListener<String> listener) {
            final String cachedName = domainNameCache.get(netBiosDomainName);
            if (cachedName != null) {
                listener.onResponse(cachedName);
            } else if (usingGlobalCatalog(settings, connection)) {
                // the global catalog does not replicate the necessary information to map a netbios
                // dns name to a DN so we need to instead connect to the normal ports. This code
                // uses the standard ports to avoid adding even more settings and is probably ok as
                // most AD users do not use non-standard ports
                final LDAPConnectionOptions options = connectionOptions(config, sslService, logger);
                boolean startedSearching = false;
                LDAPConnection searchConnection = null;
                try {
                    Filter filter = createFilter(NETBIOS_NAME_FILTER_TEMPLATE, netBiosDomainName);
                    if (connection.getSSLSession() != null) {
            try {
                if (cachedName != null) {
                    listener.onResponse(cachedName);
                } else if (usingGlobalCatalog(ldapInterface)) {
                    // the global catalog does not replicate the necessary information to map a netbios
                    // dns name to a DN so we need to instead connect to the normal ports. This code
                    // uses the standard ports to avoid adding even more settings and is probably ok as
                    // most AD users do not use non-standard ports
                    final LDAPConnectionOptions options = connectionOptions(config, sslService, logger);
                    boolean startedSearching = false;
                    LDAPConnection searchConnection = null;
                    LDAPConnection ldapConnection = null;
                    try {
                        Filter filter = createFilter(NETBIOS_NAME_FILTER_TEMPLATE, netBiosDomainName);
                        if (ldapInterface instanceof LDAPConnection) {
                            ldapConnection = (LDAPConnection) ldapInterface;
                        } else {
                            ldapConnection = LdapUtils.privilegedConnect(((LDAPConnectionPool) ldapInterface)::getConnection);
                        }
                        final LDAPConnection finalLdapConnection = ldapConnection;
                        searchConnection = LdapUtils.privilegedConnect(
                            () -> new LDAPConnection(connection.getSocketFactory(), options,
                                    connection.getConnectedAddress(), 636));
                    } else {
                        searchConnection = LdapUtils.privilegedConnect(() ->
                                new LDAPConnection(options, connection.getConnectedAddress(), 389));
                                () -> new LDAPConnection(finalLdapConnection.getSocketFactory(), options,
                                        finalLdapConnection.getConnectedAddress(),
                                        finalLdapConnection.getSSLSession() != null ? 636 : 389));

                        final SimpleBindRequest bindRequest =
                                bindDN.isEmpty() ? new SimpleBindRequest(username, CharArrays.toUtf8Bytes(password.getChars())) :
                                        new SimpleBindRequest(bindDN, bindPassword);
                        searchConnection.bind(bindRequest);
                        final LDAPConnection finalConnection = searchConnection;
                        search(finalConnection, domainDN, LdapSearchScope.SUB_TREE.scope(), filter,
                                timeLimitSeconds, ignoreReferralErrors, ActionListener.wrap(
                                        (results) -> {
                                            IOUtils.close(finalConnection);
                                            handleSearchResults(results, netBiosDomainName, domainNameCache, listener);
                                        }, (e) -> {
                                            IOUtils.closeWhileHandlingException(finalConnection);
                                            listener.onFailure(e);
                                        }),
                                "ncname");
                        startedSearching = true;
                    } finally {
                        if (startedSearching == false) {
                            IOUtils.closeWhileHandlingException(searchConnection);
                        }
                        if (ldapInterface instanceof LDAPConnectionPool && ldapConnection != null) {
                            ((LDAPConnectionPool) ldapInterface).releaseConnection(ldapConnection);
                        }
                    }
                    searchConnection.bind(username, new String(password.getChars()));
                    final LDAPConnection finalConnection = searchConnection;
                    search(finalConnection, domainDN, LdapSearchScope.SUB_TREE.scope(), filter,
                            timeLimitSeconds, ignoreReferralErrors, ActionListener.wrap(
                                    (results) -> {
                                        IOUtils.close(finalConnection);
                                        handleSearchResults(results, netBiosDomainName,
                                                domainNameCache, listener);
                                    }, (e) -> {
                                        IOUtils.closeWhileHandlingException(connection);
                                        listener.onFailure(e);
                                    }),
                            "ncname");
                    startedSearching = true;
                } catch (LDAPException e) {
                    listener.onFailure(e);
                } finally {
                    if (startedSearching == false) {
                        IOUtils.closeWhileHandlingException(searchConnection);
                    }
                }
            } else {
                try {
                } else {
                    Filter filter = createFilter(NETBIOS_NAME_FILTER_TEMPLATE, netBiosDomainName);
                    search(connection, domainDN, LdapSearchScope.SUB_TREE.scope(), filter,
                    search(ldapInterface, domainDN, LdapSearchScope.SUB_TREE.scope(), filter,
                            timeLimitSeconds, ignoreReferralErrors, ActionListener.wrap(
                                    (results) -> handleSearchResults(results, netBiosDomainName,
                                            domainNameCache, listener),
                                    (e) -> {
                                        IOUtils.closeWhileHandlingException(connection);
                                        listener.onFailure(e);
                                    }),
                                    listener::onFailure),
                            "ncname");
                } catch (LDAPException e) {
                    listener.onFailure(e);
                }
            } catch (LDAPException e) {
                listener.onFailure(e);
            }
        }

@ -385,35 +491,55 @@ class ActiveDirectorySessionFactory extends SessionFactory {

            }
        }

        static boolean usingGlobalCatalog(Settings settings, LDAPConnection ldapConnection) {
            Boolean usingGlobalCatalog = settings.getAsBoolean("global_catalog", null);
            if (usingGlobalCatalog != null) {
                return usingGlobalCatalog;
        static boolean usingGlobalCatalog(LDAPInterface ldap) throws LDAPException {
            if (ldap instanceof LDAPConnection) {
                return usingGlobalCatalog((LDAPConnection) ldap);
            } else {
                LDAPConnectionPool pool = (LDAPConnectionPool) ldap;
                LDAPConnection connection = null;
                try {
                    connection = LdapUtils.privilegedConnect(pool::getConnection);
                    return usingGlobalCatalog(connection);
                } finally {
                    if (connection != null) {
                        pool.releaseConnection(connection);
                    }
                }
            }
        }

        private static boolean usingGlobalCatalog(LDAPConnection ldapConnection) {
            return ldapConnection.getConnectedPort() == 3268 || ldapConnection.getConnectedPort() == 3269;
        }
    }

    /**
     * Authenticates user principal names provided by the user (e.g. user@domain). Note this authenticator does not currently support
     * UPN suffixes that are different than the actual domain name.
     */
    static class UpnADAuthenticator extends ADAuthenticator {

        static final String UPN_USER_FILTER = "(&(objectClass=user)(|(sAMAccountName={0})(userPrincipalName={1})))";
        static final String UPN_USER_FILTER = "(&(objectClass=user)(userPrincipalName={1}))";

        UpnADAuthenticator(RealmConfig config, TimeValue timeout, boolean ignoreReferralErrors, Logger logger,
                           GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN) {
            super(config, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN,
                    AD_UPN_USER_SEARCH_FILTER_SETTING, UPN_USER_FILTER);
            if (userSearchFilter.contains("{0}")) {
                new DeprecationLogger(logger).deprecated("The use of the account name variable {0} in the setting ["
                        + RealmSettings.getFullSettingKey(config, AD_UPN_USER_SEARCH_FILTER_SETTING) +
                        "] has been deprecated and will be removed in a future version!");
            }
        }

        void searchForDN(LDAPConnection connection, String username, SecureString password, int timeLimitSeconds,
        void searchForDN(LDAPInterface connection, String username, SecureString password, int timeLimitSeconds,
                         ActionListener<SearchResultEntry> listener) {
            String[] parts = username.split("@");
            assert parts.length == 2;
            assert parts.length == 2 : "there should have only been two values for " + username + " after splitting on '@'";
            final String accountName = parts[0];
            final String domainName = parts[1];
            final String domainDN = buildDnFromDomain(domainName);
            try {
                Filter filter = createFilter(UPN_USER_FILTER, accountName, username);
                searchForEntry(connection, domainDN, LdapSearchScope.SUB_TREE.scope(), filter,
                Filter filter = createFilter(userSearchFilter, accountName, username);
                searchForEntry(connection, userSearchDN, LdapSearchScope.SUB_TREE.scope(), filter,
                        timeLimitSeconds, ignoreReferralErrors, listener,
                        attributesToSearchFor(groupsResolver.attributes()));
            } catch (LDAPException e) {

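Two of the small helpers above are self-contained enough to run directly. The sketch below copies their logic verbatim to show the DN derivation and the bind-DN qualification rule: "@domain" is appended only when the configured value is neither a DN, a UPN, nor a down-level (DOMAIN\user) name. The sample domain and account names are invented:

public class AdNameSketch {
    static String buildDnFromDomain(String domain) {
        return "DC=" + domain.replace(".", ",DC=");
    }

    static String qualifyBindDn(String bindDN, String domain) {
        // no '\', '@', or '=' means a bare account name: qualify it with the domain
        if (bindDN.isEmpty() == false && bindDN.indexOf('\\') < 0 && bindDN.indexOf('@') < 0 && bindDN.indexOf('=') < 0) {
            bindDN = bindDN + "@" + domain;
        }
        return bindDN;
    }

    public static void main(String[] args) {
        System.out.println(buildDnFromDomain("ad.example.com"));           // DC=ad,DC=example,DC=com
        System.out.println(qualifyBindDn("svc-search", "ad.example.com")); // svc-search@ad.example.com
        System.out.println(qualifyBindDn("CN=svc,DC=ad,DC=example,DC=com", "ad.example.com")); // unchanged
    }
}
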
@ -5,13 +5,6 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.security.authc.ldap;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import com.unboundid.ldap.sdk.LDAPException;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
|
@ -28,7 +21,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool.Names;
|
||||
import org.elasticsearch.watcher.ResourceWatcherService;
|
||||
import org.elasticsearch.xpack.security.authc.IncomingRequest;
|
||||
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
|
||||
import org.elasticsearch.xpack.security.authc.RealmConfig;
|
||||
import org.elasticsearch.xpack.security.authc.RealmSettings;
|
||||
import org.elasticsearch.xpack.security.authc.ldap.support.LdapLoadBalancing;
|
||||
|
@ -44,6 +37,13 @@ import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingSt
|
|||
import org.elasticsearch.xpack.security.user.User;
|
||||
import org.elasticsearch.xpack.ssl.SSLService;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
|
||||
/**
|
||||
* Authenticates username/password tokens against ldap, locates groups and maps them to roles.
|
||||
|
@ -142,7 +142,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
|
|||
* This user will then be passed to the listener
|
||||
*/
|
||||
@Override
|
||||
protected void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
|
||||
protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
|
||||
// we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want
|
||||
// network threads stuck waiting for a socket to connect. After the bind, then all interaction with LDAP should be async
|
||||
final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable(listener,
|
||||
|
@ -153,17 +153,19 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected void doLookupUser(String username, ActionListener<User> listener) {
|
||||
protected void doLookupUser(String username, ActionListener<User> userActionListener) {
|
||||
if (sessionFactory.supportsUnauthenticatedSession()) {
|
||||
// we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want
|
||||
// network threads stuck waiting for a socket to connect. After the bind, then all interaction with LDAP should be async
|
||||
final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable(listener,
|
||||
final ActionListener<AuthenticationResult> sessionListener = ActionListener.wrap(AuthenticationResult::getUser,
|
||||
userActionListener::onFailure);
|
||||
final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable(userActionListener,
|
||||
() -> sessionFactory.unauthenticatedSession(username,
|
||||
contextPreservingListener(new LdapSessionActionListener("lookup", username, listener))), logger);
|
||||
contextPreservingListener(new LdapSessionActionListener("lookup", username, sessionListener))), logger);
|
||||
threadPool.generic().execute(cancellableLdapRunnable);
|
||||
threadPool.schedule(executionTimeout, Names.SAME, cancellableLdapRunnable::maybeTimeout);
} else {
listener.onResponse(null);
userActionListener.onResponse(null);
}
}

@ -188,7 +190,8 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
return usage;
}

private static void buildUser(LdapSession session, String username, ActionListener<User> listener, UserRoleMapper roleMapper) {
private static void buildUser(LdapSession session, String username, ActionListener<AuthenticationResult> listener,
UserRoleMapper roleMapper) {
if (session == null) {
listener.onResponse(null);
} else {

@ -210,8 +213,8 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
roles -> {
IOUtils.close(session);
String[] rolesArray = roles.toArray(new String[roles.size()]);
listener.onResponse(
new User(username, rolesArray, null, null, metadata, true)
listener.onResponse(AuthenticationResult.success(
new User(username, rolesArray, null, null, metadata, true))
);
}, onFailure
));

@ -236,21 +239,21 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
private final AtomicReference<LdapSession> ldapSessionAtomicReference = new AtomicReference<>();
private String action;
private final String username;
private final ActionListener<User> userActionListener;
private final ActionListener<AuthenticationResult> resultListener;

LdapSessionActionListener(String action, String username, ActionListener<User> userActionListener) {
LdapSessionActionListener(String action, String username, ActionListener<AuthenticationResult> resultListener) {
this.action = action;
this.username = username;
this.userActionListener = userActionListener;
this.resultListener = resultListener;
}

@Override
public void onResponse(LdapSession session) {
if (session == null) {
userActionListener.onResponse(null);
resultListener.onResponse(null);
} else {
ldapSessionAtomicReference.set(session);
buildUser(session, username, userActionListener, roleMapper);
buildUser(session, username, resultListener, roleMapper);
}
}

@ -262,8 +265,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
if (logger.isDebugEnabled()) {
logger.debug(new ParameterizedMessage("Exception occurred during {} for {}", action, LdapRealm.this), e);
}
setFailedAuthenticationDetails(action + " failed", e);
userActionListener.onResponse(null);
resultListener.onResponse(AuthenticationResult.unsuccessful(action + " failed", e));
}

}

@ -276,11 +278,11 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
static class CancellableLdapRunnable extends AbstractRunnable {

private final Runnable in;
private final ActionListener<User> listener;
private final ActionListener<?> listener;
private final Logger logger;
private final AtomicReference<LdapRunnableState> state = new AtomicReference<>(LdapRunnableState.AWAITING_EXECUTION);

CancellableLdapRunnable(ActionListener<User> listener, Runnable in, Logger logger) {
CancellableLdapRunnable(ActionListener<?> listener, Runnable in, Logger logger) {
this.listener = listener;
this.in = in;
this.logger = logger;
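
A note on the calling convention introduced above: LdapRealm now completes an ActionListener<AuthenticationResult> instead of an ActionListener<User>, so callers branch on the result rather than null-checking a user. A minimal sketch, using only the accessors that appear elsewhere in this commit (isAuthenticated(), getUser()); handleUser and tryNextRealm are hypothetical callbacks:

    realm.authenticate(token, ActionListener.wrap(result -> {
        if (result.isAuthenticated()) {
            handleUser(result.getUser());   // authenticated user, roles already resolved
        } else {
            tryNextRealm();                 // unsuccessful: the message is kept for diagnostics
        }
    }, e -> logger.error("authentication error", e)));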

@ -5,25 +5,20 @@
*/
package org.elasticsearch.xpack.security.authc.ldap;

import com.unboundid.ldap.sdk.GetEntryLDAPConnectionPoolHealthCheck;
import com.unboundid.ldap.sdk.Filter;
import com.unboundid.ldap.sdk.LDAPConnection;
import com.unboundid.ldap.sdk.LDAPConnectionPool;
import com.unboundid.ldap.sdk.LDAPConnectionPoolHealthCheck;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.LDAPInterface;
import com.unboundid.ldap.sdk.SearchResultEntry;
import com.unboundid.ldap.sdk.ServerSet;
import com.unboundid.ldap.sdk.SimpleBindRequest;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.RealmSettings;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSearchScope;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver;

@ -35,59 +30,41 @@ import org.elasticsearch.xpack.ssl.SSLService;
import java.util.Arrays;

import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;

import static com.unboundid.ldap.sdk.Filter.createEqualityFilter;
import static com.unboundid.ldap.sdk.Filter.encodeValue;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.attributesToSearchFor;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry;

class LdapUserSearchSessionFactory extends SessionFactory {
class LdapUserSearchSessionFactory extends PoolingSessionFactory {

static final int DEFAULT_CONNECTION_POOL_SIZE = 20;
static final int DEFAULT_CONNECTION_POOL_INITIAL_SIZE = 0;
static final String DEFAULT_USERNAME_ATTRIBUTE = "uid";
static final TimeValue DEFAULT_HEALTH_CHECK_INTERVAL = TimeValue.timeValueSeconds(60L);
private static final String DEFAULT_USERNAME_ATTRIBUTE = "uid";

static final String SEARCH_PREFIX = "user_search.";
static final Setting<String> SEARCH_ATTRIBUTE = new Setting<>("user_search.attribute", DEFAULT_USERNAME_ATTRIBUTE,
Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated);

private static final Setting<String> SEARCH_BASE_DN = Setting.simpleString("user_search.base_dn", Setting.Property.NodeScope);
private static final Setting<String> SEARCH_ATTRIBUTE = new Setting<>("user_search.attribute", DEFAULT_USERNAME_ATTRIBUTE,
Function.identity(), Setting.Property.NodeScope);
private static final Setting<String> SEARCH_FILTER = Setting.simpleString("user_search.filter", Setting.Property.NodeScope);
private static final Setting<LdapSearchScope> SEARCH_SCOPE = new Setting<>("user_search.scope", (String) null,
s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope);

private static final Setting<Boolean> POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled",
true, Setting.Property.NodeScope);
private static final Setting<Integer> POOL_INITIAL_SIZE = Setting.intSetting("user_search.pool.initial_size",
DEFAULT_CONNECTION_POOL_INITIAL_SIZE, 0, Setting.Property.NodeScope);
private static final Setting<Integer> POOL_SIZE = Setting.intSetting("user_search.pool.size",
DEFAULT_CONNECTION_POOL_SIZE, 1, Setting.Property.NodeScope);
private static final Setting<TimeValue> HEALTH_CHECK_INTERVAL = Setting.timeSetting("user_search.pool.health_check.interval",
DEFAULT_HEALTH_CHECK_INTERVAL, Setting.Property.NodeScope);
private static final Setting<Boolean> HEALTH_CHECK_ENABLED = Setting.boolSetting("user_search.pool.health_check.enabled",
true, Setting.Property.NodeScope);
private static final Setting<Optional<String>> HEALTH_CHECK_DN = new Setting<>("user_search.pool.health_check.dn", (String) null,
Optional::ofNullable, Setting.Property.NodeScope);

private static final Setting<String> BIND_DN = Setting.simpleString("bind_dn",
Setting.Property.NodeScope, Setting.Property.Filtered);
private static final Setting<String> BIND_PASSWORD = Setting.simpleString("bind_password",
Setting.Property.NodeScope, Setting.Property.Filtered);
private static final Setting<Boolean> POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled", true, Setting.Property.NodeScope);

private final String userSearchBaseDn;
private final LdapSearchScope scope;
private final String userAttribute;
private final GroupsResolver groupResolver;
private final boolean useConnectionPool;

private final LDAPConnectionPool connectionPool;
private final LdapMetaDataResolver metaDataResolver;
private final String searchFilter;

LdapUserSearchSessionFactory(RealmConfig config, SSLService sslService) throws LDAPException {
super(config, sslService);
super(config, sslService, groupResolver(config.settings()), POOL_ENABLED,
() -> LdapUserSearchSessionFactory.bindRequest(config.settings()),
() -> {
if (BIND_DN.exists(config.settings())) {
return BIND_DN.get(config.settings());
} else {
return SEARCH_BASE_DN.get(config.settings());
}
});
Settings settings = config.settings();
if (SEARCH_BASE_DN.exists(settings)) {
userSearchBaseDn = SEARCH_BASE_DN.get(settings);

@ -95,56 +72,9 @@ class LdapUserSearchSessionFactory extends SessionFactory {
throw new IllegalArgumentException("[" + RealmSettings.getFullSettingKey(config, SEARCH_BASE_DN) + "] must be specified");
}
scope = SEARCH_SCOPE.get(settings);
userAttribute = SEARCH_ATTRIBUTE.get(settings);
groupResolver = groupResolver(settings);
metaDataResolver = new LdapMetaDataResolver(config.settings(), ignoreReferralErrors);
useConnectionPool = POOL_ENABLED.get(settings);
if (useConnectionPool) {
connectionPool = createConnectionPool(config, serverSet, timeout, logger);
} else {
connectionPool = null;
}
logger.info("Realm [{}] is in user-search mode - base_dn=[{}], attribute=[{}]",
config.name(), userSearchBaseDn, userAttribute);
}

static LDAPConnectionPool createConnectionPool(RealmConfig config, ServerSet serverSet, TimeValue timeout, Logger logger)
throws LDAPException {
Settings settings = config.settings();
SimpleBindRequest bindRequest = bindRequest(settings);
final int initialSize = POOL_INITIAL_SIZE.get(settings);
final int size = POOL_SIZE.get(settings);
LDAPConnectionPool pool = null;
boolean success = false;
try {
pool = LdapUtils.privilegedConnect(() -> new LDAPConnectionPool(serverSet, bindRequest, initialSize, size));
pool.setRetryFailedOperationsDueToInvalidConnections(true);
if (HEALTH_CHECK_ENABLED.get(settings)) {
String entryDn = HEALTH_CHECK_DN.get(settings).orElseGet(() -> bindRequest == null ? null : bindRequest.getBindDN());
final long healthCheckInterval = HEALTH_CHECK_INTERVAL.get(settings).millis();
if (entryDn != null) {
// Checks the status of the LDAP connection at a specified interval in the background. We do not check on
// on create as the LDAP server may require authentication to get an entry and a bind request has not been executed
// yet so we could end up never getting a connection. We do not check on checkout as we always set retry operations
// and the pool will handle a bad connection without the added latency on every operation
LDAPConnectionPoolHealthCheck healthCheck = new GetEntryLDAPConnectionPoolHealthCheck(entryDn, timeout.millis(),
false, false, false, true, false);
pool.setHealthCheck(healthCheck);
pool.setHealthCheckIntervalMillis(healthCheckInterval);
} else {
logger.warn("[" + RealmSettings.getFullSettingKey(config, BIND_DN) + "] and [" +
RealmSettings.getFullSettingKey(config, HEALTH_CHECK_DN) + "] have not been specified so no " +
"ldap query will be run as a health check");
}
}

success = true;
return pool;
} finally {
if (success == false && pool != null) {
pool.close();
}
}
searchFilter = getSearchFilter(config);
logger.info("Realm [{}] is in user-search mode - base_dn=[{}], search filter=[{}]",
config.name(), userSearchBaseDn, searchFilter);
}

static SimpleBindRequest bindRequest(Settings settings) {

@ -155,23 +85,15 @@ class LdapUserSearchSessionFactory extends SessionFactory {
}
}

public static boolean hasUserSearchSettings(RealmConfig config) {
static boolean hasUserSearchSettings(RealmConfig config) {
return config.settings().getByPrefix("user_search.").isEmpty() == false;
}

@Override
public void session(String user, SecureString password, ActionListener<LdapSession> listener) {
if (useConnectionPool) {
getSessionWithPool(user, password, listener);
} else {
getSessionWithoutPool(user, password, listener);
}
}

/**
* Sets up a LDAPSession using the connection pool that potentially holds existing connections to the server
*/
private void getSessionWithPool(String user, SecureString password, ActionListener<LdapSession> listener) {
@Override
void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password, ActionListener<LdapSession> listener) {
findUser(user, connectionPool, ActionListener.wrap((entry) -> {
if (entry == null) {
listener.onResponse(null);

@ -203,7 +125,8 @@ class LdapUserSearchSessionFactory extends SessionFactory {
* <li>Creates a new LDAPSession with the bound connection</li>
* </ol>
*/
private void getSessionWithoutPool(String user, SecureString password, ActionListener<LdapSession> listener) {
@Override
void getSessionWithoutPool(String user, SecureString password, ActionListener<LdapSession> listener) {
boolean success = false;
LDAPConnection connection = null;
try {

@ -260,33 +183,42 @@ class LdapUserSearchSessionFactory extends SessionFactory {
}

@Override
public void unauthenticatedSession(String user, ActionListener<LdapSession> listener) {
void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener<LdapSession> listener) {
findUser(user, connectionPool, ActionListener.wrap((entry) -> {
if (entry == null) {
listener.onResponse(null);
} else {
final String dn = entry.getDN();
LdapSession session = new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout,
entry.getAttributes());
listener.onResponse(session);
}
}, listener::onFailure));
}

@Override
void getUnauthenticatedSessionWithoutPool(String user, ActionListener<LdapSession> listener) {
LDAPConnection connection = null;
boolean success = false;
try {
final LDAPInterface ldapInterface;
if (useConnectionPool) {
ldapInterface = connectionPool;
} else {
connection = LdapUtils.privilegedConnect(serverSet::getConnection);
connection.bind(bindRequest(config.settings()));
ldapInterface = connection;
}
connection = LdapUtils.privilegedConnect(serverSet::getConnection);
connection.bind(bindRequest(config.settings()));
final LDAPConnection finalConnection = connection;

findUser(user, ldapInterface, ActionListener.wrap((entry) -> {
findUser(user, finalConnection, ActionListener.wrap((entry) -> {
if (entry == null) {
listener.onResponse(null);
} else {
boolean sessionCreated = false;
try {
final String dn = entry.getDN();
LdapSession session = new LdapSession(logger, config, ldapInterface, dn, groupResolver, metaDataResolver, timeout,
LdapSession session = new LdapSession(logger, config, finalConnection, dn, groupResolver, metaDataResolver, timeout,
entry.getAttributes());
sessionCreated = true;
listener.onResponse(session);
} finally {
if (sessionCreated == false && useConnectionPool == false) {
IOUtils.close((LDAPConnection) ldapInterface);
if (sessionCreated == false) {
IOUtils.close(finalConnection);
}
}
}
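// Note: without a pool, each unauthenticated lookup now opens and binds one dedicated
// connection; unless an LdapSession takes ownership of it (sessionCreated == true), the
// finally blocks close it, so the success/sessionCreated flags are what prevent a leak here.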

@ -302,41 +234,52 @@ class LdapUserSearchSessionFactory extends SessionFactory {
}

private void findUser(String user, LDAPInterface ldapInterface, ActionListener<SearchResultEntry> listener) {
final Filter filter;
try {
filter = createFilter(searchFilter, user);
} catch (LDAPException e) {
listener.onFailure(e);
return;
}

searchForEntry(ldapInterface, userSearchBaseDn, scope.scope(),
createEqualityFilter(userAttribute, encodeValue(user)), Math.toIntExact(timeout.seconds()), ignoreReferralErrors, listener,
filter, Math.toIntExact(timeout.seconds()), ignoreReferralErrors, listener,
attributesToSearchFor(groupResolver.attributes(), metaDataResolver.attributeNames()));
}

/*
* This method is used to cleanup the connections
*/
void shutdown() {
if (connectionPool != null) {
connectionPool.close();
}
}

static GroupsResolver groupResolver(Settings settings) {
private static GroupsResolver groupResolver(Settings settings) {
if (SearchGroupsResolver.BASE_DN.exists(settings)) {
return new SearchGroupsResolver(settings);
}
return new UserAttributeGroupsResolver(settings);
}

static String getSearchFilter(RealmConfig config) {
final Settings settings = config.settings();
final boolean hasAttribute = SEARCH_ATTRIBUTE.exists(settings);
final boolean hasFilter = SEARCH_FILTER.exists(settings);
if (hasAttribute && hasFilter) {
throw new IllegalArgumentException("search attribute setting [" +
RealmSettings.getFullSettingKey(config, SEARCH_ATTRIBUTE) + "] and filter setting [" +
RealmSettings.getFullSettingKey(config, SEARCH_FILTER) + "] cannot be combined!");
} else if (hasFilter) {
return SEARCH_FILTER.get(settings);
} else if (hasAttribute) {
return "(" + SEARCH_ATTRIBUTE.get(settings) + "={0})";
} else {
return "(uid={0})";
}
}
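
// Filter precedence, as implemented above (the filter value is an illustrative example):
//   user_search.filter set     -> used verbatim, e.g. "(sAMAccountName={0})"
//   user_search.attribute set  -> wrapped as "(<attribute>={0})"; the setting is now deprecated
//   neither set                -> default "(uid={0})"
//   both set                   -> IllegalArgumentException at realm construction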

public static Set<Setting<?>> getSettings() {
Set<Setting<?>> settings = new HashSet<>();
settings.addAll(SessionFactory.getSettings());
settings.addAll(PoolingSessionFactory.getSettings());
settings.add(SEARCH_BASE_DN);
settings.add(SEARCH_SCOPE);
settings.add(SEARCH_ATTRIBUTE);
settings.add(POOL_ENABLED);
settings.add(POOL_INITIAL_SIZE);
settings.add(POOL_SIZE);
settings.add(HEALTH_CHECK_ENABLED);
settings.add(HEALTH_CHECK_DN);
settings.add(HEALTH_CHECK_INTERVAL);
settings.add(BIND_DN);
settings.add(BIND_PASSWORD);
settings.add(SEARCH_FILTER);

settings.addAll(SearchGroupsResolver.getSettings());
settings.addAll(UserAttributeGroupsResolver.getSettings());

@ -0,0 +1,185 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.ldap;

import com.unboundid.ldap.sdk.BindRequest;
import com.unboundid.ldap.sdk.GetEntryLDAPConnectionPoolHealthCheck;
import com.unboundid.ldap.sdk.LDAPConnectionPool;
import com.unboundid.ldap.sdk.LDAPConnectionPoolHealthCheck;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.ServerSet;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.RealmSettings;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils;
import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory;
import org.elasticsearch.xpack.ssl.SSLService;

import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;

/**
* Base class for LDAP session factories that can make use of a connection pool
*/
abstract class PoolingSessionFactory extends SessionFactory implements Releasable {

static final int DEFAULT_CONNECTION_POOL_SIZE = 20;
static final int DEFAULT_CONNECTION_POOL_INITIAL_SIZE = 0;
static final Setting<String> BIND_DN = Setting.simpleString("bind_dn", Setting.Property.NodeScope, Setting.Property.Filtered);
static final Setting<String> BIND_PASSWORD = Setting.simpleString("bind_password", Setting.Property.NodeScope,
Setting.Property.Filtered);

private static final TimeValue DEFAULT_HEALTH_CHECK_INTERVAL = TimeValue.timeValueSeconds(60L);
private static final Setting<Integer> POOL_INITIAL_SIZE = Setting.intSetting("user_search.pool.initial_size",
DEFAULT_CONNECTION_POOL_INITIAL_SIZE, 0, Setting.Property.NodeScope);
private static final Setting<Integer> POOL_SIZE = Setting.intSetting("user_search.pool.size",
DEFAULT_CONNECTION_POOL_SIZE, 1, Setting.Property.NodeScope);
private static final Setting<TimeValue> HEALTH_CHECK_INTERVAL = Setting.timeSetting("user_search.pool.health_check.interval",
DEFAULT_HEALTH_CHECK_INTERVAL, Setting.Property.NodeScope);
private static final Setting<Boolean> HEALTH_CHECK_ENABLED = Setting.boolSetting("user_search.pool.health_check.enabled",
true, Setting.Property.NodeScope);
private static final Setting<Optional<String>> HEALTH_CHECK_DN = new Setting<>("user_search.pool.health_check.dn", (String) null,
Optional::ofNullable, Setting.Property.NodeScope);

private final boolean useConnectionPool;
private final LDAPConnectionPool connectionPool;

final LdapMetaDataResolver metaDataResolver;
final LdapSession.GroupsResolver groupResolver;

/**
* @param config the configuration for the realm
* @param sslService the ssl service to get a socket factory or context from
* @param groupResolver the resolver to use to find groups belonging to a user
* @param poolingEnabled the setting that should be used to determine if connection pooling is enabled
* @param bindRequestSupplier the supplier for the bind request that should be used for pooled connections
* @param healthCheckDNSupplier a supplier for the dn to query for health checks
*/
PoolingSessionFactory(RealmConfig config, SSLService sslService, LdapSession.GroupsResolver groupResolver,
Setting<Boolean> poolingEnabled, Supplier<BindRequest> bindRequestSupplier,
Supplier<String> healthCheckDNSupplier) throws LDAPException {
super(config, sslService);
this.groupResolver = groupResolver;
this.metaDataResolver = new LdapMetaDataResolver(config.settings(), ignoreReferralErrors);
this.useConnectionPool = poolingEnabled.get(config.settings());
if (useConnectionPool) {
this.connectionPool = createConnectionPool(config, serverSet, timeout, logger, bindRequestSupplier, healthCheckDNSupplier);
} else {
this.connectionPool = null;
}
}

@Override
public final void session(String user, SecureString password, ActionListener<LdapSession> listener) {
if (useConnectionPool) {
getSessionWithPool(connectionPool, user, password, listener);
} else {
getSessionWithoutPool(user, password, listener);
}
}

@Override
public final void unauthenticatedSession(String user, ActionListener<LdapSession> listener) {
if (useConnectionPool) {
getUnauthenticatedSessionWithPool(connectionPool, user, listener);
} else {
getUnauthenticatedSessionWithoutPool(user, listener);
}
}

/**
* Attempts to get a {@link LdapSession} using the provided credentials and makes use of the provided connection pool
*/
abstract void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password,
ActionListener<LdapSession> listener);

/**
* Attempts to get a {@link LdapSession} using the provided credentials and opens a new connection to the ldap server
*/
abstract void getSessionWithoutPool(String user, SecureString password, ActionListener<LdapSession> listener);

/**
* Attempts to search using a pooled connection for the user and provides an unauthenticated {@link LdapSession} to the listener if the
* user is found
*/
abstract void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener<LdapSession> listener);

/**
* Attempts to search using a new connection for the user and provides an unauthenticated {@link LdapSession} to the listener if the
* user is found
*/
abstract void getUnauthenticatedSessionWithoutPool(String user, ActionListener<LdapSession> listener);

/**
* Creates the connection pool that will be used by the session factory and initializes the health check support
*/
static LDAPConnectionPool createConnectionPool(RealmConfig config, ServerSet serverSet, TimeValue timeout, Logger logger,
Supplier<BindRequest> bindRequestSupplier,
Supplier<String> healthCheckDnSupplier) throws LDAPException {
Settings settings = config.settings();
BindRequest bindRequest = bindRequestSupplier.get();
final int initialSize = POOL_INITIAL_SIZE.get(settings);
final int size = POOL_SIZE.get(settings);
LDAPConnectionPool pool = null;
boolean success = false;
try {
pool = LdapUtils.privilegedConnect(() -> new LDAPConnectionPool(serverSet, bindRequest, initialSize, size));
pool.setRetryFailedOperationsDueToInvalidConnections(true);
if (HEALTH_CHECK_ENABLED.get(settings)) {
String entryDn = HEALTH_CHECK_DN.get(settings).orElseGet(healthCheckDnSupplier);
final long healthCheckInterval = HEALTH_CHECK_INTERVAL.get(settings).millis();
if (entryDn != null) {
// Checks the status of the LDAP connection at a specified interval in the background. We do not check on
// create as the LDAP server may require authentication to get an entry and a bind request has not been executed
// yet so we could end up never getting a connection. We do not check on checkout as we always set retry operations
// and the pool will handle a bad connection without the added latency on every operation
LDAPConnectionPoolHealthCheck healthCheck = new GetEntryLDAPConnectionPoolHealthCheck(entryDn, timeout.millis(),
false, false, false, true, false);
pool.setHealthCheck(healthCheck);
pool.setHealthCheckIntervalMillis(healthCheckInterval);
} else {
logger.warn(new ParameterizedMessage("[{}] and [{}} have not been specified or are not valid distinguished names," +
|
||||
"so connection health checking is disabled", RealmSettings.getFullSettingKey(config, BIND_DN),
|
||||
RealmSettings.getFullSettingKey(config, HEALTH_CHECK_DN)));
}
}

success = true;
return pool;
} finally {
if (success == false && pool != null) {
pool.close();
}
}
}

/**
* Cleans up the connection pool, if one is being used
*/
@Override
public final void close() {
if (connectionPool != null) {
connectionPool.close();
}
}

public static Set<Setting<?>> getSettings() {
return Sets.newHashSet(POOL_INITIAL_SIZE, POOL_SIZE, HEALTH_CHECK_ENABLED, HEALTH_CHECK_INTERVAL, HEALTH_CHECK_DN, BIND_DN,
BIND_PASSWORD);
}
}
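
PoolingSessionFactory centralises the pool lifecycle that LdapUserSearchSessionFactory previously managed itself: session() and unauthenticatedSession() are final template methods that dispatch on user_search.pool.enabled, and close() releases the pool. A usage sketch under those assumptions (the listener bodies are placeholders):

    LdapUserSearchSessionFactory factory = new LdapUserSearchSessionFactory(config, sslService);
    factory.session("jdoe", password, ActionListener.wrap(
            session -> { /* resolve groups, build the user, then IOUtils.close(session) */ },
            e -> logger.warn("ldap session failed", e)));
    // later, when the realm shuts down:
    factory.close(); // closes the LDAPConnectionPool, if pooling was enabled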

@ -110,7 +110,7 @@ class SearchGroupsResolver implements GroupsResolver {

private void getUserId(String dn, Collection<Attribute> attributes, LDAPInterface connection,
TimeValue timeout, ActionListener<String> listener) {
if (isNullOrEmpty(userAttribute)) {
if (isNullOrEmpty(userAttribute) || userAttribute.equals("dn")) {
listener.onResponse(dn);
} else if (attributes != null) {
final String value = attributes.stream()

@ -48,8 +48,7 @@ import java.util.stream.Collectors;

public final class LdapUtils {

public static final Filter OBJECT_CLASS_PRESENCE_FILTER =
Filter.createPresenceFilter("objectClass");
public static final Filter OBJECT_CLASS_PRESENCE_FILTER = Filter.createPresenceFilter("objectClass");

private static final Logger LOGGER = ESLoggerFactory.getLogger(LdapUtils.class);

@ -320,7 +319,7 @@ public final class LdapUtils {
: attributes.toArray(new String[attributes.size()]);
}

static String[] encodeFilterValues(String... arguments) {
private static String[] encodeFilterValues(String... arguments) {
for (int i = 0; i < arguments.length; i++) {
arguments[i] = Filter.encodeValue(arguments[i]);
}

@ -17,17 +17,17 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.security.authc.IncomingRequest;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper;
import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore;
import org.elasticsearch.xpack.ssl.CertUtils;
import org.elasticsearch.xpack.ssl.SSLConfigurationSettings;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.security.authc.Realm;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.authc.RealmSettings;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper;
import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.ssl.CertUtils;
import org.elasticsearch.xpack.ssl.SSLConfigurationSettings;

import javax.net.ssl.X509TrustManager;
import java.security.cert.Certificate;

@ -82,17 +82,18 @@ public class PkiRealm extends Realm {
}

@Override
public void authenticate(AuthenticationToken authToken, ActionListener<User> listener, IncomingRequest incomingRequest) {
public void authenticate(AuthenticationToken authToken, ActionListener<AuthenticationResult> listener) {
X509AuthenticationToken token = (X509AuthenticationToken)authToken;
if (isCertificateChainTrusted(trustManager, token, logger) == false) {
listener.onResponse(null);
listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " is not trusted", null));
} else {
final Map<String, Object> metadata = Collections.singletonMap("pki_dn", token.dn());
final UserRoleMapper.UserData user = new UserRoleMapper.UserData(token.principal(),
token.dn(), Collections.emptySet(), metadata, this.config);
roleMapper.resolveRoles(user, ActionListener.wrap(
roles -> listener.onResponse(new User(token.principal(),
roles.toArray(new String[roles.size()]), null, null, metadata, true)),
roles -> listener.onResponse(AuthenticationResult.success(
new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true)
)),
listener::onFailure
));
}
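
// Note: returning AuthenticationResult.unsuccessful (rather than null) preserves the reason the
// certificate was rejected; the authentication chain can still move on to the next realm, but
// the message is now available for diagnostics instead of being lost.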

@ -5,23 +5,24 @@
*/
package org.elasticsearch.xpack.security.authc.support;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.security.authc.IncomingRequest;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.elasticsearch.xpack.security.user.User;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;

public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm implements CachingRealm {

public static final Setting<String> CACHE_HASH_ALGO_SETTING = Setting.simpleString("cache.hash_algo", Setting.Property.NodeScope);

@ -70,16 +71,15 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm
* doAuthenticate
* @param authToken The authentication token
* @param listener to be called at completion
* @param incomingRequest the request that is being authenticated
*/
@Override
public final void authenticate(AuthenticationToken authToken, ActionListener<User> listener, IncomingRequest incomingRequest) {
public final void authenticate(AuthenticationToken authToken, ActionListener<AuthenticationResult> listener) {
UsernamePasswordToken token = (UsernamePasswordToken) authToken;
try {
if (cache == null) {
doAuthenticate(token, listener, incomingRequest);
doAuthenticate(token, listener);
} else {
authenticateWithCache(token, listener, incomingRequest);
authenticateWithCache(token, listener);
}
} catch (Exception e) {
// each realm should handle exceptions, if we get one here it should be considered fatal

@ -87,72 +87,75 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm
}
}

private void authenticateWithCache(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
private void authenticateWithCache(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
UserWithHash userWithHash = cache.get(token.principal());
if (userWithHash == null) {
if (logger.isDebugEnabled()) {
logger.debug("user [{}] not found in cache for realm [{}], proceeding with normal authentication",
token.principal(), name());
}
doAuthenticateAndCache(token, ActionListener.wrap((user) -> {
if (user != null) {
doAuthenticateAndCache(token, ActionListener.wrap((result) -> {
if (result.isAuthenticated()) {
final User user = result.getUser();
logger.debug("realm [{}] authenticated user [{}], with roles [{}]", name(), token.principal(), user.roles());
}
listener.onResponse(user);
}, listener::onFailure), incomingRequest);
listener.onResponse(result);
}, listener::onFailure));
} else if (userWithHash.hasHash()) {
if (userWithHash.verify(token.credentials())) {
if (userWithHash.user.enabled()) {
User user = userWithHash.user;
logger.debug("realm [{}] authenticated user [{}], with roles [{}]", name(), token.principal(), user.roles());
listener.onResponse(user);
listener.onResponse(AuthenticationResult.success(user));
} else {
// We successfully authenticated, but the cached user is disabled.
// Reload the primary record to check whether the user is still disabled
cache.invalidate(token.principal());
doAuthenticateAndCache(token, ActionListener.wrap((user) -> {
if (user != null) {
doAuthenticateAndCache(token, ActionListener.wrap((result) -> {
if (result.isAuthenticated()) {
final User user = result.getUser();
logger.debug("realm [{}] authenticated user [{}] (enabled:{}), with roles [{}]", name(), token.principal(),
user.enabled(), user.roles());
}
listener.onResponse(user);
}, listener::onFailure), incomingRequest);
listener.onResponse(result);
}, listener::onFailure));
}
} else {
cache.invalidate(token.principal());
doAuthenticateAndCache(token, ActionListener.wrap((user) -> {
if (user != null) {
doAuthenticateAndCache(token, ActionListener.wrap((result) -> {
if (result.isAuthenticated()) {
final User user = result.getUser();
logger.debug("cached user's password changed. realm [{}] authenticated user [{}], with roles [{}]",
name(), token.principal(), user.roles());
}
listener.onResponse(user);
}, listener::onFailure), incomingRequest);
listener.onResponse(result);
}, listener::onFailure));
}
} else {
cache.invalidate(token.principal());
doAuthenticateAndCache(token, ActionListener.wrap((user) -> {
if (user != null) {
doAuthenticateAndCache(token, ActionListener.wrap((result) -> {
if (result.isAuthenticated()) {
final User user = result.getUser();
logger.debug("cached user came from a lookup and could not be used for authentication. " +
"realm [{}] authenticated user [{}] with roles [{}]", name(), token.principal(), user.roles());
}
listener.onResponse(user);
}, listener::onFailure), incomingRequest);
listener.onResponse(result);
}, listener::onFailure));
}
}

private void doAuthenticateAndCache(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest) {
ActionListener<User> wrapped = ActionListener.wrap((user) -> {
if (user == null) {
listener.onResponse(null);
} else {
UserWithHash userWithHash = new UserWithHash(user, token.credentials(), hasher);
private void doAuthenticateAndCache(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
ActionListener<AuthenticationResult> wrapped = ActionListener.wrap((result) -> {
Objects.requireNonNull(result, "AuthenticationResult cannot be null");
if (result.getStatus() == AuthenticationResult.Status.SUCCESS) {
UserWithHash userWithHash = new UserWithHash(result.getUser(), token.credentials(), hasher);
// it doesn't matter if we already computed it elsewhere
cache.put(token.principal(), userWithHash);
listener.onResponse(user);
}
listener.onResponse(result);
}, listener::onFailure);

doAuthenticate(token, wrapped, incomingRequest);
doAuthenticate(token, wrapped);
}
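
// A sketch of the contract doAuthenticate now honours (hypothetical subclass; lookup and
// verify stand in for a realm's backing store):
//
//     @Override
//     protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
//         User user = lookup(token.principal());
//         if (user != null && verify(token.credentials(), user)) {
//             listener.onResponse(AuthenticationResult.success(user));   // only SUCCESS is cached above
//         } else {
//             listener.onResponse(AuthenticationResult.unsuccessful("failed to verify password", null));
//         }
//     }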

@Override

@ -162,7 +165,7 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm
return stats;
}

protected abstract void doAuthenticate(UsernamePasswordToken token, ActionListener<User> listener, IncomingRequest incomingRequest);
protected abstract void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener);

@Override
public final void lookupUser(String username, ActionListener<User> listener) {

@ -5,22 +5,6 @@
*/
package org.elasticsearch.xpack.security.authc.support;

import com.unboundid.ldap.sdk.DN;
import com.unboundid.ldap.sdk.LDAPException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.watcher.FileChangesListener;
import org.elasticsearch.watcher.FileWatcher;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.authc.RealmConfig;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;

@ -37,6 +21,25 @@ import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;

import com.unboundid.ldap.sdk.DN;
import com.unboundid.ldap.sdk.LDAPException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.watcher.FileChangesListener;
import org.elasticsearch.watcher.FileWatcher;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.authc.RealmConfig;
import org.yaml.snakeyaml.error.YAMLException;

import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.dn;

@ -57,20 +60,18 @@ public class DnRoleMapper implements UserRoleMapper {
protected final Logger logger;
protected final RealmConfig config;

private final String realmType;
private final Path file;
private final boolean useUnmappedGroupsAsRoles;
private final CopyOnWriteArrayList<Runnable> listeners = new CopyOnWriteArrayList<>();
private volatile Map<DN, Set<String>> dnRoles;

public DnRoleMapper(String realmType, RealmConfig config, ResourceWatcherService watcherService) {
this.realmType = realmType;
public DnRoleMapper(RealmConfig config, ResourceWatcherService watcherService) {
this.config = config;
this.logger = config.logger(getClass());

useUnmappedGroupsAsRoles = USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.get(config.settings());
file = resolveFile(config.settings(), config.env());
dnRoles = parseFileLenient(file, logger, realmType, config.name());
dnRoles = parseFileLenient(file, logger, config.type(), config.name());
FileWatcher watcher = new FileWatcher(file.getParent());
watcher.addListener(new FileListener());
try {

@ -101,7 +102,7 @@ public class DnRoleMapper implements UserRoleMapper {
*/
public static Map<DN, Set<String>> parseFileLenient(Path path, Logger logger, String realmType, String realmName) {
try {
return parseFile(path, logger, realmType, realmName);
return parseFile(path, logger, realmType, realmName, false);
} catch (Exception e) {
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(

@ -110,14 +111,20 @@ public class DnRoleMapper implements UserRoleMapper {
}
}

public static Map<DN, Set<String>> parseFile(Path path, Logger logger, String realmType, String realmName) {
public static Map<DN, Set<String>> parseFile(Path path, Logger logger, String realmType, String realmName, boolean strict) {

logger.trace("reading realm [{}/{}] role mappings file [{}]...", realmType, realmName, path.toAbsolutePath());

if (!Files.exists(path)) {
logger.warn("Role mapping file [{}] for realm [{}] does not exist. Role mapping will be skipped.",
if (Files.exists(path) == false) {
final ParameterizedMessage message = new ParameterizedMessage(
"Role mapping file [{}] for realm [{}] does not exist.",
path.toAbsolutePath(), realmName);
return emptyMap();
if (strict) {
throw new ElasticsearchException(message.getFormattedMessage());
} else {
logger.warn(message.getFormattedMessage() + " Role mapping will be skipped.");
return emptyMap();
}
}

try (InputStream in = Files.newInputStream(path)) {

@ -136,14 +143,18 @@ public class DnRoleMapper implements UserRoleMapper {
}
dnRoles.add(role);
} catch (LDAPException e) {
logger.error(new ParameterizedMessage(
"invalid DN [{}] found in [{}] role mappings [{}] for realm [{}/{}]. skipping... ",
providedDn,
realmType,
path.toAbsolutePath(),
realmType,
realmName),
e);
ParameterizedMessage message = new ParameterizedMessage(
"invalid DN [{}] found in [{}] role mappings [{}] for realm [{}/{}].",
providedDn,
realmType,
path.toAbsolutePath(),
realmType,
realmName);
if (strict) {
throw new ElasticsearchException(message.getFormattedMessage(), e);
} else {
logger.error(message.getFormattedMessage() + " skipping...", e);
}
}
}
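
// The strict flag means the same file is now parsed two ways (sketch; literal arguments are
// illustrative):
//
//     parseFile(path, logger, "ldap", "ldap1", false);  // lenient: log bad entries and skip them
//     parseFile(path, logger, "ldap", "ldap1", true);   // strict: throw ElasticsearchException
//
// Lenient parsing keeps the runtime file watcher resilient; strict parsing backs the new
// RoleMappingFileBootstrapCheck added later in this commit.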

@ -152,7 +163,7 @@ public class DnRoleMapper implements UserRoleMapper {
logger.debug("[{}] role mappings found in file [{}] for realm [{}/{}]", dnToRoles.size(), path.toAbsolutePath(), realmType,
realmName);
return unmodifiableMap(dnToRoles);
} catch (IOException e) {
} catch (IOException | YAMLException e) {
throw new ElasticsearchException("could not read realm [" + realmType + "/" + realmName + "] role mappings file [" +
path.toAbsolutePath() + "]", e);
}

@ -166,7 +177,7 @@ public class DnRoleMapper implements UserRoleMapper {
public void resolveRoles(UserData user, ActionListener<Set<String>> listener) {
try {
listener.onResponse(resolveRoles(user.getDn(), user.getGroups()));
} catch( Exception e) {
} catch (Exception e) {
listener.onFailure(e);
}
}

@ -185,8 +196,8 @@ public class DnRoleMapper implements UserRoleMapper {
}
}
if (logger.isDebugEnabled()) {
logger.debug("the roles [{}], are mapped from these [{}] groups [{}] using file [{}] for realm [{}/{}]", roles, realmType,
groupDns, file.getFileName(), realmType, config.name());
logger.debug("the roles [{}], are mapped from these [{}] groups [{}] using file [{}] for realm [{}/{}]", roles, config.type(),
groupDns, file.getFileName(), config.type(), config.name());
}

DN userDn = dn(userDnString);

@ -197,7 +208,7 @@ public class DnRoleMapper implements UserRoleMapper {
if (logger.isDebugEnabled()) {
logger.debug("the roles [{}], are mapped from the user [{}] using file [{}] for realm [{}/{}]",
(rolesMappedToUserDn == null) ? Collections.emptySet() : rolesMappedToUserDn, userDnString, file.getFileName(),
realmType, config.name());
config.type(), config.name());
}
return roles;
}

@ -225,8 +236,8 @@ public class DnRoleMapper implements UserRoleMapper {
public void onFileChanged(Path file) {
if (file.equals(DnRoleMapper.this.file)) {
logger.info("role mappings file [{}] changed for realm [{}/{}]. updating mappings...", file.toAbsolutePath(),
realmType, config.name());
dnRoles = parseFileLenient(file, logger, realmType, config.name());
config.type(), config.name());
dnRoles = parseFileLenient(file, logger, config.type(), config.name());
notifyRefresh();
}
}

@ -0,0 +1,58 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.support;

import java.nio.file.Path;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.xpack.security.authc.RealmConfig;

/**
* A BootstrapCheck that verifies {@link DnRoleMapper} files exist and are valid (valid YAML and valid DNs)
*/
public class RoleMappingFileBootstrapCheck implements BootstrapCheck {

private final RealmConfig realmConfig;
private final Path path;

private final SetOnce<String> error = new SetOnce<>();

public RoleMappingFileBootstrapCheck(RealmConfig config, Path path) {
this.realmConfig = config;
this.path = path;
}

@Override
public boolean check() {
try {
DnRoleMapper.parseFile(path, realmConfig.logger(getClass()), realmConfig.type(), realmConfig.name(), true);
return false;
} catch (Exception e) {
error.set(e.getMessage());
return true;
}

}

@Override
public String errorMessage() {
return error.get();
}

@Override
public boolean alwaysEnforce() {
return true;
}

public static BootstrapCheck create(RealmConfig realmConfig) {
if (realmConfig.enabled() && DnRoleMapper.ROLE_MAPPING_FILE_SETTING.exists(realmConfig.settings())) {
Path file = DnRoleMapper.resolveFile(realmConfig.settings(), realmConfig.env());
return new RoleMappingFileBootstrapCheck(realmConfig, file);
}
return null;
}
}
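
The check is wired up per realm: create() returns null when the realm is disabled or no role-mapping file is configured, so callers register only the checks that apply. A registration sketch (the surrounding checks list is hypothetical):

    BootstrapCheck check = RoleMappingFileBootstrapCheck.create(realmConfig);
    if (check != null) {
        checks.add(check); // alwaysEnforce() == true: runs even outside production mode
    }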

@ -34,7 +34,7 @@ public class CompositeRoleMapper implements UserRoleMapper {
public CompositeRoleMapper(String realmType, RealmConfig realmConfig,
ResourceWatcherService watcherService,
NativeRoleMappingStore nativeRoleMappingStore) {
this(new DnRoleMapper(realmType, realmConfig, watcherService), nativeRoleMappingStore);
this(new DnRoleMapper(realmConfig, watcherService), nativeRoleMappingStore);
}

private CompositeRoleMapper(UserRoleMapper... delegates) {

@ -152,15 +152,6 @@ public class AuthorizationService extends AbstractComponent {
throw denial(authentication, action, request);
}

// norelease
// TODO: This functionality is disabled as it is not yet compatible with the upgrade process
// If the user is the elastic user in setup mode, then only change password requests can be authorized
// if (ElasticUser.isElasticUserInSetupMode(authentication.getUser())
// && ChangePasswordAction.NAME.equals(action) == false
// && ClusterHealthAction.NAME.equals(action) == false) {
// throw denial(authentication, action, request);
// }

// get the roles of the authenticated user, which may be different than the effective
Role permission = userRole;

@ -8,8 +8,8 @@ package org.elasticsearch.xpack.security.authz.accesscontrol;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.DocValuesNumbersQuery;
import org.apache.lucene.search.FieldValueQuery;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;

@ -23,9 +23,7 @@ import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.spans.SpanTermQuery;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**

@ -77,8 +75,8 @@ class FieldExtractor {
fields.add(((PointRangeQuery)query).getField());
} else if (query instanceof PointInSetQuery) {
fields.add(((PointInSetQuery)query).getField());
} else if (query instanceof FieldValueQuery) {
fields.add(((FieldValueQuery)query).getField());
} else if (query instanceof DocValuesFieldExistsQuery) {
fields.add(((DocValuesFieldExistsQuery)query).getField());
} else if (query instanceof DocValuesNumbersQuery) {
fields.add(((DocValuesNumbersQuery)query).getField());
} else if (query instanceof IndexOrDocValuesQuery) {

@ -58,6 +58,7 @@ import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authz.AuthorizationService;

@ -272,7 +273,8 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper {
userModel.put("metadata", Collections.unmodifiableMap(user.metadata()));
params.put("_user", userModel);
// Always enforce mustache script lang:
script = new Script(script.getType(), "mustache", script.getIdOrCode(), script.getOptions(), params);
script = new Script(script.getType(),
script.getType() == ScriptType.STORED ? null : "mustache", script.getIdOrCode(), script.getOptions(), params);
TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams());
return compiledTemplate.execute();
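// Note: a stored script carries its own language, and compiling one with an explicit lang is
// rejected, hence the STORED -> null switch above; non-stored templates still get "mustache"
// enforced as before.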
} else {

@ -20,7 +20,9 @@ public final class SystemPrivilege extends Privilege {
"indices:monitor/*", // added for monitoring
"cluster:monitor/*", // added for monitoring
"cluster:admin/reroute", // added for DiskThresholdDecider.DiskListener
"indices:admin/mapping/put" // needed for recovery and shrink api
"indices:admin/mapping/put", // needed for recovery and shrink api
"indices:admin/template/put", // needed for the TemplateUpgradeService
"indices:admin/template/delete" // needed for the TemplateUpgradeService
), Automatons.patterns("internal:transport/proxy/*"))); // no proxy actions for system user!
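// Effect (illustrative; assumes the usual SystemPrivilege.INSTANCE singleton): the system user,
// which TemplateUpgradeService acts as, may now manage index templates while proxy actions
// remain excluded:
//     SystemPrivilege.INSTANCE.predicate().test("indices:admin/template/put")  -> true
//     SystemPrivilege.INSTANCE.predicate().test("internal:transport/proxy/x")  -> false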

private SystemPrivilege() {

@ -6,6 +6,8 @@
package org.elasticsearch.xpack.security.authz.store;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.cache.Cache;

@ -83,8 +85,7 @@ public class CompositeRolesStore extends AbstractComponent {
public CompositeRolesStore(Settings settings, FileRolesStore fileRolesStore, NativeRolesStore nativeRolesStore,
ReservedRolesStore reservedRolesStore,
List<BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>>> rolesProviders,
ThreadContext threadContext,
XPackLicenseState licenseState) {
ThreadContext threadContext, XPackLicenseState licenseState) {
super(settings);
this.fileRolesStore = fileRolesStore;
// invalidating all on a file based role update is heavy handed to say the least, but in general this should be infrequent so the

@ -289,6 +290,16 @@ public class CompositeRolesStore extends AbstractComponent {
}, listener::onFailure));
}

public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
final boolean movedFromRedToNonRed = (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED)
&& currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED;
final boolean indexDeleted = previousHealth != null && currentHealth == null;

if (movedFromRedToNonRed || indexDeleted) {
invalidateAll();
}
}
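
// Invalidation matrix encoded above (arguments are ClusterIndexHealth values, abbreviated here
// by their status for illustration):
//     (null,  GREEN)  -> invalidate: the security index just became available
//     (RED,   YELLOW) -> invalidate: the index recovered from RED
//     (GREEN, null)   -> invalidate: the index was deleted
//     (GREEN, YELLOW) -> no-op: the index stayed readable throughout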
|
||||
|
||||
/**
|
||||
* A mutable class that can be used to represent the combination of one or more {@link IndicesPrivileges}
|
||||
*/
|
||||
|
|
|
@@ -58,8 +58,8 @@ public class ReservedRolesStore {
                null, MetadataUtils.DEFAULT_RESERVED_METADATA))
        .put("ingest_admin", new RoleDescriptor("ingest_admin", new String[] { "manage_index_templates", "manage_pipeline" },
                null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
        .put("reporting_user", new RoleDescriptor("reporting_user", null, new RoleDescriptor.IndicesPrivileges[] {
                RoleDescriptor.IndicesPrivileges.builder().indices(".reporting-*").privileges("read", "write").build() },
        // reporting_user doesn't have any privileges in Elasticsearch, and Kibana authorizes privileges based on this role
        .put("reporting_user", new RoleDescriptor("reporting_user", null, null,
                null, MetadataUtils.DEFAULT_RESERVED_METADATA))
        .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, new String[] { "monitor", MonitoringBulkAction.NAME},
                new RoleDescriptor.IndicesPrivileges[] {

@@ -0,0 +1,122 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.bootstrap;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.user.ChangePasswordRequestBuilder;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;

import java.util.concurrent.Semaphore;

/**
 * This process adds a ClusterStateListener to the ClusterService that will listen for cluster state updates.
 * Once the cluster and the security index are ready, it will attempt to bootstrap the elastic user's
 * password with a password from the keystore. If the password is not in the keystore or the elastic user
 * already has a password, then the user's password will not be set. Once the process is complete, the
 * listener will remove itself.
 */
public final class BootstrapElasticPassword {

    private final Settings settings;
    private final Logger logger;
    private final ClusterService clusterService;
    private final ReservedRealm reservedRealm;
    private final SecurityLifecycleService lifecycleService;
    private final boolean reservedRealmDisabled;

    public BootstrapElasticPassword(Settings settings, Logger logger, ClusterService clusterService, ReservedRealm reservedRealm,
                                    SecurityLifecycleService lifecycleService) {
        this.reservedRealmDisabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings) == false;
        this.settings = settings;
        this.logger = logger;
        this.clusterService = clusterService;
        this.reservedRealm = reservedRealm;
        this.lifecycleService = lifecycleService;
    }

    public void initiatePasswordBootstrap() {
        SecureString bootstrapPassword = ReservedRealm.BOOTSTRAP_ELASTIC_PASSWORD.get(settings);
        if (bootstrapPassword.length() == 0) {
            return;
        } else if (reservedRealmDisabled) {
            logger.warn("elastic password will not be bootstrapped because the reserved realm is disabled");
            bootstrapPassword.close();
            return;
        }

        SecureString passwordHash = new SecureString(ChangePasswordRequestBuilder.validateAndHashPassword(bootstrapPassword));
        bootstrapPassword.close();

        clusterService.addListener(new BootstrapPasswordClusterStateListener(passwordHash));
    }

    private class BootstrapPasswordClusterStateListener implements ClusterStateListener {

        private final Semaphore semaphore = new Semaphore(1);
        private final SecureString passwordHash;
        private final SetOnce<Boolean> isDone = new SetOnce<>();

        private BootstrapPasswordClusterStateListener(SecureString passwordHash) {
            this.passwordHash = passwordHash;
        }

        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)
                    || lifecycleService.isSecurityIndexOutOfDate()
                    || (lifecycleService.isSecurityIndexExisting() && lifecycleService.isSecurityIndexAvailable() == false)
                    || lifecycleService.isSecurityIndexWriteable() == false) {
                // We hold off bootstrapping until the node recovery is complete, the security index is up to date, and
                // security index is writeable. If the security index currently exists, it must also be available.
                return;
            }

            // Only allow one attempt to bootstrap the password at a time
            if (semaphore.tryAcquire()) {
                // Ensure that we do not attempt to bootstrap after the process is complete. This is important as we
                // clear the password hash in the cleanup phase.
                if (isDone.get() != null) {
                    semaphore.release();
                    return;
                }

                reservedRealm.bootstrapElasticUserCredentials(passwordHash, new ActionListener<Boolean>() {
                    @Override
                    public void onResponse(Boolean passwordSet) {
                        cleanup();
                        if (passwordSet == false) {
                            logger.warn("elastic password was not bootstrapped because its password was already set");
                        }
                        semaphore.release();
                    }

                    @Override
                    public void onFailure(Exception e) {
                        cleanup();
                        logger.error("unexpected exception when attempting to bootstrap password", e);
                        semaphore.release();
                    }
                });
            }
        }

        private void cleanup() {
            isDone.set(true);
            IOUtils.closeWhileHandlingException(() -> clusterService.removeListener(this), passwordHash);
        }
    }
}

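A minimal usage sketch, assuming this is invoked once during node startup after the ReservedRealm and SecurityLifecycleService are built (the surrounding bootstrap code is not part of this diff):

    // No-op unless a bootstrap password is present in the keystore; otherwise adds a
    // self-removing ClusterStateListener that sets the elastic user's password once.
    BootstrapElasticPassword bootstrap =
            new BootstrapElasticPassword(settings, logger, clusterService, reservedRealm, lifecycleService);
    bootstrap.initiatePasswordBootstrap();
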
@@ -21,6 +21,7 @@ import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZ
import static org.apache.lucene.util.automaton.Operations.concatenate;
import static org.apache.lucene.util.automaton.Operations.minus;
import static org.apache.lucene.util.automaton.Operations.union;
import static org.elasticsearch.common.Strings.collectionToDelimitedString;

public final class Automatons {

@@ -122,11 +123,25 @@ public final class Automatons {
    }

    public static Predicate<String> predicate(Collection<String> patterns) {
        return predicate(patterns(patterns));
        return predicate(patterns(patterns), collectionToDelimitedString(patterns, "|"));
    }

    public static Predicate<String> predicate(Automaton automaton) {
        return predicate(automaton, "Predicate for " + automaton);
    }

    private static Predicate<String> predicate(Automaton automaton, final String toString) {
        CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton, DEFAULT_MAX_DETERMINIZED_STATES);
        return runAutomaton::run;
        return new Predicate<String>() {
            @Override
            public boolean test(String s) {
                return runAutomaton.run(s);
            }

            @Override
            public String toString() {
                return toString;
            }
        };
    }
}

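A sketch of the behavioural change: the returned predicate now carries a readable toString built from its patterns, instead of a lambda's identity string, which matters for the trust-restriction log messages further down in this commit:

    Predicate<String> trusted = Automatons.predicate(Arrays.asList("*.node.cluster1", "admin.cluster1"));
    trusted.test("es01.node.cluster1"); // true
    trusted.toString();                 // "*.node.cluster1|admin.cluster1"
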
@@ -5,15 +5,16 @@
 */
package org.elasticsearch.xpack.security.support;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

@@ -34,6 +35,7 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateReque
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.metadata.AliasOrIndex;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;

@@ -82,6 +84,8 @@ public class IndexLifecycleManager extends AbstractComponent {
    private final AtomicReference<UpgradeState> migrateDataState = new AtomicReference<>(UpgradeState.NOT_STARTED);
    private final AtomicInteger migrateDataAttempts = new AtomicInteger(0);

    private final List<BiConsumer<ClusterIndexHealth, ClusterIndexHealth>> indexHealthChangeListeners = new CopyOnWriteArrayList<>();

    private volatile boolean templateIsUpToDate;
    private volatile boolean indexExists;
    private volatile boolean isIndexUpToDate;

@@ -155,9 +159,18 @@ public class IndexLifecycleManager extends AbstractComponent {
        return this.migrateDataState.get();
    }

    /**
     * Adds a listener which will be notified when the security index health changes. The previous and
     * current health will be provided to the listener so that the listener can determine if any action
     * needs to be taken.
     */
    public void addIndexHealthChangeListener(BiConsumer<ClusterIndexHealth, ClusterIndexHealth> listener) {
        indexHealthChangeListeners.add(listener);
    }

    public void clusterChanged(ClusterChangedEvent event) {
        final ClusterState state = event.state();
        processClusterState(state);
        processClusterState(event.state());
        checkIndexHealthChange(event);
    }

    private void processClusterState(ClusterState state) {

@@ -183,6 +196,37 @@ public class IndexLifecycleManager extends AbstractComponent {
        }
    }

    private void checkIndexHealthChange(ClusterChangedEvent event) {
        final ClusterState state = event.state();
        final ClusterState previousState = event.previousState();
        final IndexMetaData indexMetaData = resolveConcreteIndex(indexName, state.metaData());
        final IndexMetaData previousIndexMetaData = resolveConcreteIndex(indexName, previousState.metaData());
        if (indexMetaData != null) {
            final ClusterIndexHealth currentHealth =
                    new ClusterIndexHealth(indexMetaData, state.getRoutingTable().index(indexMetaData.getIndex()));
            final ClusterIndexHealth previousHealth = previousIndexMetaData != null ? new ClusterIndexHealth(previousIndexMetaData,
                    previousState.getRoutingTable().index(previousIndexMetaData.getIndex())) : null;

            if (previousHealth == null || previousHealth.getStatus() != currentHealth.getStatus()) {
                notifyIndexHealthChangeListeners(previousHealth, currentHealth);
            }
        } else if (previousIndexMetaData != null) {
            final ClusterIndexHealth previousHealth =
                    new ClusterIndexHealth(previousIndexMetaData, previousState.getRoutingTable().index(previousIndexMetaData.getIndex()));
            notifyIndexHealthChangeListeners(previousHealth, null);
        }
    }

    private void notifyIndexHealthChangeListeners(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
        for (BiConsumer<ClusterIndexHealth, ClusterIndexHealth> consumer : indexHealthChangeListeners) {
            try {
                consumer.accept(previousHealth, currentHealth);
            } catch (Exception e) {
                logger.warn(new ParameterizedMessage("failed to notify listener [{}] of index health change", consumer), e);
            }
        }
    }

    private boolean checkIndexAvailable(ClusterState state) {
        final IndexRoutingTable routingTable = getIndexRoutingTable(state);
        if (routingTable != null && routingTable.allPrimaryShardsActive()) {

@@ -18,27 +18,8 @@ public class ElasticUser extends User {

    public static final String NAME = "elastic";
    private static final String ROLE_NAME = "superuser";
    private static final String SETUP_MODE = "_setup_mode";

    public ElasticUser(boolean enabled) {
        this(enabled, false);
    }

    public ElasticUser(boolean enabled, boolean setupMode) {
        super(NAME, new String[] { ROLE_NAME }, null, null, metadata(setupMode), enabled);
    }

    public static boolean isElasticUserInSetupMode(User user) {
        return NAME.equals(user.principal()) && Boolean.TRUE.equals(user.metadata().get(SETUP_MODE));
    }

    private static Map<String, Object> metadata(boolean setupMode) {
        if (setupMode == false) {
            return MetadataUtils.DEFAULT_RESERVED_METADATA;
        } else {
            HashMap<String, Object> metadata = new HashMap<>(MetadataUtils.DEFAULT_RESERVED_METADATA);
            metadata.put(SETUP_MODE, true);
            return metadata;
        }
        super(NAME, new String[] { ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled);
    }
}

@@ -5,44 +5,6 @@
 */
package org.elasticsearch.xpack.ssl;

import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.AuthorityKeyIdentifier;
import org.bouncycastle.asn1.x509.BasicConstraints;
import org.bouncycastle.asn1.x509.Extension;
import org.bouncycastle.asn1.x509.ExtensionsGenerator;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.bouncycastle.asn1.x509.Time;
import org.bouncycastle.cert.CertIOException;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.openssl.PEMEncryptedKeyPair;
import org.bouncycastle.openssl.PEMKeyPair;
import org.bouncycastle.openssl.PEMParser;
import org.bouncycastle.openssl.X509TrustedCertificateBlock;
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter;
import org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.InetAddressHelper;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.env.Environment;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManager;

@@ -79,12 +41,54 @@ import java.util.Locale;
import java.util.Set;
import java.util.function.Supplier;

import org.bouncycastle.asn1.ASN1Encodable;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.DERIA5String;
import org.bouncycastle.asn1.DERSequence;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.AuthorityKeyIdentifier;
import org.bouncycastle.asn1.x509.BasicConstraints;
import org.bouncycastle.asn1.x509.Extension;
import org.bouncycastle.asn1.x509.ExtensionsGenerator;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.bouncycastle.asn1.x509.Time;
import org.bouncycastle.cert.CertIOException;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.openssl.PEMEncryptedKeyPair;
import org.bouncycastle.openssl.PEMKeyPair;
import org.bouncycastle.openssl.PEMParser;
import org.bouncycastle.openssl.X509TrustedCertificateBlock;
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter;
import org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.InetAddressHelper;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.env.Environment;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

/**
 * Utility methods that deal with {@link Certificate}, {@link KeyStore}, {@link X509ExtendedTrustManager}, {@link X509ExtendedKeyManager}
 * and other certificate related objects.
 */
public class CertUtils {

    static final String CN_OID = "2.5.4.3";

    private static final int SERIAL_BIT_LENGTH = 20 * 8;
    static final BouncyCastleProvider BC_PROV = new BouncyCastleProvider();

@@ -137,6 +141,7 @@ public class CertUtils {
     */
    public static X509ExtendedTrustManager trustManager(Certificate[] certificates)
            throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException, IOException, CertificateException {
        assert certificates != null : "Cannot create trust manager with null certificates";
        KeyStore store = KeyStore.getInstance("jks");
        store.load(null, null);
        int counter = 0;

@@ -416,4 +421,16 @@ public class CertUtils {
            }
        }
    }

    /**
     * Creates an X.509 {@link GeneralName} for use as a <em>Common Name</em> in the certificate's <em>Subject Alternative Names</em>
     * extension. A <em>common name</em> is a name with a tag of {@link GeneralName#otherName OTHER}, with an object-id that references
     * the {@link #CN_OID cn} attribute, and a DER encoded IA5 (ASCII) string for the name.
     * This use of the {@code cn} OID as a <em>Subject Alternative Name</em> is <strong>non-standard</strong> and will not be
     * recognised by other X.509/TLS implementations.
     */
    static GeneralName createCommonName(String cn) {
        final ASN1Encodable[] sequence = { new ASN1ObjectIdentifier(CN_OID), new DERIA5String(cn) };
        return new GeneralName(GeneralName.otherName, new DERSequence(sequence));
    }
}

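A short sketch of what the new helper produces (the values follow directly from the method body above):

    // An otherName SAN whose value is SEQUENCE { OID 2.5.4.3 (cn), IA5String "node01" }
    GeneralName san = CertUtils.createCommonName("node01");
    assert san.getTagNo() == GeneralName.otherName;
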
@@ -5,31 +5,6 @@
 */
package org.elasticsearch.xpack.ssl;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.bouncycastle.asn1.DERIA5String;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.bouncycastle.openssl.PEMEncryptor;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;

import javax.security.auth.x500.X500Principal;
import java.io.IOException;
import java.io.OutputStream;

@@ -59,6 +34,31 @@ import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.bouncycastle.asn1.DERIA5String;
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.bouncycastle.openssl.PEMEncryptor;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;

/**
 * CLI tool to make generation of certificates or certificate requests easier for users
 */

@@ -89,11 +89,13 @@ public class CertificateTool extends EnvironmentAwareCommand {
            new ConstructingObjectParser<>(
                    "instances",
                    a -> new CertificateInformation(
                            (String) a[0], (String) (a[1] == null ? a[0] : a[1]), (List<String>) a[2], (List<String>) a[3]));
                            (String) a[0], (String) (a[1] == null ? a[0] : a[1]),
                            (List<String>) a[2], (List<String>) a[3], (List<String>) a[4]));
        instanceParser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name"));
        instanceParser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("filename"));
        instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("ip"));
        instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("dns"));
        instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cn"));

        PARSER.declareObjectArray(List::addAll, instanceParser, new ParseField("instances"));
    }

@@ -220,8 +222,9 @@ public class CertificateTool extends EnvironmentAwareCommand {
        String dnsNames = terminal.readText("Enter DNS names for instance (comma-separated if more than one) []: ");
        List<String> ipList = Arrays.asList(Strings.splitStringByCommaToArray(ipAddresses));
        List<String> dnsList = Arrays.asList(Strings.splitStringByCommaToArray(dnsNames));
        List<String> commonNames = null;

        CertificateInformation information = new CertificateInformation(name, filename, ipList, dnsList);
        CertificateInformation information = new CertificateInformation(name, filename, ipList, dnsList, commonNames);
        List<String> validationErrors = information.validate();
        if (validationErrors.isEmpty()) {
            if (map.containsKey(name)) {

@@ -269,7 +272,8 @@ public class CertificateTool extends EnvironmentAwareCommand {
        fullyWriteFile(outputFile, (outputStream, pemWriter) -> {
            for (CertificateInformation certificateInformation : certInfo) {
                KeyPair keyPair = CertUtils.generateKeyPair(keysize);
                GeneralNames sanList = getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames);
                GeneralNames sanList = getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames,
                        certificateInformation.commonNames);
                PKCS10CertificationRequest csr = CertUtils.generateCSR(keyPair, certificateInformation.name.x500Principal, sanList);

                final String dirName = certificateInformation.name.filename + "/";

@@ -352,7 +356,8 @@ public class CertificateTool extends EnvironmentAwareCommand {
        for (CertificateInformation certificateInformation : certificateInformations) {
            KeyPair keyPair = CertUtils.generateKeyPair(keysize);
            Certificate certificate = CertUtils.generateSignedCertificate(certificateInformation.name.x500Principal,
                    getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames),
                    getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames,
                            certificateInformation.commonNames),
                    keyPair, caInfo.caCert, caInfo.privateKey, days);

            final String dirName = certificateInformation.name.filename + "/";

@@ -531,7 +536,7 @@ public class CertificateTool extends EnvironmentAwareCommand {
        }
    }

    private static GeneralNames getSubjectAlternativeNamesValue(List<String> ipAddresses, List<String> dnsNames) {
    private static GeneralNames getSubjectAlternativeNamesValue(List<String> ipAddresses, List<String> dnsNames, List<String> commonNames) {
        Set<GeneralName> generalNameList = new HashSet<>();
        for (String ip : ipAddresses) {
            generalNameList.add(new GeneralName(GeneralName.iPAddress, ip));

@@ -541,6 +546,10 @@ public class CertificateTool extends EnvironmentAwareCommand {
            generalNameList.add(new GeneralName(GeneralName.dNSName, dns));
        }

        for (String cn : commonNames) {
            generalNameList.add(CertUtils.createCommonName(cn));
        }

        if (generalNameList.isEmpty()) {
            return null;
        }

@@ -551,11 +560,13 @@ public class CertificateTool extends EnvironmentAwareCommand {
        final Name name;
        final List<String> ipAddresses;
        final List<String> dnsNames;
        final List<String> commonNames;

        CertificateInformation(String name, String filename, List<String> ipAddresses, List<String> dnsNames) {
        CertificateInformation(String name, String filename, List<String> ipAddresses, List<String> dnsNames, List<String> commonNames) {
            this.name = Name.fromUserProvidedName(name, filename);
            this.ipAddresses = ipAddresses == null ? Collections.emptyList() : ipAddresses;
            this.dnsNames = dnsNames == null ? Collections.emptyList() : dnsNames;
            this.commonNames = commonNames == null ? Collections.emptyList() : commonNames;
        }

        List<String> validate() {

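A sketch of the widened constructor, assuming it is reached from an instances file that now declares a "cn" array alongside "ip" and "dns":

    CertificateInformation info = new CertificateInformation(
            "node01", "node01",
            Collections.singletonList("127.0.0.1"), // ip
            Collections.singletonList("localhost"), // dns
            Collections.singletonList("node01"));   // cn - new in this change, may be null
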
@@ -0,0 +1,41 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ssl;

import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.elasticsearch.xpack.security.support.Automatons;

/**
 * In-memory representation of the trusted names for a "trust group".
 *
 * @see RestrictedTrustManager
 */
class CertificateTrustRestrictions {

    private final Set<Predicate<String>> trustedNames;

    CertificateTrustRestrictions(Collection<String> trustedNames) {
        this.trustedNames = trustedNames.stream().map(Automatons::predicate).collect(Collectors.toSet());
    }

    /**
     * @return The names (X509 certificate subjectAlternativeNames) of the nodes that are
     * allowed to connect to this cluster (for the targeted interface).
     */
    Set<Predicate<String>> getTrustedNames() {
        return Collections.unmodifiableSet(trustedNames);
    }

    @Override
    public String toString() {
        return "{trustedNames=" + trustedNames + '}';
    }
}

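A minimal construction sketch; each name is compiled by Automatons into a wildcard-capable predicate:

    CertificateTrustRestrictions restrictions =
            new CertificateTrustRestrictions(Arrays.asList("*.node.cluster1"));
    // getTrustedNames() yields one predicate that matches e.g. "es02.node.cluster1"
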
@@ -0,0 +1,90 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ssl;

import javax.net.ssl.X509ExtendedTrustManager;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

/**
 * An implementation of {@link TrustConfig} that constructs a {@link RestrictedTrustManager}.
 * This implementation always wraps another <code>TrustConfig</code> to perform the
 * underlying certificate validation.
 */
public final class RestrictedTrustConfig extends TrustConfig {

    public static final String RESTRICTIONS_KEY_SUBJECT_NAME = "trust.subject_name";
    private final Settings settings;
    private final String groupConfigPath;
    private final TrustConfig delegate;

    public RestrictedTrustConfig(Settings settings, String groupConfigPath, TrustConfig delegate) {
        this.settings = settings;
        this.groupConfigPath = Objects.requireNonNull(groupConfigPath);
        this.delegate = Objects.requireNonNull(delegate);
    }

    @Override
    RestrictedTrustManager createTrustManager(@Nullable Environment environment) {
        try {
            final X509ExtendedTrustManager delegateTrustManager = delegate.createTrustManager(environment);
            final CertificateTrustRestrictions trustGroupConfig = readTrustGroup(resolveGroupConfigPath(environment));
            return new RestrictedTrustManager(settings, delegateTrustManager, trustGroupConfig);
        } catch (IOException e) {
            throw new ElasticsearchException("failed to initialize TrustManager for {}", e, toString());
        }
    }

    @Override
    List<Path> filesToMonitor(@Nullable Environment environment) {
        return Collections.singletonList(resolveGroupConfigPath(environment));
    }

    @Override
    public String toString() {
        return "restrictedTrust=[" + groupConfigPath + ']';
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        RestrictedTrustConfig that = (RestrictedTrustConfig) o;
        return this.groupConfigPath.equals(that.groupConfigPath) && this.delegate.equals(that.delegate);
    }

    @Override
    public int hashCode() {
        int result = groupConfigPath.hashCode();
        result = 31 * result + delegate.hashCode();
        return result;
    }

    private Path resolveGroupConfigPath(@Nullable Environment environment) {
        return CertUtils.resolvePath(groupConfigPath, environment);
    }

    private CertificateTrustRestrictions readTrustGroup(Path path) throws IOException {
        try (InputStream in = Files.newInputStream(path)) {
            Settings settings = Settings.builder().loadFromStream(path.toString(), in).build();
            final String[] trustNodeNames = settings.getAsArray(RESTRICTIONS_KEY_SUBJECT_NAME);
            return new CertificateTrustRestrictions(Arrays.asList(trustNodeNames));
        }
    }
}

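An in-memory sketch of the group config that readTrustGroup expects, assuming this era's Settings.Builder#putArray API (the on-disk file would carry the same trust.subject_name key):

    Settings trustGroup = Settings.builder()
            .putArray(RestrictedTrustConfig.RESTRICTIONS_KEY_SUBJECT_NAME, "*.node.cluster1")
            .build();
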
@@ -0,0 +1,153 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ssl;

import javax.net.ssl.SSLEngine;
import javax.net.ssl.X509ExtendedTrustManager;
import java.net.Socket;
import java.security.cert.CertificateException;
import java.security.cert.CertificateParsingException;
import java.security.cert.X509Certificate;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.ASN1Sequence;
import org.bouncycastle.asn1.ASN1TaggedObject;
import org.bouncycastle.asn1.DERTaggedObject;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;

/**
 * An X509 trust manager that only trusts connections from a restricted set of predefined network entities (nodes, clients, etc).
 * The trusted entities are defined as a list of predicates on {@link CertificateTrustRestrictions} that are applied to the
 * common-names of the certificate.
 * The common-names are read as subject-alternative-names with type 'Other' and a 'cn' OID.
 * The underlying certificate validation is delegated to another TrustManager.
 */
public final class RestrictedTrustManager extends X509ExtendedTrustManager {

    private final Logger logger;
    private final X509ExtendedTrustManager delegate;
    private final CertificateTrustRestrictions trustRestrictions;
    private final int SAN_CODE_OTHERNAME = 0;

    public RestrictedTrustManager(Settings settings, X509ExtendedTrustManager delegate, CertificateTrustRestrictions restrictions) {
        this.logger = Loggers.getLogger(getClass(), settings);
        this.delegate = delegate;
        this.trustRestrictions = restrictions;
        logger.debug("Configured with trust restrictions: [{}]", restrictions);
    }

    @Override
    public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
        delegate.checkClientTrusted(chain, authType, socket);
        verifyTrust(chain);
    }

    @Override
    public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException {
        delegate.checkServerTrusted(chain, authType, socket);
        verifyTrust(chain);
    }

    @Override
    public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException {
        delegate.checkClientTrusted(chain, authType, engine);
        verifyTrust(chain);
    }

    @Override
    public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException {
        delegate.checkServerTrusted(chain, authType, engine);
        verifyTrust(chain);
    }

    @Override
    public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        delegate.checkClientTrusted(chain, authType);
        verifyTrust(chain);
    }

    @Override
    public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException {
        delegate.checkServerTrusted(chain, authType);
        verifyTrust(chain);
    }

    @Override
    public X509Certificate[] getAcceptedIssuers() {
        return delegate.getAcceptedIssuers();
    }

    private void verifyTrust(X509Certificate[] chain) throws CertificateException {
        if (chain.length == 0) {
            throw new CertificateException("No certificate presented");
        }
        final X509Certificate certificate = chain[0];
        Set<String> names = readCommonNames(certificate);
        if (verifyCertificateNames(names)) {
            logger.debug(() -> new ParameterizedMessage("Trusting certificate [{}] [{}] with common-names [{}]",
                    certificate.getSubjectDN(), certificate.getSerialNumber().toString(16), names));
        } else {
            logger.info("Rejecting certificate [{}] [{}] with common-names [{}]",
                    certificate.getSubjectDN(), certificate.getSerialNumber().toString(16), names);
            throw new CertificateException("Certificate for " + certificate.getSubjectDN() +
                    " with common-names " + names
                    + " does not match the trusted names " + trustRestrictions.getTrustedNames());
        }
    }

    private boolean verifyCertificateNames(Set<String> names) {
        for (Predicate<String> trust : trustRestrictions.getTrustedNames()) {
            final Optional<String> match = names.stream().filter(trust).findFirst();
            if (match.isPresent()) {
                logger.debug("Name [{}] matches trusted pattern [{}]", match.get(), trust);
                return true;
            }
        }
        return false;
    }

    private Set<String> readCommonNames(X509Certificate certificate) throws CertificateParsingException {
        return getSubjectAlternativeNames(certificate).stream()
                .filter(pair -> ((Integer) pair.get(0)).intValue() == SAN_CODE_OTHERNAME)
                .map(pair -> pair.get(1))
                .map(value -> {
                    ASN1Sequence seq = ASN1Sequence.getInstance(value);
                    assert seq.size() == 2 : "Incorrect sequence length for 'other name'";
                    final String id = ASN1ObjectIdentifier.getInstance(seq.getObjectAt(0)).getId();
                    if (CertUtils.CN_OID.equals(id)) {
                        final ASN1TaggedObject object = DERTaggedObject.getInstance(seq.getObjectAt(1));
                        final String cn = object.getObject().toString();
                        logger.trace("Read cn [{}] from ASN1Sequence [{}]", cn, seq);
                        return cn;
                    } else {
                        logger.debug("Certificate [{}] has 'otherName' [{}] with unsupported object-id [{}]",
                                certificate.getSubjectDN(), seq, id);
                        return null;
                    }
                })
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
    }

    private Collection<List<?>> getSubjectAlternativeNames(X509Certificate certificate) throws CertificateParsingException {
        final Collection<List<?>> sans = certificate.getSubjectAlternativeNames();
        logger.trace("Certificate [{}] has subject alternative names [{}]", certificate.getSubjectDN(), sans);
        return sans == null ? Collections.emptyList() : sans;
    }
}

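A minimal sketch of wrapping an existing trust manager, assuming caCertificates is a Certificate[] already loaded elsewhere:

    X509ExtendedTrustManager delegate = CertUtils.trustManager(caCertificates);
    RestrictedTrustManager restricted = new RestrictedTrustManager(Settings.EMPTY, delegate,
            new CertificateTrustRestrictions(Arrays.asList("*.node.cluster1")));
    // checkClientTrusted/checkServerTrusted now also reject any peer whose certificate
    // lacks a matching otherName/cn subject-alternative-name.
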
@@ -5,7 +5,9 @@
 */
package org.elasticsearch.xpack.ssl;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

@@ -39,6 +41,7 @@ public final class SSLConfiguration {
    /**
     * Creates a new SSLConfiguration from the given settings. There is no fallback configuration when invoking this constructor so
     * un-configured aspects will take on their default values.
     *
     * @param settings the SSL specific settings; only the settings under a *.ssl. prefix
     */
    SSLConfiguration(Settings settings) {

@@ -53,7 +56,8 @@ public final class SSLConfiguration {
    /**
     * Creates a new SSLConfiguration from the given settings and global/default SSLConfiguration. If the settings do not contain a value
     * for a given aspect, the value from the global configuration will be used.
     * @param settings the SSL specific settings; only the settings under a *.ssl. prefix
     *
     * @param settings the SSL specific settings; only the settings under a *.ssl. prefix
     * @param globalSSLConfiguration the default configuration that is used as a fallback
     */
    SSLConfiguration(Settings settings, SSLConfiguration globalSSLConfiguration) {

@@ -213,7 +217,15 @@ public final class SSLConfiguration {
    }

    private static TrustConfig createTrustConfig(Settings settings, KeyConfig keyConfig, SSLConfiguration global) {
        final TrustConfig trustConfig = createCertChainTrustConfig(settings, keyConfig, global);
        return SETTINGS_PARSER.trustRestrictionsPath.get(settings)
                .map(path -> (TrustConfig) new RestrictedTrustConfig(settings, path, trustConfig))
                .orElse(trustConfig);
    }

    private static TrustConfig createCertChainTrustConfig(Settings settings, KeyConfig keyConfig, SSLConfiguration global) {
        String trustStorePath = SETTINGS_PARSER.truststorePath.get(settings).orElse(null);

        List<String> caPaths = getListOrNull(SETTINGS_PARSER.caPaths, settings);
        if (trustStorePath != null && caPaths != null) {
            throw new IllegalArgumentException("you cannot specify a truststore and ca files");

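A sketch of how the new path setting switches implementations (the full setting name is derived from the prefix logic in SSLConfigurationSettings, shown in the next file; the path itself is hypothetical):

    Settings ssl = Settings.builder()
            .put("trust_restrictions.path", "/path/to/trust_group.yml")
            .build();
    // createTrustConfig(ssl, keyConfig, global) now returns a RestrictedTrustConfig
    // that delegates chain validation to createCertChainTrustConfig(...).
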
@@ -36,6 +36,7 @@ public class SSLConfigurationSettings {
    public final Setting<Optional<String>> truststorePath;
    public final Setting<SecureString> truststorePassword;
    public final Setting<String> truststoreAlgorithm;
    public final Setting<Optional<String>> trustRestrictionsPath;
    public final Setting<Optional<String>> keyPath;
    public final Setting<SecureString> keyPassword;
    public final Setting<Optional<String>> cert;

@@ -120,6 +121,11 @@ public class SSLConfigurationSettings {
    public static final Setting<String> TRUST_STORE_ALGORITHM_PROFILES = Setting.affixKeySetting("transport.profiles.",
            "xpack.security.ssl.truststore.algorithm", TRUST_STORE_ALGORITHM_TEMPLATE);

    private static final Function<String, Setting<Optional<String>>> TRUST_RESTRICTIONS_TEMPLATE = key -> new Setting<>(key, s -> null,
            Optional::ofNullable, Property.NodeScope, Property.Filtered);
    public static final Setting<Optional<String>> TRUST_RESTRICTIONS_PROFILES = Setting.affixKeySetting("transport.profiles.",
            "xpack.security.ssl.trust_restrictions", TRUST_RESTRICTIONS_TEMPLATE);

    private static final Function<String, Setting<SecureString>> LEGACY_KEY_PASSWORD_TEMPLATE = key -> new Setting<>(key, "",
            SecureString::new, Property.Deprecated, Property.Filtered, Property.NodeScope);
    public static final Setting<SecureString> LEGACY_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.",

@@ -173,6 +179,7 @@ public class SSLConfigurationSettings {
        truststorePassword = TRUSTSTORE_PASSWORD_TEMPLATE.apply(prefix + "truststore.secure_password");
        keystoreAlgorithm = KEY_STORE_ALGORITHM_TEMPLATE.apply(prefix + "keystore.algorithm");
        truststoreAlgorithm = TRUST_STORE_ALGORITHM_TEMPLATE.apply(prefix + "truststore.algorithm");
        trustRestrictionsPath = TRUST_RESTRICTIONS_TEMPLATE.apply(prefix + "trust_restrictions.path");
        keyPath = KEY_PATH_TEMPLATE.apply(prefix + "key");
        legacyKeyPassword = LEGACY_KEY_PASSWORD_TEMPLATE.apply(prefix + "key_passphrase");
        keyPassword = KEY_PASSWORD_TEMPLATE.apply(prefix + "secure_key_passphrase");

@@ -181,9 +188,11 @@ public class SSLConfigurationSettings {
        clientAuth = CLIENT_AUTH_SETTING_TEMPLATE.apply(prefix + "client_authentication");
        verificationMode = VERIFICATION_MODE_SETTING_TEMPLATE.apply(prefix + "verification_mode");

        this.allSettings = Arrays.asList(ciphers, supportedProtocols, keystorePath, keystorePassword, keystoreAlgorithm,
                keystoreKeyPassword, truststorePath, truststorePassword, truststoreAlgorithm, keyPath, keyPassword, cert, caPaths,
                clientAuth, verificationMode, legacyKeystorePassword, legacyKeystoreKeyPassword, legacyKeyPassword, legacyTruststorePassword);
        this.allSettings = Arrays.asList(ciphers, supportedProtocols,
                keystorePath, keystorePassword, keystoreAlgorithm, keystoreKeyPassword,
                truststorePath, truststorePassword, truststoreAlgorithm, trustRestrictionsPath,
                keyPath, keyPassword, cert, caPaths, clientAuth, verificationMode,
                legacyKeystorePassword, legacyKeystoreKeyPassword, legacyKeyPassword, legacyTruststorePassword);
    }

    public List<Setting<?>> getAllSettings() {

@@ -213,8 +222,8 @@ public class SSLConfigurationSettings {
        return Arrays.asList(CIPHERS_SETTING_PROFILES, SUPPORTED_PROTOCOLS_PROFILES, KEYSTORE_PATH_PROFILES,
                LEGACY_KEYSTORE_PASSWORD_PROFILES, KEYSTORE_PASSWORD_PROFILES, LEGACY_KEYSTORE_KEY_PASSWORD_PROFILES,
                KEYSTORE_KEY_PASSWORD_PROFILES, TRUST_STORE_PATH_PROFILES, LEGACY_TRUSTSTORE_PASSWORD_PROFILES,
                TRUSTSTORE_PASSWORD_PROFILES, KEY_STORE_ALGORITHM_PROFILES, TRUST_STORE_ALGORITHM_PROFILES,KEY_PATH_PROFILES,
                LEGACY_KEY_PASSWORD_PROFILES, KEY_PASSWORD_PROFILES,CERT_PROFILES,CAPATH_SETTING_PROFILES,
                TRUSTSTORE_PASSWORD_PROFILES, KEY_STORE_ALGORITHM_PROFILES, TRUST_STORE_ALGORITHM_PROFILES, TRUST_RESTRICTIONS_PROFILES,
                KEY_PATH_PROFILES, LEGACY_KEY_PASSWORD_PROFILES, KEY_PASSWORD_PROFILES,CERT_PROFILES,CAPATH_SETTING_PROFILES,
                CLIENT_AUTH_SETTING_PROFILES, VERIFICATION_MODE_SETTING_PROFILES);
    }
}

@@ -451,54 +451,8 @@ public class SSLService extends AbstractComponent {
        // if no key is provided for transport we can auto-generate a key with a signed certificate for development use only. There is a
        // bootstrap check that prevents this configuration from being used in production (SSLBootstrapCheck)
        if (transportSSLConfiguration.keyConfig() == KeyConfig.NONE) {
            // lazily generate key to avoid slowing down startup where we do not need it
            final GeneratedKeyConfig generatedKeyConfig = new GeneratedKeyConfig(settings);
            final TrustConfig trustConfig =
                    new TrustConfig.CombiningTrustConfig(Arrays.asList(transportSSLConfiguration.trustConfig(), new TrustConfig() {
                        @Override
                        X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) {
                            return generatedKeyConfig.createTrustManager(environment);
                        }
            createDevelopmentTLSConfiguration(sslConfigurations, transportSSLConfiguration, profileSettings);

                        @Override
                        List<Path> filesToMonitor(@Nullable Environment environment) {
                            return Collections.emptyList();
                        }

                        @Override
                        public String toString() {
                            return "Generated Trust Config. DO NOT USE IN PRODUCTION";
                        }

                        @Override
                        public boolean equals(Object o) {
                            return this == o;
                        }

                        @Override
                        public int hashCode() {
                            return System.identityHashCode(this);
                        }
                    }));
            X509ExtendedTrustManager extendedTrustManager = trustConfig.createTrustManager(env);
            ReloadableTrustManager trustManager = new ReloadableTrustManager(extendedTrustManager, trustConfig);
            ReloadableX509KeyManager keyManager =
                    new ReloadableX509KeyManager(generatedKeyConfig.createKeyManager(env), generatedKeyConfig);
            sslConfigurations.put(transportSSLConfiguration, createSslContext(keyManager, trustManager, transportSSLConfiguration));
            profileSettings.forEach((profileSetting) -> {
                SSLConfiguration configuration = new SSLConfiguration(profileSetting, transportSSLConfiguration);
                if (configuration.keyConfig() == KeyConfig.NONE) {
                    sslConfigurations.compute(configuration, (conf, holder) -> {
                        if (holder != null && holder.keyManager == keyManager && holder.trustManager == trustManager) {
                            return holder;
                        } else {
                            return createSslContext(keyManager, trustManager, configuration);
                        }
                    });
                } else {
                    sslConfigurations.computeIfAbsent(configuration, this::createSslContext);
                }
            });
        } else {
            sslConfigurations.computeIfAbsent(transportSSLConfiguration, this::createSslContext);
            profileSettings.forEach((profileSetting) ->

@@ -507,6 +461,60 @@ public class SSLService extends AbstractComponent {
        return Collections.unmodifiableMap(sslConfigurations);
    }

    private void createDevelopmentTLSConfiguration(Map<SSLConfiguration, SSLContextHolder> sslConfigurations,
                                                   SSLConfiguration transportSSLConfiguration, List<Settings> profileSettings)
            throws NoSuchAlgorithmException, IOException, CertificateException, OperatorCreationException, UnrecoverableKeyException,
            KeyStoreException {
        // lazily generate key to avoid slowing down startup where we do not need it
        final GeneratedKeyConfig generatedKeyConfig = new GeneratedKeyConfig(settings);
        final TrustConfig trustConfig =
                new TrustConfig.CombiningTrustConfig(Arrays.asList(transportSSLConfiguration.trustConfig(), new TrustConfig() {
                    @Override
                    X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) {
                        return generatedKeyConfig.createTrustManager(environment);
                    }

                    @Override
                    List<Path> filesToMonitor(@Nullable Environment environment) {
                        return Collections.emptyList();
                    }

                    @Override
                    public String toString() {
                        return "Generated Trust Config. DO NOT USE IN PRODUCTION";
                    }

                    @Override
                    public boolean equals(Object o) {
                        return this == o;
                    }

                    @Override
                    public int hashCode() {
                        return System.identityHashCode(this);
                    }
                }));
        X509ExtendedTrustManager extendedTrustManager = trustConfig.createTrustManager(env);
        ReloadableTrustManager trustManager = new ReloadableTrustManager(extendedTrustManager, trustConfig);
        ReloadableX509KeyManager keyManager =
                new ReloadableX509KeyManager(generatedKeyConfig.createKeyManager(env), generatedKeyConfig);
        sslConfigurations.put(transportSSLConfiguration, createSslContext(keyManager, trustManager, transportSSLConfiguration));
        profileSettings.forEach((profileSetting) -> {
            SSLConfiguration configuration = new SSLConfiguration(profileSetting, transportSSLConfiguration);
            if (configuration.keyConfig() == KeyConfig.NONE) {
                sslConfigurations.compute(configuration, (conf, holder) -> {
                    if (holder != null && holder.keyManager == keyManager && holder.trustManager == trustManager) {
                        return holder;
                    } else {
                        return createSslContext(keyManager, trustManager, configuration);
                    }
                });
            } else {
                sslConfigurations.computeIfAbsent(configuration, this::createSslContext);
            }
        });
    }

    /**
     * This socket factory wraps an existing SSLSocketFactory and sets the protocols and ciphers on each SSLSocket after it is created. This
     * is needed even though the SSLContext is configured properly as the configuration does not flow down to the sockets created by the

@@ -14,12 +14,12 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.script.Script;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.transport.TransportResponse;

import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;

/**
 * Generic upgrade check applicable to all indices to be upgraded from the current version

@@ -35,7 +35,7 @@ public class IndexUpgradeCheck<T> extends AbstractComponent {
    public static final int UPRADE_VERSION = 6;

    private final String name;
    private final BiFunction<IndexMetaData, Map<String, String>, UpgradeActionRequired> actionRequired;
    private final Function<IndexMetaData, UpgradeActionRequired> actionRequired;
    private final InternalIndexReindexer<T> reindexer;

    /**

@@ -50,7 +50,7 @@ public class IndexUpgradeCheck<T> extends AbstractComponent {
     * @param updateScript - the upgrade script that should be used during reindexing
     */
    public IndexUpgradeCheck(String name, Settings settings,
                             BiFunction<IndexMetaData, Map<String, String>, UpgradeActionRequired> actionRequired,
                             Function<IndexMetaData, UpgradeActionRequired> actionRequired,
                             Client client, ClusterService clusterService, String[] types, Script updateScript) {
        this(name, settings, actionRequired, client, clusterService, types, updateScript,
                listener -> listener.onResponse(null), (t, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE));

@@ -70,7 +70,7 @@ public class IndexUpgradeCheck<T> extends AbstractComponent {
     * @param postUpgrade - action that should be performed after upgrade
     */
    public IndexUpgradeCheck(String name, Settings settings,
                             BiFunction<IndexMetaData, Map<String, String>, UpgradeActionRequired> actionRequired,
                             Function<IndexMetaData, UpgradeActionRequired> actionRequired,
                             Client client, ClusterService clusterService, String[] types, Script updateScript,
                             Consumer<ActionListener<T>> preUpgrade,
                             BiConsumer<T, ActionListener<TransportResponse.Empty>> postUpgrade) {

@@ -92,22 +92,22 @@ public class IndexUpgradeCheck<T> extends AbstractComponent {
     * This method is called by Upgrade API to verify if upgrade or reindex for this index is required
     *
     * @param indexMetaData index metadata
     * @param params additional user-specified parameters see {@link IndexUpgradeCheckFactory#supportedParams}
     * @return required action or UpgradeActionRequired.NOT_APPLICABLE if this check cannot be performed on the index
     */
    public UpgradeActionRequired actionRequired(IndexMetaData indexMetaData, Map<String, String> params) {
        return actionRequired.apply(indexMetaData, params);
    public UpgradeActionRequired actionRequired(IndexMetaData indexMetaData) {
        return actionRequired.apply(indexMetaData);
    }

    /**
     * Perform the index upgrade
     *
     * @param task the task that executes the upgrade operation
     * @param indexMetaData index metadata
     * @param state current cluster state
     * @param listener the listener that should be called upon completion of the upgrade
     */
    public void upgrade(IndexMetaData indexMetaData, ClusterState state,
    public void upgrade(TaskId task, IndexMetaData indexMetaData, ClusterState state,
                        ActionListener<BulkByScrollResponse> listener) {
        reindexer.upgrade(indexMetaData.getIndex().getName(), state, listener);
        reindexer.upgrade(task, indexMetaData.getIndex().getName(), state, listener);
    }
}

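A construction sketch under the simplified contract, with client, clusterService, updateScript, indexMetaData, taskId, state and listener assumed to exist at the call site:

    IndexUpgradeCheck<Void> check = new IndexUpgradeCheck<>("example", settings,
            indexMetaData -> UpgradeActionRequired.NOT_APPLICABLE, // checks are plain Functions now
            client, clusterService, Strings.EMPTY_ARRAY, updateScript);
    check.actionRequired(indexMetaData);                    // params map is gone
    check.upgrade(taskId, indexMetaData, state, listener);  // upgrades are tied to a TaskId
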
@ -16,6 +16,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.tasks.TaskId;

import java.util.HashMap;
import java.util.List;

@ -39,18 +40,16 @@ public class IndexUpgradeService extends AbstractComponent {
*
* @param indices list of indices to check, specify _all for all indices
* @param options wild card resolution option
* @param params list of additional parameters that will be passed to upgrade checks
* @param state the current cluster state
* @return a list of indices that should be upgraded/reindexed
*/
public Map<String, UpgradeActionRequired> upgradeInfo(String[] indices, IndicesOptions options, Map<String, String> params,
ClusterState state) {
public Map<String, UpgradeActionRequired> upgradeInfo(String[] indices, IndicesOptions options, ClusterState state) {
Map<String, UpgradeActionRequired> results = new HashMap<>();
String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, options, indices);
MetaData metaData = state.getMetaData();
for (String index : concreteIndexNames) {
IndexMetaData indexMetaData = metaData.index(index);
UpgradeActionRequired upgradeActionRequired = upgradeInfo(indexMetaData, index, params);
UpgradeActionRequired upgradeActionRequired = upgradeInfo(indexMetaData, index);
if (upgradeActionRequired != null) {
results.put(index, upgradeActionRequired);
}

@ -58,9 +57,9 @@ public class IndexUpgradeService extends AbstractComponent {
return results;
}

private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String index, Map<String, String> params) {
private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String index) {
for (IndexUpgradeCheck check : upgradeChecks) {
UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData, params);
UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData);
logger.trace("[{}] check [{}] returned [{}]", index, check.getName(), upgradeActionRequired);
switch (upgradeActionRequired) {
case UPGRADE:

@ -87,18 +86,17 @@ public class IndexUpgradeService extends AbstractComponent {
}
}

public void upgrade(String index, Map<String, String> params, ClusterState state,
ActionListener<BulkByScrollResponse> listener) {
public void upgrade(TaskId task, String index, ClusterState state, ActionListener<BulkByScrollResponse> listener) {
IndexMetaData indexMetaData = state.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
for (IndexUpgradeCheck check : upgradeChecks) {
UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData, params);
UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData);
switch (upgradeActionRequired) {
case UPGRADE:
// this index needs to be upgraded - start the upgrade procedure
check.upgrade(indexMetaData, state, listener);
check.upgrade(task, indexMetaData, state, listener);
return;
case REINDEX:
// this index needs to be re-indexed

@ -5,12 +5,12 @@
*/
package org.elasticsearch.xpack.upgrade;

import com.carrotsearch.hppc.procedures.ObjectProcedure;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlocks;

@ -23,6 +23,7 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexAction;
import org.elasticsearch.index.reindex.ReindexRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.transport.TransportResponse;

import java.util.function.BiConsumer;

@ -59,9 +60,10 @@ public class InternalIndexReindexer<T> {
this.postUpgrade = postUpgrade;
}

public void upgrade(String index, ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
public void upgrade(TaskId task, String index, ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
ParentTaskAssigningClient parentAwareClient = new ParentTaskAssigningClient(client, task);
preUpgrade.accept(ActionListener.wrap(
t -> innerUpgrade(index, clusterState, ActionListener.wrap(
t -> innerUpgrade(parentAwareClient, index, clusterState, ActionListener.wrap(
response -> postUpgrade.accept(t, ActionListener.wrap(
empty -> listener.onResponse(response),
listener::onFailure

@ -71,22 +73,23 @@ public class InternalIndexReindexer<T> {
listener::onFailure));
}

private void innerUpgrade(String index, ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
private void innerUpgrade(ParentTaskAssigningClient parentAwareClient, String index, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener) {
String newIndex = index + "_v" + version;
try {
checkMasterAndDataNodeVersion(clusterState);
client.admin().indices().prepareCreate(newIndex).execute(ActionListener.wrap(createIndexResponse ->
parentAwareClient.admin().indices().prepareCreate(newIndex).execute(ActionListener.wrap(createIndexResponse ->
setReadOnlyBlock(index, ActionListener.wrap(setReadOnlyResponse ->
reindex(index, newIndex, ActionListener.wrap(
reindex(parentAwareClient, index, newIndex, ActionListener.wrap(
bulkByScrollResponse -> // Successful completion of reindexing - delete old index
removeReadOnlyBlock(index, ActionListener.wrap(unsetReadOnlyResponse ->
client.admin().indices().prepareAliases().removeIndex(index)
removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse ->
parentAwareClient.admin().indices().prepareAliases().removeIndex(index)
.addAlias(newIndex, index).execute(ActionListener.wrap(deleteIndexResponse ->
listener.onResponse(bulkByScrollResponse), listener::onFailure
)), listener::onFailure
)),
e -> // Something went wrong during reindexing - remove readonly flag and report the error
removeReadOnlyBlock(index, ActionListener.wrap(unsetReadOnlyResponse -> {
removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse -> {
listener.onFailure(e);
}, e1 -> {
listener.onFailure(e);

@ -105,19 +108,21 @@ public class InternalIndexReindexer<T> {
}
}

private void removeReadOnlyBlock(String index, ActionListener<UpdateSettingsResponse> listener) {
private void removeReadOnlyBlock(ParentTaskAssigningClient parentAwareClient, String index,
ActionListener<UpdateSettingsResponse> listener) {
Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build();
client.admin().indices().prepareUpdateSettings(index).setSettings(settings).execute(listener);
parentAwareClient.admin().indices().prepareUpdateSettings(index).setSettings(settings).execute(listener);
}

private void reindex(String index, String newIndex, ActionListener<BulkByScrollResponse> listener) {
private void reindex(ParentTaskAssigningClient parentAwareClient, String index, String newIndex,
ActionListener<BulkByScrollResponse> listener) {
SearchRequest sourceRequest = new SearchRequest(index);
sourceRequest.types(types);
IndexRequest destinationRequest = new IndexRequest(newIndex);
ReindexRequest reindexRequest = new ReindexRequest(sourceRequest, destinationRequest);
reindexRequest.setRefresh(true);
reindexRequest.setScript(transformScript);
client.execute(ReindexAction.INSTANCE, reindexRequest, listener);
parentAwareClient.execute(ReindexAction.INSTANCE, reindexRequest, listener);
}

/**

@ -5,17 +5,18 @@
*/
package org.elasticsearch.xpack.upgrade;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;

@ -45,30 +46,25 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Supplier;

public class Upgrade implements ActionPlugin {

public static final Version UPGRADE_INTRODUCED = Version.V_5_5_0; // TODO: Probably will need to change this to 5.6.0
public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0;

// this is the required index.format setting for 6.0 services (watcher and security) to start up
// this index setting is set by the upgrade API or automatically when a 6.0 index template is created
private static final int EXPECTED_INDEX_FORMAT_VERSION = 6;

private final Settings settings;
private final List<BiFunction<InternalClient, ClusterService, IndexUpgradeCheck>> upgradeCheckFactories;
private final Set<String> extraParameters;

public Upgrade(Settings settings) {
this.settings = settings;
this.extraParameters = new HashSet<>();
this.upgradeCheckFactories = new ArrayList<>();
for (Tuple<Collection<String>, BiFunction<InternalClient, ClusterService, IndexUpgradeCheck>> checkFactory : Arrays.asList(
getKibanaUpgradeCheckFactory(settings),
getWatcherUpgradeCheckFactory(settings))) {
extraParameters.addAll(checkFactory.v1());
upgradeCheckFactories.add(checkFactory.v2());
}
upgradeCheckFactories.add(getWatcherUpgradeCheckFactory(settings));
}

public Collection<Object> createComponents(InternalClient internalClient, ClusterService clusterService, ThreadPool threadPool,

@ -95,80 +91,65 @@ public class Upgrade implements ActionPlugin {
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster) {
return Arrays.asList(
new RestIndexUpgradeInfoAction(settings, restController, extraParameters),
new RestIndexUpgradeAction(settings, restController, extraParameters)
new RestIndexUpgradeInfoAction(settings, restController),
new RestIndexUpgradeAction(settings, restController)
);
}

static Tuple<Collection<String>, BiFunction<InternalClient, ClusterService, IndexUpgradeCheck>> getKibanaUpgradeCheckFactory(
Settings settings) {
return new Tuple<>(
Collections.singletonList("kibana_indices"),
(internalClient, clusterService) ->
new IndexUpgradeCheck<Void>("kibana",
settings,
(indexMetaData, params) -> {
String indexName = indexMetaData.getIndex().getName();
String kibanaIndicesMasks = params.getOrDefault("kibana_indices", ".kibana");
String[] kibanaIndices = Strings.delimitedListToStringArray(kibanaIndicesMasks, ",");
if (Regex.simpleMatch(kibanaIndices, indexName)) {
return UpgradeActionRequired.UPGRADE;
} else {
return UpgradeActionRequired.NOT_APPLICABLE;
}
}, internalClient,
clusterService,
Strings.EMPTY_ARRAY,
new Script(ScriptType.INLINE, "painless", "ctx._id = ctx._type + \"-\" + ctx._id;\n" +
"ctx._source = [ ctx._type : ctx._source ];\n" +
"ctx._source.type = ctx._type;\n" +
"ctx._type = \"doc\";",
new HashMap<>())));
/**
* Checks the format of an internal index and returns true if the index is up to date or false if upgrade is required
*/
public static boolean checkInternalIndexFormat(IndexMetaData indexMetaData) {
return indexMetaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0) == EXPECTED_INDEX_FORMAT_VERSION;
}

static Tuple<Collection<String>, BiFunction<InternalClient, ClusterService, IndexUpgradeCheck>> getWatcherUpgradeCheckFactory(
Settings settings) {
return new Tuple<>(
Collections.emptyList(),
(internalClient, clusterService) ->
new IndexUpgradeCheck<Boolean>("watcher",
settings,
(indexMetaData, params) -> {
if (".watches".equals(indexMetaData.getIndex().getName()) ||
indexMetaData.getAliases().containsKey(".watches")) {
if (indexMetaData.getMappings().size() == 1 && indexMetaData.getMappings().containsKey("doc") ) {
return UpgradeActionRequired.UP_TO_DATE;
} else {
return UpgradeActionRequired.UPGRADE;
}
} else {
return UpgradeActionRequired.NOT_APPLICABLE;
}
}, internalClient,
clusterService,
new String[]{"watch"},
new Script(ScriptType.INLINE, "painless", "ctx._type = \"doc\";\n" +
"if (ctx._source.containsKey(\"_status\") && !ctx._source.containsKey(\"status\") ) {}\n" +
" ctx._source.status = ctx._source.remove(\"_status\");\n" +
"}",
new HashMap<>()),
booleanActionListener -> preWatcherUpgrade(internalClient, booleanActionListener),
(shouldStartWatcher, listener) -> postWatcherUpgrade(internalClient, shouldStartWatcher, listener)
));
static BiFunction<InternalClient, ClusterService, IndexUpgradeCheck> getWatcherUpgradeCheckFactory(Settings settings) {
return (internalClient, clusterService) ->
new IndexUpgradeCheck<Boolean>("watcher",
settings,
indexMetaData -> {
if (".watches".equals(indexMetaData.getIndex().getName()) ||
indexMetaData.getAliases().containsKey(".watches")) {
if (checkInternalIndexFormat(indexMetaData)) {
return UpgradeActionRequired.UP_TO_DATE;
} else {
return UpgradeActionRequired.UPGRADE;
}
} else {
return UpgradeActionRequired.NOT_APPLICABLE;
}
}, internalClient,
clusterService,
new String[]{"watch"},
new Script(ScriptType.INLINE, "painless", "ctx._type = \"doc\";\n" +
"if (ctx._source.containsKey(\"_status\") && !ctx._source.containsKey(\"status\") ) {\n" +
" ctx._source.status = ctx._source.remove(\"_status\");\n" +
"}",
new HashMap<>()),
booleanActionListener -> preWatcherUpgrade(internalClient, booleanActionListener),
(shouldStartWatcher, listener) -> postWatcherUpgrade(internalClient, shouldStartWatcher, listener)
);
}

private static void preWatcherUpgrade(Client client, ActionListener<Boolean> listener) {
ActionListener<DeleteIndexTemplateResponse> triggeredWatchIndexTemplateListener = deleteIndexTemplateListener("triggered_watches",
listener, () -> listener.onResponse(true));

ActionListener<DeleteIndexTemplateResponse> watchIndexTemplateListener = deleteIndexTemplateListener("watches", listener,
() -> client.admin().indices().prepareDeleteTemplate("triggered_watches").execute(triggeredWatchIndexTemplateListener));

new WatcherClient(client).watcherStats(new WatcherStatsRequest(), ActionListener.wrap(
stats -> {
if (stats.watcherMetaData().manuallyStopped()) {
// don't start the watcher after upgrade
// don't start watcher after upgrade
listener.onResponse(false);
} else {
// stop the watcher
// stop watcher
new WatcherClient(client).watcherService(new WatcherServiceRequest().stop(), ActionListener.wrap(
stopResponse -> {
if (stopResponse.isAcknowledged()) {
listener.onResponse(true);
// delete old templates before indexing
client.admin().indices().prepareDeleteTemplate("watches").execute(watchIndexTemplateListener);
} else {
listener.onFailure(new IllegalStateException("unable to stop watcher service"));
}

@ -179,16 +160,27 @@ public class Upgrade implements ActionPlugin {
}

private static void postWatcherUpgrade(Client client, Boolean shouldStartWatcher, ActionListener<TransportResponse.Empty> listener) {
client.admin().indices().prepareDelete("triggered-watches").execute(ActionListener.wrap(deleteIndexResponse -> {
startWatcherIfNeeded(shouldStartWatcher, client, listener);
}, e -> {
if (e instanceof IndexNotFoundException) {
startWatcherIfNeeded(shouldStartWatcher, client, listener);
} else {
listener.onFailure(e);
}
}
));
ActionListener<DeleteIndexResponse> deleteTriggeredWatchIndexResponse = ActionListener.wrap(deleteIndexResponse ->
startWatcherIfNeeded(shouldStartWatcher, client, listener), e -> {
if (e instanceof IndexNotFoundException) {
startWatcherIfNeeded(shouldStartWatcher, client, listener);
} else {
listener.onFailure(e);
}
});

client.admin().indices().prepareDelete(".triggered_watches").execute(deleteTriggeredWatchIndexResponse);
}

private static ActionListener<DeleteIndexTemplateResponse> deleteIndexTemplateListener(String name, ActionListener<Boolean> listener,
Runnable runnable) {
return ActionListener.wrap(r -> {
if (r.isAcknowledged()) {
runnable.run();
} else {
listener.onFailure(new ElasticsearchException("Deleting [{}] template was not acknowledged", name));
}
}, listener::onFailure);
}

private static void startWatcherIfNeeded(Boolean shouldStartWatcher, Client client, ActionListener<TransportResponse.Empty> listener) {

@ -25,13 +25,14 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.upgrade.IndexUpgradeService;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

@ -60,7 +61,6 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
public static class Request extends MasterNodeReadRequest<Request> implements IndicesRequest {

private String index = null;
private Map<String, String> extraParams = Collections.emptyMap();

// for serialization
public Request() {

@ -95,24 +95,12 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
}

public Map<String, String> extraParams() {
return extraParams;
}

public Request extraParams(Map<String, String> extraParams) {
this.extraParams = extraParams;
return this;
}

@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (index == null) {
validationException = addValidationError("index is missing", validationException);
}
if (extraParams == null) {
validationException = addValidationError("params are missing", validationException);
}
return validationException;
}

@ -120,14 +108,12 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
extraParams = in.readMap(StreamInput::readString, StreamInput::readString);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeMap(extraParams, StreamOutput::writeString, StreamOutput::writeString);
}

@Override

@ -135,13 +121,22 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(index, request.index) &&
Objects.equals(extraParams, request.extraParams);
return Objects.equals(index, request.index);
}

@Override
public int hashCode() {
return Objects.hash(index, extraParams);
return Objects.hash(index);
}

@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new CancellableTask(id, type, action, getDescription(), parentTaskId) {
@Override
public boolean shouldCancelChildrenOnCancellation() {
return true;
}
};
}
}

@ -155,13 +150,6 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
request.index(index);
return this;
}

public RequestBuilder setExtraParams(Map<String, String> params) {
request.extraParams(params);
return this;
}

}

public static class TransportAction extends TransportMasterNodeAction<Request, BulkByScrollResponse> {

@ -195,8 +183,16 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
}

@Override
protected final void masterOperation(final Request request, ClusterState state, ActionListener<BulkByScrollResponse> listener) {
indexUpgradeService.upgrade(request.index(), request.extraParams(), state, listener);
protected final void masterOperation(Task task, Request request, ClusterState state,
ActionListener<BulkByScrollResponse> listener) {
TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId());
indexUpgradeService.upgrade(taskId, request.index(), state, listener);
}

@Override
protected final void masterOperation(Request request, ClusterState state, ActionListener<BulkByScrollResponse> listener) {
throw new UnsupportedOperationException("the task parameter is required");
}

}
}

@ -37,7 +37,6 @@ import org.elasticsearch.xpack.upgrade.UpgradeActionRequired;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;

@ -125,8 +124,7 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
public static class Request extends MasterNodeReadRequest<Request> implements IndicesRequest.Replaceable {

private String[] indices = null;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
private Map<String, String> extraParams = Collections.emptyMap();
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true);

// for serialization
public Request() {

@ -157,24 +155,12 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
this.indicesOptions = indicesOptions;
}

public Map<String, String> extraParams() {
return extraParams;
}

public Request extraParams(Map<String, String> extraParams) {
this.extraParams = extraParams;
return this;
}

@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (indices == null) {
validationException = addValidationError("index/indices is missing", validationException);
}
if (extraParams == null) {
validationException = addValidationError("params are missing", validationException);
}
return validationException;
}

@ -183,7 +169,6 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
extraParams = in.readMap(StreamInput::readString, StreamInput::readString);
}

@Override

@ -191,7 +176,6 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
out.writeMap(extraParams, StreamOutput::writeString, StreamOutput::writeString);
}

@Override

@ -200,13 +184,12 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Arrays.equals(indices, request.indices) &&
Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()) &&
Objects.equals(extraParams, request.extraParams);
Objects.equals(indicesOptions.toString(), request.indicesOptions.toString());
}

@Override
public int hashCode() {
return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString(), extraParams);
return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString());
}
}

@ -225,13 +208,6 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
request.indicesOptions(indicesOptions);
return this;
}

public RequestBuilder setExtraParams(Map<String, String> params) {
request.extraParams(params);
return this;
}

}

public static class TransportAction extends TransportMasterNodeReadAction<Request, Response> {

@ -272,7 +248,7 @@ public class IndexUpgradeInfoAction extends Action<IndexUpgradeInfoAction.Reques
protected final void masterOperation(final Request request, ClusterState state, final ActionListener<Response> listener) {
if (licenseState.isUpgradeAllowed()) {
Map<String, UpgradeActionRequired> results =
indexUpgradeService.upgradeInfo(request.indices(), request.indicesOptions(), request.extraParams(), state);
indexUpgradeService.upgradeInfo(request.indices(), request.indicesOptions(), state);
listener.onResponse(new Response(results));
} else {
listener.onFailure(LicenseUtils.newComplianceException(XPackPlugin.UPGRADE));

@ -27,15 +27,11 @@ import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeAction.Request;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class RestIndexUpgradeAction extends BaseRestHandler {
private final Set<String> extraParameters;

public RestIndexUpgradeAction(Settings settings, RestController controller, Set<String> extraParameters) {
public RestIndexUpgradeAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(RestRequest.Method.POST, "_xpack/migration/upgrade/{index}", this);
this.extraParameters = extraParameters;
}

@Override

@ -54,14 +50,6 @@ public class RestIndexUpgradeAction extends BaseRestHandler {

private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) {
Request upgradeRequest = new Request(request.param("index"));
Map<String, String> extraParamsMap = new HashMap<>();
for (String param : extraParameters) {
String value = request.param(param);
if (value != null) {
extraParamsMap.put(param, value);
}
}
upgradeRequest.extraParams(extraParamsMap);
Map<String, String> params = new HashMap<>();
params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(true));
params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(true));

@ -17,21 +17,15 @@ import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeInfoAction;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeInfoAction.Request;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class RestIndexUpgradeInfoAction extends BaseRestHandler {
private final Set<String> extraParameters;

public RestIndexUpgradeInfoAction(Settings settings, RestController controller, Set<String> extraParameters) {
public RestIndexUpgradeInfoAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(RestRequest.Method.GET, "/_xpack/migration/assistance", this);
controller.registerHandler(RestRequest.Method.GET, "/_xpack/migration/assistance/{index}", this);
this.extraParameters = extraParameters;
}

@Override
public String getName() {
return "xpack_migration_assistance";

@ -49,14 +43,6 @@ public class RestIndexUpgradeInfoAction extends BaseRestHandler {
private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) {
Request infoRequest = new Request(Strings.splitStringByCommaToArray(request.param("index")));
infoRequest.indicesOptions(IndicesOptions.fromRequest(request, infoRequest.indicesOptions()));
Map<String, String> extraParamsMap = new HashMap<>();
for (String param : extraParameters) {
String value = request.param(param);
if (value != null) {
extraParamsMap.put(param, value);
}
}
infoRequest.extraParams(extraParamsMap);
return channel -> client.execute(IndexUpgradeInfoAction.INSTANCE, infoRequest, new RestToXContentListener<>(channel));
}

@ -514,9 +514,7 @@ public class Watcher implements ActionPlugin {
// These are all old templates from pre 6.0 era, that need to be deleted
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
return map -> {
map.keySet().removeIf(name -> "watches".equals(name) || "triggered_watches".equals(name)
|| name.startsWith("watch_history_"));

map.keySet().removeIf(name -> name.startsWith("watch_history_"));
return map;
};
}

@ -192,18 +192,23 @@ final class WatcherIndexingListener extends AbstractComponent implements Indexin
*/
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.state().nodes().getLocalNode().isDataNode() && event.metaDataChanged()) {
try {
IndexMetaData metaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());
if (metaData == null) {
boolean isWatchExecutionDistributed = WatcherLifeCycleService.isWatchExecutionDistributed(event.state());
if (isWatchExecutionDistributed) {
if (event.state().nodes().getLocalNode().isDataNode() && event.metaDataChanged()) {
try {
IndexMetaData metaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());
if (metaData == null) {
configuration = INACTIVE;
} else {
checkWatchIndexHasChanged(metaData, event);
}
} catch (IllegalStateException e) {
logger.error("error loading watches index: [{}]", e.getMessage());
configuration = INACTIVE;
} else {
checkWatchIndexHasChanged(metaData, event);
}
} catch (IllegalStateException e) {
logger.error("error loading watches index: [{}]", e.getMessage());
configuration = INACTIVE;
}
} else {
configuration = INACTIVE;
}
}

@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.watcher;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;

@ -19,6 +20,7 @@ import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.upgrade.Upgrade;
import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils;

@ -34,22 +36,16 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;

public class WatcherLifeCycleService extends AbstractComponent implements ClusterStateListener {

// this is the required index.format setting for watcher to start up at all
// this index setting is set by the upgrade API or automatically when a 6.0 index template is created
private static final int EXPECTED_INDEX_FORMAT_VERSION = 6;

private final WatcherService watcherService;
private final ClusterService clusterService;
private final ExecutorService executor;
private AtomicReference<List<String>> previousAllocationIds = new AtomicReference<>(Collections.emptyList());
private volatile WatcherMetaData watcherMetaData;

public WatcherLifeCycleService(Settings settings, ThreadPool threadPool, ClusterService clusterService,
WatcherService watcherService) {
WatcherLifeCycleService(Settings settings, ThreadPool threadPool, ClusterService clusterService,
WatcherService watcherService) {
super(settings);
this.executor = threadPool.executor(ThreadPool.Names.GENERIC);
this.watcherService = watcherService;
this.clusterService = clusterService;
clusterService.addListener(this);
// Close if the indices service is being stopped, so we don't run into search failures (locally) that will
// happen because we're shutting down and an watch is scheduled.

@ -62,10 +58,6 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
watcherMetaData = new WatcherMetaData(!settings.getAsBoolean("xpack.watcher.start_immediately", true));
}

public void start() {
start(clusterService.state(), true);
}

public void stop(String reason) {
watcherService.stop(reason);
}

@ -123,60 +115,89 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
if (currentWatcherStopped) {
executor.execute(() -> this.stop("watcher manually marked to shutdown in cluster state update, shutting down"));
} else {
if (watcherService.state() == WatcherState.STARTED && event.state().nodes().getLocalNode().isDataNode()) {
DiscoveryNode localNode = event.state().nodes().getLocalNode();
RoutingNode routingNode = event.state().getRoutingNodes().node(localNode.getId());
IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());
// if there are old nodes in the cluster hosting the watch index shards, we cannot run distributed, only on the master node
boolean isDistributedWatchExecutionEnabled = isWatchExecutionDistributed(event.state());
if (isDistributedWatchExecutionEnabled) {
if (watcherService.state() == WatcherState.STARTED && event.state().nodes().getLocalNode().isDataNode()) {
DiscoveryNode localNode = event.state().nodes().getLocalNode();
RoutingNode routingNode = event.state().getRoutingNodes().node(localNode.getId());
IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());

// no watcher index, time to pause, if we currently have shards here
if (watcherIndexMetaData == null) {
if (previousAllocationIds.get().isEmpty() == false) {
previousAllocationIds.set(Collections.emptyList());
executor.execute(() -> watcherService.pauseExecution("no watcher index found"));
// no watcher index, time to pause, as there are for sure no shards on this node
if (watcherIndexMetaData == null) {
if (previousAllocationIds.get().isEmpty() == false) {
previousAllocationIds.set(Collections.emptyList());
executor.execute(() -> watcherService.pauseExecution("no watcher index found"));
}
return;
}
return;
}

String watchIndex = watcherIndexMetaData.getIndex().getName();
List<ShardRouting> localShards = routingNode.shardsWithState(watchIndex, RELOCATING, STARTED);
String watchIndex = watcherIndexMetaData.getIndex().getName();
List<ShardRouting> localShards = routingNode.shardsWithState(watchIndex, RELOCATING, STARTED);

// no local shards, empty out watcher and not waste resources!
if (localShards.isEmpty()) {
if (previousAllocationIds.get().isEmpty() == false) {
executor.execute(() -> watcherService.pauseExecution("no local watcher shards"));
previousAllocationIds.set(Collections.emptyList());
// no local shards, empty out watcher and not waste resources!
if (localShards.isEmpty()) {
if (previousAllocationIds.get().isEmpty() == false) {
executor.execute(() -> watcherService.pauseExecution("no local watcher shards"));
previousAllocationIds.set(Collections.emptyList());
}
return;
}
return;
}

List<String> currentAllocationIds = localShards.stream()
.map(ShardRouting::allocationId)
.map(AllocationId::getId)
.collect(Collectors.toList());
Collections.sort(currentAllocationIds);
List<String> currentAllocationIds = localShards.stream()
.map(ShardRouting::allocationId)
.map(AllocationId::getId)
.collect(Collectors.toList());
Collections.sort(currentAllocationIds);

if (previousAllocationIds.get().equals(currentAllocationIds) == false) {
previousAllocationIds.set(currentAllocationIds);
executor.execute(() -> watcherService.reload(event.state(), "different shard allocation ids"));
if (previousAllocationIds.get().equals(currentAllocationIds) == false) {
previousAllocationIds.set(currentAllocationIds);
executor.execute(() -> watcherService.reload(event.state(), "different shard allocation ids"));
}
} else if (watcherService.state() != WatcherState.STARTED && watcherService.state() != WatcherState.STARTING) {
IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());
IndexMetaData triggeredWatchesIndexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStore.INDEX_NAME,
event.state().metaData());
boolean isIndexInternalFormatWatchIndex = watcherIndexMetaData == null ||
Upgrade.checkInternalIndexFormat(watcherIndexMetaData);
boolean isIndexInternalFormatTriggeredWatchIndex = triggeredWatchesIndexMetaData == null ||
Upgrade.checkInternalIndexFormat(triggeredWatchesIndexMetaData);
if (isIndexInternalFormatTriggeredWatchIndex && isIndexInternalFormatWatchIndex) {
executor.execute(() -> start(event.state(), false));
} else {
logger.warn("Not starting watcher, the indices have not been upgraded yet. Please run the Upgrade API");
}
}
} else if (watcherService.state() != WatcherState.STARTED && watcherService.state() != WatcherState.STARTING) {
IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData());
IndexMetaData triggeredWatchesIndexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStore.INDEX_NAME,
event.state().metaData());
String indexFormatSetting = IndexMetaData.INDEX_FORMAT_SETTING.getKey();
boolean isIndexInternalFormatWatchIndex = watcherIndexMetaData == null ||
watcherIndexMetaData.getSettings().getAsInt(indexFormatSetting, 0) == EXPECTED_INDEX_FORMAT_VERSION;
boolean isIndexInternalFormatTriggeredWatchIndex = triggeredWatchesIndexMetaData == null ||
triggeredWatchesIndexMetaData.getSettings().getAsInt(indexFormatSetting, 0) == EXPECTED_INDEX_FORMAT_VERSION;
if (isIndexInternalFormatTriggeredWatchIndex && isIndexInternalFormatWatchIndex) {
executor.execute(() -> start(event.state(), false));
} else {
if (event.localNodeMaster()) {
if (watcherService.state() != WatcherState.STARTED && watcherService.state() != WatcherState.STARTING) {
executor.execute(() -> start(event.state(), false));
}
} else {
logger.warn("Not starting watcher, the indices have not been upgraded yet. Please run the Upgrade API");
if (watcherService.state() == WatcherState.STARTED || watcherService.state() == WatcherState.STARTING) {
executor.execute(() -> watcherService.pauseExecution("Pausing watcher, cluster contains old nodes not supporting" +
" distributed watch execution"));
}
}
}
}
}

/**
* Checks if the preconditions are given to run watcher with distributed watch execution.
* The following requirements need to be fulfilled
*
* 1. The master node must run on a version greather than or equal 6.0
* 2. The nodes holding the watcher shards must run on a version greater than or equal 6.0
*
* @param state The cluster to check against
* @return true, if the above requirements are fulfilled, false otherwise
*/
public static boolean isWatchExecutionDistributed(ClusterState state) {
// short circuit if all nodes are on 6.x, should be standard after upgrade
return state.nodes().getMinNodeVersion().onOrAfter(Version.V_6_0_0_beta1);
}

public WatcherMetaData watcherMetaData() {
return watcherMetaData;
}

@ -56,7 +56,6 @@ public class WatcherSearchTemplateRequest implements ToXContentObject {

public WatcherSearchTemplateRequest(String[] indices, String[] types, SearchType searchType, IndicesOptions indicesOptions,
Script template) {
assert template == null || Script.DEFAULT_TEMPLATE_LANG.equals(template.getLang());
this.indices = indices;
this.types = types;
this.searchType = searchType;

@ -248,13 +247,6 @@ public class WatcherSearchTemplateRequest implements ToXContentObject {
DEFAULT_INDICES_OPTIONS);
} else if (TEMPLATE_FIELD.match(currentFieldName)) {
template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG);

// for deprecation of stored script namespaces the default lang is ignored,
// so the template lang must be set for a stored script
if (template.getType() == ScriptType.STORED) {
template = new Script(
ScriptType.STORED, Script.DEFAULT_TEMPLATE_LANG, template.getIdOrCode(), template.getParams());
}
} else {
throw new ElasticsearchParseException("could not read search request. unexpected object field [" +
currentFieldName + "]");

@ -14,6 +14,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xpack.watcher.Watcher;

@ -48,7 +49,8 @@ public class WatcherSearchTemplateService extends AbstractComponent {
watcherContextParams.putAll(source.getParams());
}
// Templates are always of lang mustache:
Script template = new Script(source.getType(), "mustache", source.getIdOrCode(), source.getOptions(), watcherContextParams);
Script template = new Script(source.getType(), source.getType() == ScriptType.STORED ? null : "mustache",
source.getIdOrCode(), source.getOptions(), watcherContextParams);
TemplateScript.Factory compiledTemplate = scriptService.compile(template, Watcher.SCRIPT_TEMPLATE_CONTEXT);
return compiledTemplate.newInstance(template.getParams()).execute();
}

@ -6,11 +6,16 @@
package org.elasticsearch.xpack.watcher.transport.actions;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;

@ -18,23 +23,56 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.watcher.WatcherLifeCycleService;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils;

import java.util.function.Supplier;

public abstract class WatcherTransportAction<Request extends ActionRequest, Response extends ActionResponse>
extends HandledTransportAction<Request, Response> {
public abstract class WatcherTransportAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeAction<Request, Response> {

protected final XPackLicenseState licenseState;
private final ClusterService clusterService;
private final Supplier<Response> response;

public WatcherTransportAction(Settings settings, String actionName, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
XPackLicenseState licenseState, Supplier<Request> request) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
XPackLicenseState licenseState, ClusterService clusterService, Supplier<Request> request,
Supplier<Response> response) {
super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request);
this.licenseState = licenseState;
this.clusterService = clusterService;
this.response = response;
}

protected String executor() {
return ThreadPool.Names.GENERIC;
}

@Override
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
protected Response newResponse() {
return response.get();
}

protected abstract void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception;

protected boolean localExecute(Request request) {
return WatcherLifeCycleService.isWatchExecutionDistributed(clusterService.state());
}

@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
IndexMetaData index = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData());
if (index != null) {
return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, index.getIndex().getName());
} else {
return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE);
}
}

@Override
protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
if (licenseState.isWatcherAllowed()) {
super.doExecute(task, request, listener);
} else {

@ -12,8 +12,10 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;

@ -44,16 +46,17 @@ public class TransportAckWatchAction extends WatcherTransportAction<AckWatchRequ
@Inject
public TransportAckWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState,
Watch.Parser parser, InternalClient client) {
Watch.Parser parser, InternalClient client, ClusterService clusterService) {
super(settings, AckWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, AckWatchRequest::new);
licenseState, clusterService, AckWatchRequest::new, AckWatchResponse::new);
this.clock = clock;
this.parser = parser;
this.client = client;
}

@Override
protected void doExecute(AckWatchRequest request, ActionListener<AckWatchResponse> listener) {
protected void masterOperation(AckWatchRequest request, ClusterState state,
ActionListener<AckWatchResponse> listener) throws Exception {
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId())
.preference(Preference.LOCAL.type()).realtime(true);

@ -12,8 +12,10 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -23,6 +25,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.trigger.TriggerService;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.watch.WatchStatus;
import org.joda.time.DateTime;

@ -42,21 +45,25 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
private final Clock clock;
private final Watch.Parser parser;
private final Client client;
private final TriggerService triggerService;

@Inject
public TransportActivateWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Clock clock,
XPackLicenseState licenseState, Watch.Parser parser,
InternalClient client) {
XPackLicenseState licenseState, Watch.Parser parser, ClusterService clusterService,
InternalClient client, TriggerService triggerService) {
super(settings, ActivateWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, ActivateWatchRequest::new);
licenseState, clusterService, ActivateWatchRequest::new, ActivateWatchResponse::new);
this.clock = clock;
this.parser = parser;
this.client = client;
this.triggerService = triggerService;
}

@Override
protected void doExecute(ActivateWatchRequest request, ActionListener<ActivateWatchResponse> listener) {
protected void masterOperation(ActivateWatchRequest request, ClusterState state, ActionListener<ActivateWatchResponse> listener)
throws Exception {

try {
DateTime now = new DateTime(clock.millis(), UTC);
UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId());

@ -77,6 +84,13 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
XContentType.JSON);
watch.version(getResponse.getVersion());
watch.status().version(getResponse.getVersion());
if (localExecute(request)) {
if (watch.status().state().isActive()) {
triggerService.add(watch);
} else {
triggerService.remove(watch.id());
}
}
listener.onResponse(new ActivateWatchResponse(watch.status()));
} else {
listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getWatchId()));

@ -100,4 +114,5 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
return builder;
}
}

}

|
||||
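
The +84 hunk above toggles only the local node's scheduler when the request executed locally: an active watch is added to the trigger service, an inactive one removed. Below is a minimal standalone sketch of that toggle; ToyTriggerService and the other names are hypothetical stand-ins, not the real Watcher classes.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for Watcher's TriggerService; only active
// watches stay registered so their triggers keep firing.
class ToyTriggerService {
    private final Map<String, Boolean> registered = new ConcurrentHashMap<>();

    void add(String watchId) { registered.put(watchId, Boolean.TRUE); }

    void remove(String watchId) { registered.remove(watchId); }

    boolean isRegistered(String watchId) { return registered.containsKey(watchId); }
}

public class ActivateToggleSketch {
    public static void main(String[] args) {
        ToyTriggerService triggerService = new ToyTriggerService();
        String watchId = "my-watch";
        boolean nowActive = true; // the state the activate request just wrote

        // Mirrors the branch added in masterOperation(): register an
        // activated watch, drop a deactivated one from scheduling.
        if (nowActive) {
            triggerService.add(watchId);
        } else {
            triggerService.remove(watchId);
        }
        System.out.println("registered = " + triggerService.isRegistered(watchId));
    }
}

Because add and remove are idempotent map operations in this sketch, repeated activate or deactivate requests leave the scheduler in a consistent state.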

@@ -12,8 +12,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -60,9 +62,10 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
    public TransportExecuteWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool,
                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                       ExecutionService executionService, Clock clock, XPackLicenseState licenseState,
                                       Watch.Parser watchParser, InternalClient client, TriggerService triggerService) {
                                       Watch.Parser watchParser, InternalClient client, TriggerService triggerService,
                                       ClusterService clusterService) {
        super(settings, ExecuteWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
                licenseState, ExecuteWatchRequest::new);
                licenseState, clusterService, ExecuteWatchRequest::new, ExecuteWatchResponse::new);
        this.executionService = executionService;
        this.clock = clock;
        this.triggerService = triggerService;

@@ -71,7 +74,8 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
    }

    @Override
    protected void doExecute(ExecuteWatchRequest request, ActionListener<ExecuteWatchResponse> listener) {
    protected void masterOperation(ExecuteWatchRequest request, ClusterState state,
                                   ActionListener<ExecuteWatchResponse> listener) throws Exception {
        if (request.getId() != null) {
            GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId())
                    .preference(Preference.LOCAL.type()).realtime(true);

@@ -135,4 +139,5 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
            }
        });
    }

}
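
Each of these actions swaps doExecute(request, listener) for masterOperation(request, state, listener), the shape used by master-node transport actions. Here is a toy model of that shape under simplified, assumed types; the real base class also routes the request to the elected master and supplies the actual ClusterState.

import java.util.function.Consumer;

// Toy model of the master-node action shape the diff moves to:
// subclasses implement masterOperation(request, state, listener)
// instead of doExecute(request, listener). All names are
// illustrative, not the Elasticsearch base classes.
abstract class ToyMasterNodeAction<Req, Resp> {
    // The real transport layer would forward the request to the
    // elected master first; this sketch just calls through.
    final void execute(Req request, String clusterState, Consumer<Resp> listener) throws Exception {
        masterOperation(request, clusterState, listener);
    }

    protected abstract void masterOperation(Req request, String clusterState, Consumer<Resp> listener)
            throws Exception;
}

public class MasterOperationSketch extends ToyMasterNodeAction<String, String> {
    @Override
    protected void masterOperation(String request, String clusterState, Consumer<String> listener) {
        listener.accept("executed [" + request + "] with state [" + clusterState + "]");
    }

    public static void main(String[] args) throws Exception {
        new MasterOperationSketch().execute("_execute/my-watch", "state-v42", System.out::println);
    }
}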

@@ -5,11 +5,13 @@
 */
package org.elasticsearch.xpack.watcher.transport.actions.get;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.xpack.watcher.watch.Watch;

import java.io.IOException;

@@ -59,12 +61,20 @@ public class GetWatchRequest extends MasterNodeReadRequest<GetWatchRequest> {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
            in.readLong();
            in.readByte();
        }
        id = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
            out.writeLong(1);
            out.writeByte(VersionType.INTERNAL.getValue());
        }
        out.writeString(id);
    }
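
The readFrom/writeTo pair keeps the wire format compatible with pre-6.0.0-alpha1 nodes by still emitting, and skipping on read, the legacy version long and version-type byte. Below is a standalone sketch of the same version-gating idea, using plain Java streams and made-up version constants rather than Elasticsearch's StreamInput/StreamOutput.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch: when talking to an older peer, write (and discard on read)
// the legacy fields so both sides keep the stream aligned. The
// constants and stream types are simplified stand-ins.
public class BwcSerializationSketch {
    static final int V_6_0_0_ALPHA1 = 6_000_001;

    static void writeTo(DataOutputStream out, int peerVersion, String id) throws IOException {
        if (peerVersion < V_6_0_0_ALPHA1) {
            out.writeLong(1L);   // legacy watch version, no longer used
            out.writeByte(0);    // legacy version-type marker
        }
        out.writeUTF(id);
    }

    static String readFrom(DataInputStream in, int peerVersion) throws IOException {
        if (peerVersion < V_6_0_0_ALPHA1) {
            in.readLong();       // discard legacy fields
            in.readByte();
        }
        return in.readUTF();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        writeTo(new DataOutputStream(buf), 5_060_099, "my-watch"); // pretend 5.x peer
        String id = readFrom(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())), 5_060_099);
        System.out.println(id); // -> my-watch
    }
}

The key property is symmetry: both sides gate on the same peer version, so the extra bytes are always written and consumed together.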

@@ -9,8 +9,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -39,16 +41,17 @@ public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequ
    @Inject
    public TransportGetWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
                                   IndexNameExpressionResolver indexNameExpressionResolver, XPackLicenseState licenseState,
                                   Watch.Parser parser, Clock clock, InternalClient client) {
                                   Watch.Parser parser, Clock clock, InternalClient client, ClusterService clusterService) {
        super(settings, GetWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
                licenseState, GetWatchRequest::new);
                licenseState, clusterService, GetWatchRequest::new, GetWatchResponse::new);
        this.parser = parser;
        this.clock = clock;
        this.client = client;
    }

    @Override
    protected void doExecute(GetWatchRequest request, ActionListener<GetWatchResponse> listener) {
    protected void masterOperation(GetWatchRequest request, ClusterState state,
                                   ActionListener<GetWatchResponse> listener) throws Exception {
        GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId())
                .preference(Preference.LOCAL.type()).realtime(true);
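
The get here uses Preference.LOCAL and realtime(true), so a watch is readable immediately after being indexed, without waiting for a refresh. The following toy model illustrates that visibility rule; it is a deliberate simplification of the real engine, with hypothetical names.

import java.util.HashMap;
import java.util.Map;

// Toy model of realtime-get semantics: a realtime get also consults
// documents that were indexed but not yet refreshed, so a freshly
// stored watch is immediately visible.
public class RealtimeGetSketch {
    private final Map<String, String> writeBuffer = new HashMap<>(); // indexed, not yet refreshed
    private final Map<String, String> searchable = new HashMap<>();  // visible after refresh

    void index(String id, String doc) { writeBuffer.put(id, doc); }

    void refresh() { searchable.putAll(writeBuffer); writeBuffer.clear(); }

    String get(String id, boolean realtime) {
        if (realtime && writeBuffer.containsKey(id)) {
            return writeBuffer.get(id);
        }
        return searchable.get(id);
    }

    public static void main(String[] args) {
        RealtimeGetSketch engine = new RealtimeGetSketch();
        engine.index("my-watch", "{\"trigger\":{\"schedule\":{\"interval\":\"5m\"}}}");
        System.out.println(engine.get("my-watch", true));  // found before any refresh
        System.out.println(engine.get("my-watch", false)); // null until refresh() runs
    }
}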

@@ -10,7 +10,9 @@ import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

@@ -22,6 +24,7 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.support.xcontent.WatcherParams;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.trigger.TriggerService;
import org.elasticsearch.xpack.watcher.watch.Payload;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.joda.time.DateTime;

@@ -36,20 +39,24 @@ public class TransportPutWatchAction extends WatcherTransportAction<PutWatchRequ
    private final Clock clock;
    private final Watch.Parser parser;
    private final InternalClient client;
    private final TriggerService triggerService;

    @Inject
    public TransportPutWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
                                   IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState,
                                   Watch.Parser parser, InternalClient client) {
                                   Watch.Parser parser, InternalClient client, ClusterService clusterService,
                                   TriggerService triggerService) {
        super(settings, PutWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
                licenseState, PutWatchRequest::new);
                licenseState, clusterService, PutWatchRequest::new, PutWatchResponse::new);
        this.clock = clock;
        this.parser = parser;
        this.client = client;
        this.triggerService = triggerService;
    }

    @Override
    protected void doExecute(final PutWatchRequest request, final ActionListener<PutWatchResponse> listener) {
    protected void masterOperation(PutWatchRequest request, ClusterState state,
                                   ActionListener<PutWatchResponse> listener) throws Exception {
        try {
            DateTime now = new DateTime(clock.millis(), UTC);
            Watch watch = parser.parseWithSecrets(request.getId(), false, request.getSource(), now, request.xContentType());

@@ -66,6 +73,9 @@ public class TransportPutWatchAction extends WatcherTransportAction<PutWatchRequ

            client.index(indexRequest, ActionListener.wrap(indexResponse -> {
                boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
                if (localExecute(request) == false && watch.status().state().isActive()) {
                    triggerService.add(watch);
                }
                listener.onResponse(new PutWatchResponse(indexResponse.getId(), indexResponse.getVersion(), created));
            }, listener::onFailure));
        }
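
The put flow indexes the watch document asynchronously and registers the trigger only in the success callback, and only when the node did not execute locally and the parsed watch is active. Here is a sketch of that callback ordering, with CompletableFuture standing in for ActionListener and all types simplified stand-ins.

import java.util.concurrent.CompletableFuture;

// Sketch of the put-watch flow above: store the document, then, only
// on success, decide whether to register the trigger. The in-memory
// index() call below is a placeholder for a real async client call.
public class PutWatchFlowSketch {
    static final class IndexResponse {
        final String id; final long version; final boolean created;
        IndexResponse(String id, long version, boolean created) {
            this.id = id; this.version = version; this.created = created;
        }
    }

    static CompletableFuture<IndexResponse> index(String watchId) {
        return CompletableFuture.completedFuture(new IndexResponse(watchId, 1L, true));
    }

    public static void main(String[] args) {
        boolean localExecute = false; // request was forwarded, not handled locally
        boolean watchActive = true;   // parsed watch status

        index("my-watch").whenComplete((resp, failure) -> {
            if (failure != null) {
                System.err.println("put failed: " + failure);
                return;
            }
            if (!localExecute && watchActive) {
                System.out.println("registering trigger for " + resp.id);
            }
            System.out.println("created=" + resp.created + " version=" + resp.version);
        });
    }
}

Registering the trigger inside the success callback avoids scheduling a watch whose document was never persisted.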

@@ -1,6 +1,6 @@
{
  "index_patterns": ".monitoring-alerts-${monitoring.template.version}",
  "version": 6000002,
  "version": 6000026,
  "settings": {
    "index": {
      "number_of_shards": 1,

@@ -1,6 +1,6 @@
{
  "index_patterns": ".monitoring-beats-${monitoring.template.version}-*",
  "version": 6000002,
  "version": 6000026,
  "settings": {
    "index.number_of_shards": 1,
    "index.number_of_replicas": 1,

@@ -1,6 +1,6 @@
{
  "index_patterns": ".monitoring-es-${monitoring.template.version}-*",
  "version": 6000002,
  "version": 6000026,
  "settings": {
    "index.number_of_shards": 1,
    "index.number_of_replicas": 1,
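
All three monitoring templates bump their "version" field from 6000002 to 6000026. As a sketch of why that field matters: template publishers generally compare the bundled template version against the one already installed and re-publish only when theirs is newer. The check below is illustrative, not the actual exporter code.

// Hypothetical version comparison mirroring the bump in this diff.
public class TemplateVersionSketch {
    static boolean needsUpgrade(Integer installed, int bundled) {
        return installed == null || installed < bundled;
    }

    public static void main(String[] args) {
        System.out.println(needsUpgrade(6000002, 6000026)); // true: re-publish
        System.out.println(needsUpgrade(6000026, 6000026)); // false: up to date
        System.out.println(needsUpgrade(null, 6000026));    // true: never installed
    }
}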