Merge remote-tracking branch 'elastic/master' into feature/sql_2

Original commit: elastic/x-pack-elasticsearch@9ac41d008e
commit 70cea58262

@@ -0,0 +1,28 @@
[[ml-calendars]]
=== Calendars and Scheduled Events

Sometimes there are periods when you expect unusual activity to take place,
such as bank holidays, "Black Friday", or planned system outages. If you
identify these events in advance, no anomalies are generated during that period.
The {ml} model is not adversely affected and you do not receive spurious results.

You can create calendars and scheduled events in the **Settings** pane on the
**Machine Learning** page in {kib} or by using {ref}/ml-apis.html[{ml} APIs].

A scheduled event must have a start time, end time, and description. You can
identify zero or more scheduled events in a calendar. Jobs can then subscribe to
calendars and the {ml} analytics handle all subsequent scheduled events
appropriately.
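
For example, the following {ref}/ml-apis.html[{ml} API] calls sketch how you
might create a calendar and subscribe a job to it; the calendar ID
`planned-outages` and job ID `total-requests` are illustrative:

[source,js]
--------------------------------------------------
PUT _xpack/ml/calendars/planned-outages

PUT _xpack/ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------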

If you want to add multiple scheduled events at once, you can import an
iCalendar (`.ics`) file in {kib} or a JSON file in the add events to calendar
API.
//TO-DO: Link to {ref}/ml-post-calendar-event.html when the API docs exist.
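
For example, the following API call sketches adding a scheduled event to a
calendar; the calendar ID and event details are illustrative, and the times are
milliseconds since the epoch:

[source,js]
--------------------------------------------------
POST _xpack/ml/calendars/planned-outages/events
{
  "events": [
    {
      "description": "Bank holiday",
      "start_time": 1514764800000,
      "end_time": 1514851200000
    }
  ]
}
--------------------------------------------------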

NOTE: Bucket results are generated during scheduled events but they have an
anomaly score of zero. For more information about bucket results, see
{ref}/ml-results-resource.html[Results Resources].

//TO-DO: Add screenshot showing special events in Single Metric Viewer?

@@ -3,6 +3,7 @@

include::analyzing.asciidoc[]
include::forecasting.asciidoc[]
include::calendars.asciidoc[]

[[ml-concepts]]
=== Basic Machine Learning Terms

@@ -11,4 +11,8 @@ such as encrypting communications, role-based access control, IP filtering, and
auditing. For more information, see
{xpack-ref}/xpack-security.html[Securing the Elastic Stack].

include::securing-communications/securing-elasticsearch.asciidoc[]
include::securing-communications/configuring-tls-docker.asciidoc[]
include::securing-communications/enabling-cipher-suites.asciidoc[]
include::securing-communications/separating-node-client-traffic.asciidoc[]
include::{xes-repo-dir}/settings/security-settings.asciidoc[]

@@ -18,6 +18,14 @@ cluster and receiving data through replication.

include::securing-communications/setting-up-ssl.asciidoc[]

include::securing-communications/enabling-cipher-suites.asciidoc[]
//TO-DO: These sections can be removed when all links to them are removed.

include::securing-communications/separating-node-client-traffic.asciidoc[]
[[ciphers]]
=== Enabling Cipher Suites for Stronger Encryption

See {ref}/ciphers.html[Enabling Cipher Suites for Stronger Encryption].

[[separating-node-client-traffic]]
=== Separating node-to-node and client traffic

See {ref}/separating-node-client-traffic.html[Separating node-to-node and client traffic].

@@ -1,3 +1,4 @@
[role="xpack"]
[[ciphers]]
=== Enabling Cipher Suites for Stronger Encryption

@@ -0,0 +1,93 @@
[[node-certificates]]
==== Generating Node Certificates

TLS requires X.509 certificates to perform encryption and authentication of the
application that is being communicated with. In order for the communication
between nodes to be truly secure, the certificates must be validated. The
recommended approach for validating certificate authenticity in an {es} cluster
is to trust the certificate authority (CA) that signed the certificate. By doing
this, as nodes are added to your cluster they just need to use a certificate
signed by the same CA and the node is automatically allowed to join the cluster.
Additionally, it is recommended that the certificates contain subject alternative
names (SAN) that correspond to the node's IP address and DNS name so that
hostname verification can be performed.

In order to simplify the process of generating certificates for the Elastic
Stack, a command line tool, {ref}/certutil.html[`certutil`], has been included
with {xpack}. This tool takes care of generating a CA and signing certificates
with the CA. `certutil` can be used interactively or in a silent mode through
the use of an input file. The `certutil` tool also supports generation of
certificate signing requests (CSR), so that a commercial- or
organization-specific CA can be used to sign the certificates. For example:

. Optional: Create a certificate authority for your {es} cluster.
+
--
For example, use the `certutil ca` command:

[source,shell]
----------------------------------------------------------
bin/x-pack/certutil ca
----------------------------------------------------------

You can configure the cluster to trust all nodes that have a certificate that
has been signed by this CA.

The command outputs a single file, with a default name of `elastic-stack-ca.p12`.
This file is a PKCS#12 keystore that contains the public certificate for your CA
and the private key that is used to sign the certificates for each node.

The `certutil` command also prompts you for a password to protect the file and
key. If you plan to add more nodes to your cluster in the future, retain a copy
of the file and remember its password.
--

. Generate a certificate and private key for each node in your cluster.
+
--
For example, use the `certutil cert` command:

[source,shell]
----------------------------------------------------------
bin/x-pack/certutil cert --ca elastic-stack-ca.p12
----------------------------------------------------------

The output is a single PKCS#12 keystore that includes the node certificate, node
key, and CA certificate.

You are also prompted for a password. You can enter a password for your
certificate and key, or you can leave the password blank by pressing Enter.

By default, `certutil` generates certificates that have no hostname information
in them (that is, they do not have any Subject Alternative Name fields).
This means that you can use the certificate for every node in your cluster, but
you must turn off hostname verification as shown in the configuration below.

If you want to use hostname verification within your cluster, run the
`certutil cert` command once for each of your nodes and provide the `--name`,
`--dns`, and `--ip` options.
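
For example, the following command is a sketch of generating a per-node
certificate; the node name, DNS name, and IP address shown here are
illustrative:

[source,shell]
----------------------------------------------------------
bin/x-pack/certutil cert --ca elastic-stack-ca.p12 \
  --name node01 --dns node01.example.com --ip 10.0.0.1
----------------------------------------------------------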

NOTE: You should secure the output files, since they contain the private keys
for your instance.

Alternatively, if you want to use a commercial or organization-specific CA,
you can use the `certutil csr` command to generate certificate signing requests
(CSR) for the nodes in your cluster. For more information, see <<certutil>>.
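
For example, a minimal sketch of generating CSRs in silent mode; the file names
are illustrative, and `instances.yml` is assumed to list your node names, DNS
names, and IP addresses:

[source,shell]
----------------------------------------------------------
bin/x-pack/certutil csr --in instances.yml --out csr-bundle.zip
----------------------------------------------------------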
--

. Copy the node certificate to the appropriate locations.
+
--
Copy the applicable `.p12` file into a directory within the {es} configuration
directory on each node. For example, `/home/es/config/certs`. There is no need
to copy the CA file to this directory.
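
For example, a minimal sketch (the paths are illustrative and depend on where
you ran `certutil` and where your {es} configuration directory lives):

[source,shell]
----------------------------------------------------------
mkdir -p /home/es/config/certs
cp elastic-certificates.p12 /home/es/config/certs/
----------------------------------------------------------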

For each additional Elastic product that you want to configure, copy the
certificates to the relevant configuration directory. For more information, see
<<enable-ssl>>.
--

NOTE: If you choose not to use `certutil`, the certificates that you obtain must
allow for both `clientAuth` and `serverAuth` if the extended key usage extension
is present. The certificates need to be in PEM or PKCS#12 format. Although not
required, it is highly recommended that the certificate contain the DNS names
and/or IP addresses of the node so that hostname verification can be used.

@@ -0,0 +1,102 @@
[[enable-ssl]]
==== Enabling TLS on {es} Nodes

Once you have the signed certificate, private key, and CA certificate, you need
to modify the node configuration to enable Transport Layer Security (TLS/SSL).

. Specify the information required to access the node's certificate.

** If the certificate is in PKCS#12 format, add the following information to the
`elasticsearch.yml` file on each node:
+
--
[source,yaml]
-----------------------------------------------------------
xpack.ssl.keystore.path: certs/elastic-certificates.p12 <1>
xpack.ssl.truststore.path: certs/elastic-certificates.p12 <2>
-----------------------------------------------------------
<1> If you created a separate certificate for each node, then you might need to
customize this path on each node. If the filename matches the node name, you can
use the `certs/${node.name}.p12` format, for example.
<2> The `certutil` output includes the CA certificate inside the PKCS#12
keystore, therefore the keystore can also be used as the truststore. This name
should match the `keystore.path` value.
--

** If the certificate is in PEM format, add the following information to the
`elasticsearch.yml` file on each node:
+
--
[source,yaml]
--------------------------------------------------
xpack.ssl.key: /home/es/config/x-pack/node01.key <1>
xpack.ssl.certificate: /home/es/config/x-pack/node01.crt <2>
xpack.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3>
--------------------------------------------------
<1> The full path to the node key file. This must be a location within the
{es} configuration directory.
<2> The full path to the node certificate. This must be a location within the
{es} configuration directory.
<3> An array of paths to the CA certificates that should be trusted. These paths
must be a location within the {es} configuration directory.
--

. If you secured the node's certificate with a password, add the password to
your {es} keystore:
+
--
[source,shell]
-----------------------------------------------------------
bin/elasticsearch-keystore add xpack.ssl.keystore.secure_password

bin/elasticsearch-keystore add xpack.ssl.truststore.secure_password
-----------------------------------------------------------
--

. Enable TLS on the transport networking layer to ensure that communication
between nodes is encrypted. Make the following changes in `elasticsearch.yml`:
+
--
[source,yaml]
--------------------------------------------------
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate <1>
--------------------------------------------------
<1> If you used the `--dns` or `--ip` options with the `certutil cert` command
and you want to enable strict hostname checking, set the verification mode to
`full`.
--

. Optional: Enable TLS on the HTTP layer to ensure that communication between
HTTP clients and the cluster is encrypted.
+
--
NOTE: Enabling TLS on the HTTP layer is strongly recommended but is not required.
If you enable TLS on the HTTP layer in {es}, then you might need to make
configuration changes in other parts of the Elastic Stack and in any {es}
clients that you use.

Make the following changes in `elasticsearch.yml`:

[source,yaml]
--------------------------------------------------
xpack.security.http.ssl.enabled: true
--------------------------------------------------
--

. Restart {es}.
+
--
You must perform a full cluster restart. Nodes which are configured to use TLS
cannot communicate with nodes that are using unencrypted networking (and
vice versa). After enabling TLS, you must restart all nodes in order to maintain
communication across the cluster.
--
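
If you enabled TLS on the HTTP layer, you can verify the setup by querying the
cluster over HTTPS. This is a sketch; it assumes that you have the CA
certificate in PEM format (for example, exported from the PKCS#12 file with
`openssl pkcs12`) and a user with sufficient privileges:

[source,shell]
-----------------------------------------------------------
curl --cacert ca.crt -u elastic 'https://localhost:9200/_cluster/health?pretty'
-----------------------------------------------------------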

NOTE: All TLS-related node settings are considered to be highly sensitive and
therefore are not exposed via the
{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. For more
information about any of these settings, see <<security-settings>>.

For information about configuring other products in the Elastic Stack, see
{xpack-ref}/ssl-tls.html[Setting Up TLS on a Cluster].

@@ -0,0 +1,28 @@
[role="xpack"]
[[configuring-tls]]
=== Encrypting Communications in {es}

{security} enables you to encrypt traffic to, from, and within your {es} cluster.
Connections are secured using Transport Layer Security (TLS/SSL).

WARNING: Clusters that do not have encryption enabled send all data in plain
text, including passwords, and will not be able to install a license that
enables {security}.

To enable encryption, you need to perform the following steps on each node in
the cluster:

. <<installing-xpack-es,Install {xpack} into {es}>>.

. <<node-certificates, Generate a private key and X.509 certificate>>.

. <<enable-ssl, Configure each node>> to:
.. Identify itself using its signed certificate.
.. Required: Enable SSL on the transport layer.
.. Recommended: Enable SSL on the HTTP layer.
. Restart {es}.

For more information about encrypting communications across the Elastic Stack,
see {xpack-ref}/encrypting-communications.html[Encrypting Communications].

include::node-certificates.asciidoc[]
include::node-config.asciidoc[]

@@ -1,3 +1,4 @@
[role="xpack"]
[[separating-node-client-traffic]]
=== Separating node-to-node and client traffic


@@ -35,7 +36,8 @@ transport.profiles.client.bind_host: 1.1.1.1 <2>
<1> The bind address for the network that will be used for node-to-node communication
<2> The bind address for the network used for client communication

If separate networks are not available, then <<ip-filtering, IP Filtering>> can
If separate networks are not available, then
{xpack-ref}/ip-filtering.html[IP Filtering] can
be enabled to limit access to the profiles.

When using SSL for transport, a different set of certificates can also be used

@@ -62,4 +64,5 @@ transport.profiles.client.xpack.security.ssl.client_authentication: none

This setting keeps certificate authentication active for node-to-node traffic,
but removes the requirement to distribute a signed certificate to transport
clients. Please see the <<transport-client, Transport Client>> section.
clients. For more information, see
{xpack-ref}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster].

@@ -1,117 +1,50 @@
[[ssl-tls]]
=== Setting Up SSL/TLS on a Cluster
=== Setting Up TLS on a Cluster

{security} enables you to encrypt traffic to, from, and within your Elasticsearch
{security} enables you to encrypt traffic to, from, and within your {es}
cluster. Connections are secured using Transport Layer Security (TLS), which is
commonly referred to as "SSL".

WARNING: Clusters that do not have encryption enabled send all data in plain text
including passwords and will not be able to install a license that enables {security}.

To enable encryption, you need to perform the following steps on each node in
the cluster:
The following steps describe how to enable encryption across the various
components of the Elastic Stack. You must perform each of the steps that are
applicable to your cluster.

. <<installing-node-certificates, Generate a private key and X.509 certificate>>.
. Generate a private key and X.509 certificate for each of your {es} nodes. See
{ref}/configuring-tls.html#node-certificates[Generating Node Certificates].

. <<configure-ssl, Configure the node>> to:
.. Identify itself using its signed certificate.
.. Enable SSL on the transport and HTTP layers.
. Configure each node in the cluster to identify itself using its signed
certificate and enable TLS on the transport layer. You can also optionally
enable TLS on the HTTP layer. See
{ref}/configuring-tls.html#enable-ssl[Enabling TLS on {es} Nodes].

. Restart Elasticsearch.
. Configure {monitoring} to use encrypted connections. See <<secure-monitoring>>.

. Configure {kib} to encrypt communications between the browser and
the {kib} server and to connect to {es} via HTTPS. See
{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}].

. Configure Logstash to use TLS encryption. See
{logstash-ref}/ls-security.html[Configuring Security in Logstash].

. Configure Beats to use encrypted connections. See <<beats>>.

. Configure the Java transport client to use encrypted communications.
See <<java-clients>>.

. Configure {es} for Apache Hadoop to use secured transport. See
{hadoop-ref}/security.html[{es} for Apache Hadoop Security].

//The following sections can be removed after we clean up all links to these anchors.

[[installing-node-certificates]]
==== Generating Node Certificates

TLS requires X.509 certificates to perform encryption and authentication of the application
that is being communicated with. In order for the communication between nodes to be truly
secure, the certificates must be validated. The recommended approach for validating
certificate authenticity in an Elasticsearch cluster is to trust the certificate authority (CA)
that signed the certificate. By doing this, as nodes are added to your cluster they just need
to use a certificate signed by the same CA and the node is automatically allowed to join the
cluster. Additionally, it is recommended that the certificates contain subject alternative
names (SAN) that correspond to the node's IP address and DNS name so that hostname verification
can be performed.

In order to simplify the process of generating certificates for the Elastic Stack, a command
line tool, {ref}/certutil.html[`certutil`], has been included with {xpack}. This
tool takes care of generating
a CA and signing certificates with the CA. `certutil` can be used interactively or in a silent
mode through the use of an input file. The `certutil` tool also supports generation of certificate
signing requests (CSR), so that a commercial or organization-specific CA can be used to sign
the certificates. For example:

1. Optional: Create a certificate authority by using the `certutil ca` command.
2. Generate a certificate for each node by using the `certutil cert` command.
+
--
NOTE: You should secure the output files, since they contain the private keys
for your instance.

--
3. Copy the node certificate to the appropriate locations. For each Elastic
product that you want to configure, copy the certificates to the relevant
configuration directory. For more information, see
<<enable-ssl,Configuring security in {es}>>,
{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}], and
{logstash-ref}/ls-security.html[Configuring security in Logstash].

NOTE: If you choose not to use `certutil`, the certificates that you obtain must allow for both
`clientAuth` and `serverAuth` if the extended key usage extension is present. The certificates
need to be in PEM or PKCS#12 format. Although not required, it is highly recommended that the
certificate contain the DNS names and/or IP addresses of the node so that hostname verification
can be used.
==== Node Certificates

See {ref}/node-certificates.html[Generating Node Certificates].

[[enable-ssl]]
==== Enabling SSL in the Node Configuration
==== Enabling TLS in the Node Configuration

Once you have the signed certificate, private key, and CA certificate you need to
modify the node configuration to enable SSL.

[[configure-ssl]]
To enable SSL, make the following changes in `elasticsearch.yml`:

. Specify the location of the node's keystore and the password(s) needed to
access the node's certificate. For example:
+
--
[source,yaml]
--------------------------------------------------
xpack.ssl.key: /home/es/config/x-pack/node01.key <1>
xpack.ssl.certificate: /home/es/config/x-pack/node01.crt <2>
xpack.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3>
--------------------------------------------------
<1> The full path to the node key file. This must be a location within the
Elasticsearch configuration directory.
<2> The full path to the node certificate. This must be a location within the
Elasticsearch configuration directory.
<3> An array of paths to the CA certificates that should be trusted. These paths
must be a location within the Elasticsearch configuration directory.
--

. Enable SSL on the transport networking layer to ensure that communication
between nodes is encrypted:
+
[source,yaml]
--------------------------------------------------
xpack.security.transport.ssl.enabled: true
--------------------------------------------------
+
. Enable SSL on the HTTP layer to ensure that communication between HTTP clients
and the cluster is encrypted:
+
[source,yaml]
--------------------------------------------------
xpack.security.http.ssl.enabled: true
--------------------------------------------------
+

. Restart Elasticsearch.
+
You must perform a full cluster restart. Nodes which are configured to use
SSL/TLS cannot communicate with nodes that are using unencrypted networking
(and vice-versa). After enabling SSL/TLS you must restart all nodes in order
to maintain communication across the cluster.

NOTE: All SSL related node settings are considered to be highly sensitive
and therefore are not exposed via the
{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API].
See {ref}/enable-ssl.html[Enabling TLS on {es} Nodes].

@@ -9,6 +9,6 @@ include::{asciidoc-dir}/../../shared/settings.asciidoc[]
include::license-settings.asciidoc[]
include::ml-settings.asciidoc[]
include::monitoring-settings.asciidoc[]
include::security-settings.asciidoc[]
//include::security-settings.asciidoc[]
include::notification-settings.asciidoc[]
include::sql-settings.asciidoc[]

@@ -159,95 +159,9 @@ information, see
{xpack-ref}/encrypting-communications.html[Encrypting Communications].

--
.. Generate node certificates. For example, you can use the `certutil` command
line tool to generate a certificate authority (CA) and signed certificates for
your nodes. For more information, see <<certutil>>.
.. <<node-certificates,Generate node certificates for each of your {es} nodes>>.

... Generate a new Certificate Authority (CA) for your {es} cluster:
+
--
[source,shell]
----------------------------------------------------------
bin/x-pack/certutil ca
----------------------------------------------------------

You can configure the cluster to trust all nodes that have a certificate that
has been signed by this CA.

The command outputs a single file, with a default name of `elastic-stack-ca.p12`.
This file is a PKCS#12 keystore that contains the public certificate for your CA
and the private key that is used to sign the certificates for each node.

The `certutil` command also prompts you for a password to protect the file and
key. If you plan to add more nodes to your cluster in the future, retain a copy
of the file and remember its password.
--

... Generate a certificate and private key for each node in your cluster:
+
--
[source,shell]
----------------------------------------------------------
bin/x-pack/certutil cert --ca elastic-stack-ca.p12
----------------------------------------------------------
The output is a single PKCS#12 keystore that includes the node certificate, node
key, and CA certificate.

You are also prompted for a password. You can enter a password for your
certificate and key, or you can leave the password blank by pressing Enter.

By default `certutil` generates certificates that have no hostname information
in them (that is, they do not have any Subject Alternative Name fields).
This means that you can use the certificate for every node in your cluster, but
you must turn off hostname verification as shown in the configuration below.

If you want to use hostname verification within your cluster, run the
`certutil cert` command once for each of your nodes and provide the `--name`,
`--dns` and `--ip` options.
--
... Alternatively, if you want to use a commercial or organization-specific CA,
you can use the `certutil csr` command to generate certificate signing requests
(CSR) for the nodes in your cluster. For more information, see <<certutil>>.

.. Copy the applicable `.p12` file into a directory within the {es} configuration
directory on each node. For example, `/home/es/config/certs`. There is no need
to copy the CA file to this directory.

.. Add the following information to the `elasticsearch.yml` file on each node:
+
--
[source,yaml]
-----------------------------------------------------------
xpack.ssl.keystore.path: certs/elastic-certificates.p12 <1>
xpack.ssl.truststore.path: certs/elastic-certificates.p12 <2>
xpack.security.transport.ssl.verification_mode: certificate <3>
xpack.security.transport.ssl.enabled: true
-----------------------------------------------------------
<1> If you created a separate certificate for each node, then you might need to
customize this path on each node. If the filename matches the node name, you can
use the `certs/${node.name}.p12` format, for example.
<2> The `certutil` output includes the CA certificate inside the PKCS#12
keystore, therefore the keystore can also be used as the truststore. This name
should match the `keystore.path` value.
<3> If you used the `--dns` or `--ip` options with the `certutil cert` command
and you want to enable strict hostname checking, set the verification mode to
`full`.

For more information about these settings, see
{ref}/security-settings.html[Security Settings in {es}].
--

.. If you secured the node's PKCS#12 file with a password, then you must add
the password to your {es} keystore. Run the following commands:
+
--
[source,shell]
-----------------------------------------------------------
bin/elasticsearch-keystore add xpack.ssl.keystore.secure_password

bin/elasticsearch-keystore add xpack.ssl.truststore.secure_password
-----------------------------------------------------------
--
.. <<enable-ssl, Enable TLS on each {es} node>>.

. Start {es}.
+

@@ -11,7 +11,7 @@ easy-to-install package. To access this functionality, you must
--

include::installing-xes.asciidoc[]
include::{xes-repo-dir}/settings/configuring-xes.asciidoc[]
include::setup-xclient.asciidoc[]
include::bootstrap-checks-xes.asciidoc[]
include::{xes-repo-dir}/security/configuring-es.asciidoc[]
include::setup-xclient.asciidoc[]
include::{xes-repo-dir}/settings/configuring-xes.asciidoc[]
include::bootstrap-checks-xes.asciidoc[]

@@ -21,6 +21,7 @@ import org.elasticsearch.xpack.ml.job.persistence.JobStorageDeletionTask;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;

public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAction.Response, DeleteJobAction.RequestBuilder> {

@@ -75,8 +76,8 @@ public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAc
        }

        @Override
        public Task createTask(long id, String type, String action, TaskId parentTaskId) {
            return new JobStorageDeletionTask(id, type, action, "delete-job-" + jobId, parentTaskId);
        public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
            return new JobStorageDeletionTask(id, type, action, "delete-job-" + jobId, parentTaskId, headers);
        }

        @Override

|
@ -12,16 +12,12 @@ import org.elasticsearch.action.ActionRequestValidationException;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.xpack.ml.calendars.Calendar;
|
||||
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
|
||||
import org.elasticsearch.xpack.ml.job.messages.Messages;
|
||||
|
@ -33,8 +29,6 @@ import java.util.List;
|
|||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
|
||||
|
||||
public class PostCalendarEventsAction extends Action<PostCalendarEventsAction.Request, PostCalendarEventsAction.Response,
|
||||
PostCalendarEventsAction.RequestBuilder> {
|
||||
public static final PostCalendarEventsAction INSTANCE = new PostCalendarEventsAction();
|
||||
|
@@ -58,55 +52,25 @@ public class PostCalendarEventsAction extends Action<PostCalendarEventsAction.Re

    public static class Request extends ActionRequest {

        public static Request parseRequest(String calendarId, BytesReference data, XContentType contentType) throws IOException {
            List<ScheduledEvent.Builder> events = new ArrayList<>();
        private static final ObjectParser<List<ScheduledEvent.Builder>, Void> PARSER = new ObjectParser<>(NAME, ArrayList::new);

            XContent xContent = contentType.xContent();
            int lineNumber = 0;
            int from = 0;
            int length = data.length();
            byte marker = xContent.streamSeparator();
            while (true) {
                int nextMarker = findNextMarker(marker, from, data, length);
                if (nextMarker == -1) {
                    break;
                }
                lineNumber++;
        static {
            PARSER.declareObjectArray(List::addAll, (p, c) -> ScheduledEvent.PARSER.apply(p, null), ScheduledEvent.RESULTS_FIELD);
        }

                try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) {
                    try {
                        ScheduledEvent.Builder event = ScheduledEvent.PARSER.apply(parser, null);
                        events.add(event);
                    } catch (ParsingException pe) {
                        throw ExceptionsHelper.badRequestException("Failed to parse scheduled event on line [" + lineNumber + "]", pe);
                    }
        public static Request parseRequest(String calendarId, XContentParser parser) throws IOException {
            List<ScheduledEvent.Builder> events = PARSER.apply(parser, null);

                    from = nextMarker + 1;
                }
            }

            for (ScheduledEvent.Builder event: events) {
            for (ScheduledEvent.Builder event : events) {
                if (event.getCalendarId() != null && event.getCalendarId().equals(calendarId) == false) {
                    throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INCONSISTENT_ID,
                            Calendar.ID.getPreferredName(), event.getCalendarId(), calendarId));
                }

                // Set the calendar Id in case it is null
                event.calendarId(calendarId);
            }
            return new Request(calendarId, events.stream().map(ScheduledEvent.Builder::build).collect(Collectors.toList()));
        }

        private static int findNextMarker(byte marker, int from, BytesReference data, int length) {
            for (int i = from; i < length; i++) {
                if (data.get(i) == marker) {
                    return i;
                }
            }
            if (from != length) {
                throw new IllegalArgumentException("The post calendar events request must be terminated by a newline [\n]");
            }
            return -1;
            return new Request(calendarId, events.stream().map(ScheduledEvent.Builder::build).collect(Collectors.toList()));
        }

        private String calendarId;

@@ -20,12 +20,9 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.watcher.support.Exceptions;

import java.io.IOException;
import java.util.Objects;
import java.util.Set;
import java.util.function.Consumer;

import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -139,6 +139,4 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc
        }

    }

}

@@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MLMetadataField;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;

@@ -126,6 +125,5 @@ public class UpdateCalendarJobAction extends Action<UpdateCalendarJobAction.Requ
            super(client, INSTANCE, new Request());
        }
    }

}

@@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;

import java.io.IOException;

@@ -111,16 +112,18 @@ public class UpdateProcessAction extends

        private ModelPlotConfig modelPlotConfig;
        private List<JobUpdate.DetectorUpdate> detectorUpdates;
        private MlFilter filter;
        private boolean updateScheduledEvents = false;

        Request() {
        }

        public Request(String jobId, ModelPlotConfig modelPlotConfig, List<JobUpdate.DetectorUpdate> detectorUpdates,
        public Request(String jobId, ModelPlotConfig modelPlotConfig, List<JobUpdate.DetectorUpdate> detectorUpdates, MlFilter filter,
                       boolean updateScheduledEvents) {
            super(jobId);
            this.modelPlotConfig = modelPlotConfig;
            this.detectorUpdates = detectorUpdates;
            this.filter = filter;
            this.updateScheduledEvents = updateScheduledEvents;
        }

@@ -132,6 +135,10 @@ public class UpdateProcessAction extends
            return detectorUpdates;
        }

        public MlFilter getFilter() {
            return filter;
        }

        public boolean isUpdateScheduledEvents() {
            return updateScheduledEvents;
        }

@@ -144,6 +151,7 @@ public class UpdateProcessAction extends
                detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new);
            }
            if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
                filter = in.readOptionalWriteable(MlFilter::new);
                updateScheduledEvents = in.readBoolean();
            }
        }

@@ -158,13 +166,14 @@ public class UpdateProcessAction extends
                out.writeList(detectorUpdates);
            }
            if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
                out.writeOptionalWriteable(filter);
                out.writeBoolean(updateScheduledEvents);
            }
        }

        @Override
        public int hashCode() {
            return Objects.hash(getJobId(), modelPlotConfig, detectorUpdates, updateScheduledEvents);
            return Objects.hash(getJobId(), modelPlotConfig, detectorUpdates, filter, updateScheduledEvents);
        }

        @Override

@@ -180,8 +189,8 @@ public class UpdateProcessAction extends
            return Objects.equals(getJobId(), other.getJobId()) &&
                    Objects.equals(modelPlotConfig, other.modelPlotConfig) &&
                    Objects.equals(detectorUpdates, other.detectorUpdates) &&
                    Objects.equals(filter, other.filter) &&
                    Objects.equals(updateScheduledEvents, other.updateScheduledEvents);
        }
    }

}

@@ -29,7 +29,6 @@ import java.util.Objects;

public class JobUpdate implements Writeable, ToXContentObject {
    public static final ParseField DETECTORS = new ParseField("detectors");
    public static final ParseField UPDATE_SCHEDULED_EVENTS = new ParseField("update_scheduled_events");

    public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
            "job_update", args -> new Builder((String) args[0]));

@@ -50,7 +49,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT);
        PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID);
        PARSER.declareLong(Builder::setEstablishedModelMemory, Job.ESTABLISHED_MODEL_MEMORY);
        PARSER.declareBoolean(Builder::setUpdateScheduledEvents, UPDATE_SCHEDULED_EVENTS);
    }

    /**

@@ -75,7 +73,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
    private final Map<String, Object> customSettings;
    private final String modelSnapshotId;
    private final Long establishedModelMemory;
    private final boolean updateScheduledEvents;

    private JobUpdate(String jobId, @Nullable List<String> groups, @Nullable String description,
                      @Nullable List<DetectorUpdate> detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig,

@@ -83,7 +80,7 @@ public class JobUpdate implements Writeable, ToXContentObject {
                      @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays,
                      @Nullable Long modelSnapshotRetentionDays, @Nullable List<String> categorisationFilters,
                      @Nullable Map<String, Object> customSettings, @Nullable String modelSnapshotId,
                      @Nullable Long establishedModelMemory, boolean updateScheduledEvents) {
                      @Nullable Long establishedModelMemory) {
        this.jobId = jobId;
        this.groups = groups;
        this.description = description;

@@ -98,7 +95,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        this.customSettings = customSettings;
        this.modelSnapshotId = modelSnapshotId;
        this.establishedModelMemory = establishedModelMemory;
        this.updateScheduledEvents = updateScheduledEvents;
    }

    public JobUpdate(StreamInput in) throws IOException {

@@ -133,12 +129,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        } else {
            establishedModelMemory = null;
        }

        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
            updateScheduledEvents = in.readBoolean();
        } else {
            updateScheduledEvents = false;
        }
    }

    @Override

@@ -168,10 +158,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
            out.writeOptionalLong(establishedModelMemory);
        }

        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
            out.writeBoolean(updateScheduledEvents);
        }
    }

    public String getJobId() {

@@ -234,10 +220,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        return modelPlotConfig != null || detectorUpdates != null;
    }

    public boolean isUpdateScheduledEvents() {
        return updateScheduledEvents;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();

@@ -281,7 +263,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        if (establishedModelMemory != null) {
            builder.field(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName(), establishedModelMemory);
        }
        builder.field(UPDATE_SCHEDULED_EVENTS.getPreferredName(), updateScheduledEvents);
        builder.endObject();
        return builder;
    }

@@ -418,15 +399,14 @@ public class JobUpdate implements Writeable, ToXContentObject {
                && Objects.equals(this.categorizationFilters, that.categorizationFilters)
                && Objects.equals(this.customSettings, that.customSettings)
                && Objects.equals(this.modelSnapshotId, that.modelSnapshotId)
                && Objects.equals(this.establishedModelMemory, that.establishedModelMemory)
                && Objects.equals(this.updateScheduledEvents, that.updateScheduledEvents);
                && Objects.equals(this.establishedModelMemory, that.establishedModelMemory);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays,
                backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings,
                modelSnapshotId, establishedModelMemory, updateScheduledEvents);
                modelSnapshotId, establishedModelMemory);
    }

    public static class DetectorUpdate implements Writeable, ToXContentObject {

@@ -536,7 +516,6 @@ public class JobUpdate implements Writeable, ToXContentObject {
        private Map<String, Object> customSettings;
        private String modelSnapshotId;
        private Long establishedModelMemory;
        private boolean updateScheduledEvents = false;

        public Builder(String jobId) {
            this.jobId = jobId;

@@ -612,15 +591,10 @@ public class JobUpdate implements Writeable, ToXContentObject {
            return this;
        }

        public Builder setUpdateScheduledEvents(boolean updateScheduledEvents) {
            this.updateScheduledEvents = updateScheduledEvents;
            return this;
        }

        public JobUpdate build() {
            return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval,
                    renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings,
                    modelSnapshotId, establishedModelMemory, updateScheduledEvents);
                    modelSnapshotId, establishedModelMemory);
        }
    }
}

@@ -177,8 +177,8 @@ public class RuleCondition implements ToXContentObject, Writeable {
        return Objects.hash(type, fieldName, fieldValue, condition, filterId);
    }

    public static RuleCondition createCategorical(String fieldName, String valueFilter) {
        return new RuleCondition(RuleConditionType.CATEGORICAL, fieldName, null, null, valueFilter);
    public static RuleCondition createCategorical(String fieldName, String filterId) {
        return new RuleCondition(RuleConditionType.CATEGORICAL, fieldName, null, null, filterId);
    }

    public static RuleCondition createNumerical(RuleConditionType conditionType, String fieldName, String fieldValue,

@@ -40,6 +40,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.state.Quantiles;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;


@@ -53,8 +54,8 @@ import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class JobStorageDeletionTask extends Task {
    private final Logger logger;

    public JobStorageDeletionTask(long id, String type, String action, String description, TaskId parentTask) {
        super(id, type, action, description, parentTask);
    public JobStorageDeletionTask(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) {
        super(id, type, action, description, parentTask, headers);
        this.logger = Loggers.getLogger(getClass());
    }

@@ -17,6 +17,7 @@ import org.elasticsearch.tasks.TaskCancelledException;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

/**

@@ -35,8 +36,9 @@ public class AllocatedPersistentTask extends CancellableTask {
    private volatile TaskManager taskManager;


    public AllocatedPersistentTask(long id, String type, String action, String description, TaskId parentTask) {
        super(id, type, action, description, parentTask);
    public AllocatedPersistentTask(long id, String type, String action, String description, TaskId parentTask,
                                   Map<String, String> headers) {
        super(id, type, action, description, parentTask, headers);
        this.state = new AtomicReference<>(State.STARTED);
    }

@@ -15,6 +15,7 @@ import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.PersistentTask;

import java.util.Map;
import java.util.function.Predicate;

/**

@@ -88,8 +89,8 @@ public abstract class PersistentTasksExecutor<Params extends PersistentTaskParam
     * Creates a AllocatedPersistentTask for communicating with task manager
     */
    protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
                                                 PersistentTask<Params> taskInProgress) {
        return new AllocatedPersistentTask(id, type, action, getDescription(taskInProgress), parentTaskId);
                                                 PersistentTask<Params> taskInProgress, Map<String, String> headers) {
        return new AllocatedPersistentTask(id, type, action, getDescription(taskInProgress), parentTaskId, headers);
    }

    /**

@@ -147,8 +147,8 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu
            }

            @Override
            public Task createTask(long id, String type, String action, TaskId parentTaskId) {
                return executor.createTask(id, type, action, parentTaskId, taskInProgress);
            public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
                return executor.createTask(id, type, action, parentTaskId, taskInProgress, headers);
            }
        };
        AllocatedPersistentTask task = (AllocatedPersistentTask) taskManager.register("persistent", taskInProgress.getTaskName() + "[c]",

@@ -20,6 +20,7 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -138,8 +139,8 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
    }

    @Override
    public Task createTask(long id, String type, String action, TaskId parentTaskId) {
        return new CancellableTask(id, type, action, getDescription(), parentTaskId) {
    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
        return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) {
            @Override
            public boolean shouldCancelChildrenOnCancellation() {
                return true;

@@ -7,9 +7,6 @@ package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;

@@ -26,7 +23,8 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;

import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;

@@ -34,15 +32,19 @@ import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class TransportDeleteCalendarAction extends HandledTransportAction<DeleteCalendarAction.Request, DeleteCalendarAction.Response> {

    private final Client client;
    private final JobManager jobManager;
    private final JobProvider jobProvider;

    @Inject
    public TransportDeleteCalendarAction(Settings settings, ThreadPool threadPool,
                                         TransportService transportService, ActionFilters actionFilters,
                                         IndexNameExpressionResolver indexNameExpressionResolver,
                                         Client client) {
                                         Client client, JobManager jobManager, JobProvider jobProvider) {
        super(settings, DeleteCalendarAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, DeleteCalendarAction.Request::new);
        this.client = client;
        this.jobManager = jobManager;
        this.jobProvider = jobProvider;
    }

    @Override

@@ -50,29 +52,25 @@ public class TransportDeleteCalendarAction extends HandledTransportAction<Delete

        final String calendarId = request.getCalendarId();

        GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, Calendar.documentId(calendarId));
        executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener<GetResponse>() {
            @Override
            public void onResponse(GetResponse getResponse) {
                if (getResponse.isExists() == false) {
                    listener.onFailure(new ResourceNotFoundException("Could not delete calendar [" + calendarId
                            + "] because it does not exist"));
                    return;
                }

                // Delete calendar and events
                DeleteByQueryRequest dbqRequest = buildDeleteByQuery(calendarId);
                executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, dbqRequest, ActionListener.wrap(
                        response -> listener.onResponse(new DeleteCalendarAction.Response(true)),
                        listener::onFailure));
            }

            @Override
            public void onFailure(Exception e) {
                listener.onFailure(ExceptionsHelper.serverError("Could not delete calendar [" + calendarId + "]", e));
            }
        }
        ActionListener<Calendar> calendarListener = ActionListener.wrap(
                calendar -> {
                    // Delete calendar and events
                    DeleteByQueryRequest dbqRequest = buildDeleteByQuery(calendarId);
                    executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, dbqRequest, ActionListener.wrap(
                            response -> {
                                if (response.getDeleted() == 0) {
                                    listener.onFailure(new ResourceNotFoundException("No calendar with id [" + calendarId + "]"));
                                    return;
                                }
                                jobManager.updateProcessOnCalendarChanged(calendar.getJobIds());
                                listener.onResponse(new DeleteCalendarAction.Response(true));
                            },
                            listener::onFailure));
                },
                listener::onFailure
        );

        jobProvider.calendar(calendarId, calendarListener);
    }

    private DeleteByQueryRequest buildDeleteByQuery(String calendarId) {

@@ -5,7 +5,6 @@
 */
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteAction;

@@ -26,6 +25,8 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

import java.util.Map;

@@ -37,15 +38,19 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction<D
        DeleteCalendarEventAction.Response> {

    private final Client client;
    private final JobProvider jobProvider;
    private final JobManager jobManager;

    @Inject
    public TransportDeleteCalendarEventAction(Settings settings, ThreadPool threadPool,
                                              TransportService transportService, ActionFilters actionFilters,
                                              IndexNameExpressionResolver indexNameExpressionResolver,
                                              Client client) {
                                              Client client, JobProvider jobProvider, JobManager jobManager) {
        super(settings, DeleteCalendarEventAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, DeleteCalendarEventAction.Request::new);
        this.client = client;
        this.jobProvider = jobProvider;
        this.jobManager = jobManager;
    }

    @Override

@@ -57,27 +62,34 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction<D
            @Override
            public void onResponse(GetResponse getResponse) {
                if (getResponse.isExists() == false) {
                    listener.onFailure(new ResourceNotFoundException("Missing event [" + eventId + "]"));
                    listener.onFailure(new ResourceNotFoundException("No event with id [" + eventId + "]"));
                    return;
                }

                Map<String, Object> source = getResponse.getSourceAsMap();
                String calendarId = (String) source.get(Calendar.ID.getPreferredName());
                if (calendarId == null) {
                    listener.onFailure(new ElasticsearchStatusException("Event [" + eventId + "] does not have a valid "
                            + Calendar.ID.getPreferredName(), RestStatus.BAD_REQUEST));
                    listener.onFailure(ExceptionsHelper.badRequestException("Event [" + eventId + "] does not have a valid "
                            + Calendar.ID.getPreferredName()));
                    return;
                }

                if (calendarId.equals(request.getCalendarId()) == false) {
                    listener.onFailure(new ElasticsearchStatusException(
                    listener.onFailure(ExceptionsHelper.badRequestException(
                            "Event [" + eventId + "] has " + Calendar.ID.getPreferredName() +
                                    " [" + calendarId + "] which does not match the request " + Calendar.ID.getPreferredName() +
                                    " [" + request.getCalendarId() + "]", RestStatus.BAD_REQUEST));
                                    " [" + request.getCalendarId() + "]"));
                    return;
                }

                deleteEvent(eventId, listener);
                ActionListener<Calendar> calendarListener = ActionListener.wrap(
                        calendar -> {
                            deleteEvent(eventId, calendar, listener);
                        },
                        listener::onFailure
                );

                jobProvider.calendar(calendarId, calendarListener);
            }

            @Override

@@ -87,7 +99,7 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction<D
        });
    }

    private void deleteEvent(String eventId, ActionListener<DeleteCalendarEventAction.Response> listener) {
    private void deleteEvent(String eventId, Calendar calendar, ActionListener<DeleteCalendarEventAction.Response> listener) {
        DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, eventId);
        deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);

@@ -97,9 +109,9 @@ public class TransportDeleteCalendarEventAction extends HandledTransportAction<D
            public void onResponse(DeleteResponse response) {

                if (response.status() == RestStatus.NOT_FOUND) {
                    listener.onFailure(new ResourceNotFoundException("Could not delete event [" + eventId
                            + "] because it does not exist"));
                    listener.onFailure(new ResourceNotFoundException("No event with id [" + eventId + "]"));
                } else {
                    jobManager.updateProcessOnCalendarChanged(calendar.getJobIds());
                    listener.onResponse(new DeleteCalendarEventAction.Response(true));
                }
            }

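Editor's note: the hunks above replace a direct deleteEvent() call with a two-step listener chain, fetching the calendar first so the delete step can tell the job manager which job IDs were affected. Below is a minimal, JDK-only sketch of that chaining pattern; the types are illustrative stand-ins, not the x-pack ActionListener API.

import java.util.function.Consumer;

// Illustrative stand-in for ActionListener.wrap(onSuccess, onFailure):
// success continues the chain, any failure funnels to one error handler.
final class ListenerChainSketch {
    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    static <T> Listener<T> wrap(Consumer<T> onSuccess, Consumer<Exception> onFailure) {
        return new Listener<T>() {
            @Override
            public void onResponse(T value) {
                onSuccess.accept(value);
            }

            @Override
            public void onFailure(Exception e) {
                onFailure.accept(e);
            }
        };
    }

    public static void main(String[] args) {
        Listener<String> calendarListener = wrap(
                calendar -> System.out.println("calendar fetched, deleting event for " + calendar),
                e -> System.err.println("failed: " + e.getMessage()));
        calendarListener.onResponse("my-calendar"); // stands in for jobProvider.calendar(...)
    }
}
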
@@ -591,8 +591,9 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct

    @Override
    protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
                                                 PersistentTasksCustomMetaData.PersistentTask<OpenJobAction.JobParams> persistentTask) {
        return new JobTask(persistentTask.getParams().getJobId(), id, type, action, parentTaskId);
                                                 PersistentTasksCustomMetaData.PersistentTask<OpenJobAction.JobParams> persistentTask,
                                                 Map<String, String> headers) {
        return new JobTask(persistentTask.getParams().getJobId(), id, type, action, parentTaskId, headers);
    }

    void setMaxConcurrentJobAllocations(int maxConcurrentJobAllocations) {

@@ -613,8 +614,8 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
        private final String jobId;
        private volatile AutodetectProcessManager autodetectProcessManager;

        JobTask(String jobId, long id, String type, String action, TaskId parentTask) {
            super(id, type, action, "job-" + jobId, parentTask);
        JobTask(String jobId, long id, String type, String action, TaskId parentTask, Map<String, String> headers) {
            super(id, type, action, "job-" + jobId, parentTask, headers);
            this.jobId = jobId;
        }

@@ -23,7 +23,9 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -39,16 +41,18 @@ public class TransportPostCalendarEventsAction extends HandledTransportAction<Po

    private final Client client;
    private final JobProvider jobProvider;
    private final JobManager jobManager;

    @Inject
    public TransportPostCalendarEventsAction(Settings settings, ThreadPool threadPool,
                                             TransportService transportService, ActionFilters actionFilters,
                                             IndexNameExpressionResolver indexNameExpressionResolver,
                                             Client client, JobProvider jobProvider) {
                                             Client client, JobProvider jobProvider, JobManager jobManager) {
        super(settings, PostCalendarEventsAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, PostCalendarEventsAction.Request::new);
        this.client = client;
        this.jobProvider = jobProvider;
        this.jobManager = jobManager;
    }

    @Override

@@ -56,8 +60,8 @@ public class TransportPostCalendarEventsAction extends HandledTransportAction<Po
                                 ActionListener<PostCalendarEventsAction.Response> listener) {
        List<ScheduledEvent> events = request.getScheduledEvents();

        ActionListener<Boolean> calendarExistsListener = ActionListener.wrap(
                r -> {
        ActionListener<Calendar> calendarListener = ActionListener.wrap(
                calendar -> {
                    BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();

                    for (ScheduledEvent event: events) {

@@ -78,6 +82,7 @@ public class TransportPostCalendarEventsAction extends HandledTransportAction<Po
                            new ActionListener<BulkResponse>() {
                                @Override
                                public void onResponse(BulkResponse response) {
                                    jobManager.updateProcessOnCalendarChanged(calendar.getJobIds());
                                    listener.onResponse(new PostCalendarEventsAction.Response(events));
                                }

@@ -90,13 +95,6 @@ public class TransportPostCalendarEventsAction extends HandledTransportAction<Po
                },
                listener::onFailure);

        checkCalendarExists(request.getCalendarId(), calendarExistsListener);
    }

    private void checkCalendarExists(String calendarId, ActionListener<Boolean> listener) {
        jobProvider.calendar(calendarId, ActionListener.wrap(
                c -> listener.onResponse(true),
                listener::onFailure
        ));
        jobProvider.calendar(request.getCalendarId(), calendarListener);
    }
}

@@ -28,6 +28,7 @@ import org.elasticsearch.xpack.ml.MLMetadataField;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

import java.io.IOException;

@@ -42,16 +43,18 @@ public class TransportPutCalendarAction extends HandledTransportAction<PutCalend

    private final Client client;
    private final ClusterService clusterService;
    private final JobManager jobManager;

    @Inject
    public TransportPutCalendarAction(Settings settings, ThreadPool threadPool,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver,
                                      Client client, ClusterService clusterService) {
                                      Client client, ClusterService clusterService, JobManager jobManager) {
        super(settings, PutCalendarAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, PutCalendarAction.Request::new);
        this.client = client;
        this.clusterService = clusterService;
        this.jobManager = jobManager;
    }

    @Override

@@ -78,6 +81,7 @@ public class TransportPutCalendarAction extends HandledTransportAction<PutCalend
                new ActionListener<IndexResponse>() {
                    @Override
                    public void onResponse(IndexResponse indexResponse) {
                        jobManager.updateProcessOnCalendarChanged(calendar.getJobIds());
                        listener.onResponse(new PutCalendarAction.Response(calendar));
                    }

@@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -35,15 +36,17 @@ import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class TransportPutFilterAction extends HandledTransportAction<PutFilterAction.Request, PutFilterAction.Response> {

    private final Client client;
    private final JobManager jobManager;

    @Inject
    public TransportPutFilterAction(Settings settings, ThreadPool threadPool,
                                    TransportService transportService, ActionFilters actionFilters,
                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                    Client client) {
                                    Client client, JobManager jobManager) {
        super(settings, PutFilterAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, PutFilterAction.Request::new);
        this.client = client;
        this.jobManager = jobManager;
    }

    @Override

@@ -64,6 +67,7 @@ public class TransportPutFilterAction extends HandledTransportAction<PutFilterAc
                new ActionListener<BulkResponse>() {
                    @Override
                    public void onResponse(BulkResponse indexResponse) {
                        jobManager.updateProcessOnFilterChanged(filter);
                        listener.onResponse(new PutFilterAction.Response());
                    }

@@ -46,6 +46,7 @@ import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.persistent.PersistentTasksService;

import java.util.Map;
import java.util.function.Predicate;

/* This class extends from TransportMasterNodeAction for cluster state observing purposes.

@@ -222,8 +223,9 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star
        @Override
        protected AllocatedPersistentTask createTask(
                long id, String type, String action, TaskId parentTaskId,
                PersistentTasksCustomMetaData.PersistentTask<StartDatafeedAction.DatafeedParams> persistentTask) {
            return new DatafeedTask(id, type, action, parentTaskId, persistentTask.getParams());
                PersistentTasksCustomMetaData.PersistentTask<StartDatafeedAction.DatafeedParams> persistentTask,
                Map<String, String> headers) {
            return new DatafeedTask(id, type, action, parentTaskId, persistentTask.getParams(), headers);
        }
    }

@@ -235,8 +237,9 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star
        /* only package protected for testing */
        volatile DatafeedManager datafeedManager;

        DatafeedTask(long id, String type, String action, TaskId parentTaskId, StartDatafeedAction.DatafeedParams params) {
            super(id, type, action, "datafeed-" + params.getDatafeedId(), parentTaskId);
        DatafeedTask(long id, String type, String action, TaskId parentTaskId, StartDatafeedAction.DatafeedParams params,
                     Map<String, String> headers) {
            super(id, type, action, "datafeed-" + params.getDatafeedId(), parentTaskId, headers);
            this.datafeedId = params.getDatafeedId();
            this.startTime = params.getStartTime();
            this.endTime = params.getEndTime();

@@ -17,6 +17,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MLMetadataField;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@@ -24,16 +25,18 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction<Upd

    private final ClusterService clusterService;
    private final JobProvider jobProvider;
    private final JobManager jobManager;

    @Inject
    public TransportUpdateCalendarJobAction(Settings settings, ThreadPool threadPool,
                                            TransportService transportService, ActionFilters actionFilters,
                                            IndexNameExpressionResolver indexNameExpressionResolver,
                                            ClusterService clusterService, JobProvider jobProvider) {
                                            ClusterService clusterService, JobProvider jobProvider, JobManager jobManager) {
        super(settings, UpdateCalendarJobAction.NAME, threadPool, transportService, actionFilters,
                indexNameExpressionResolver, UpdateCalendarJobAction.Request::new);
        this.clusterService = clusterService;
        this.jobProvider = jobProvider;
        this.jobManager = jobManager;
    }

    @Override

@@ -55,6 +58,9 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction<Upd
        }

        jobProvider.updateCalendar(request.getCalendarId(), request.getJobIdsToAdd(), request.getJobIdsToRemove(),
                c -> listener.onResponse(new PutCalendarAction.Response(c)), listener::onFailure);
                c -> {
                    jobManager.updateProcessOnCalendarChanged(c.getJobIds());
                    listener.onResponse(new PutCalendarAction.Response(c));
                }, listener::onFailure);
    }
}

@@ -41,10 +41,15 @@ public class TransportUpdateProcessAction extends TransportJobTaskAction<UpdateP
    @Override
    protected void taskOperation(UpdateProcessAction.Request request, TransportOpenJobAction.JobTask task,
                                 ActionListener<UpdateProcessAction.Response> listener) {
        UpdateParams updateParams = UpdateParams.builder(request.getJobId())
                .modelPlotConfig(request.getModelPlotConfig())
                .detectorUpdates(request.getDetectorUpdates())
                .filter(request.getFilter())
                .updateScheduledEvents(request.isUpdateScheduledEvents())
                .build();

        try {
            processManager.writeUpdateProcessMessage(task,
                    new UpdateParams(request.getModelPlotConfig(),
                            request.getDetectorUpdates(), request.isUpdateScheduledEvents()),
            processManager.writeUpdateProcessMessage(task, updateParams,
                    e -> {
                        if (e == null) {
                            listener.onResponse(new UpdateProcessAction.Response());

@@ -37,10 +37,12 @@ import org.elasticsearch.xpack.ml.job.config.DataDescription;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.JobState;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister;
import org.elasticsearch.xpack.ml.job.persistence.JobStorageDeletionTask;
import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSizeStats;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.ml.notifications.Auditor;

@@ -280,18 +282,16 @@ public class JobManager extends AbstractComponent {
                    // nothing to do
                    return currentState;
                }
                changeWasRequired = true;
                // No change is required if the fields that the C++ uses aren't being updated
                changeWasRequired = jobUpdate.isAutodetectProcessUpdate();
                return updateClusterState(updatedJob, true, currentState);
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                if (changeWasRequired) {
                    PersistentTasksCustomMetaData persistentTasks =
                            newState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
                    JobState jobState = MlMetadata.getJobState(jobId, persistentTasks);
                    if (jobState == JobState.OPENED) {
                        updateJobProcessNotifier.submitJobUpdate(jobUpdate);
                    if (isJobOpen(newState, jobId)) {
                        updateJobProcessNotifier.submitJobUpdate(UpdateParams.fromJobUpdate(jobUpdate));
                    }
                } else {
                    logger.debug("[{}] Ignored job update with no changes: {}", () -> jobId, () -> {

@@ -308,12 +308,40 @@ public class JobManager extends AbstractComponent {
                    });
                }

    private boolean isJobOpen(ClusterState clusterState, String jobId) {
        PersistentTasksCustomMetaData persistentTasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
        JobState jobState = MlMetadata.getJobState(jobId, persistentTasks);
        return jobState == JobState.OPENED;
    }

    ClusterState updateClusterState(Job job, boolean overwrite, ClusterState currentState) {
        MlMetadata.Builder builder = createMlMetadataBuilder(currentState);
        builder.putJob(job, overwrite);
        return buildNewClusterState(currentState, builder);
    }

    public void updateProcessOnFilterChanged(MlFilter filter) {
        ClusterState clusterState = clusterService.state();
        QueryPage<Job> jobs = expandJobs("*", true, clusterService.state());
        for (Job job : jobs.results()) {
            if (isJobOpen(clusterState, job.getId())) {
                Set<String> jobFilters = job.getAnalysisConfig().extractReferencedFilters();
                if (jobFilters.contains(filter.getId())) {
                    updateJobProcessNotifier.submitJobUpdate(UpdateParams.filterUpdate(job.getId(), filter));
                }
            }
        }
    }

    public void updateProcessOnCalendarChanged(List<String> calendarJobIds) {
        ClusterState clusterState = clusterService.state();
        for (String jobId : calendarJobIds) {
            if (isJobOpen(clusterState, jobId)) {
                updateJobProcessNotifier.submitJobUpdate(UpdateParams.scheduledEventsUpdate(jobId));
            }
        }
    }

    public void deleteJob(DeleteJobAction.Request request, JobStorageDeletionTask task,
                          ActionListener<DeleteJobAction.Response> actionListener) {

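Editor's note: the two new JobManager methods above push process updates only to jobs that are currently open; closed jobs pick up filter and calendar changes when they are next opened. A self-contained sketch of that gating logic (the open-job set here stands in for the cluster-state lookup in isJobOpen()):

import java.util.List;
import java.util.Set;

// Simplified stand-in for JobManager.updateProcessOnCalendarChanged():
// only currently open jobs get a scheduled-events update submitted.
final class CalendarChangeSketch {
    private final Set<String> openJobs;

    CalendarChangeSketch(Set<String> openJobs) {
        this.openJobs = openJobs;
    }

    void updateProcessOnCalendarChanged(List<String> calendarJobIds) {
        for (String jobId : calendarJobIds) {
            if (openJobs.contains(jobId)) {
                // stands in for updateJobProcessNotifier.submitJobUpdate(...)
                System.out.println("submit scheduled-events update for " + jobId);
            }
        }
    }

    public static void main(String[] args) {
        new CalendarChangeSketch(Set.of("job-a"))
                .updateProcessOnCalendarChanged(List.of("job-a", "job-b")); // only job-a is updated
    }
}
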
@@ -17,7 +17,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.UpdateProcessAction;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams;

import java.util.concurrent.LinkedBlockingQueue;

@@ -30,7 +30,7 @@ public class UpdateJobProcessNotifier extends AbstractComponent implements Local

    private final Client client;
    private final ThreadPool threadPool;
    private final LinkedBlockingQueue<JobUpdate> orderedJobUpdates = new LinkedBlockingQueue<>(1000);
    private final LinkedBlockingQueue<UpdateParams> orderedJobUpdates = new LinkedBlockingQueue<>(1000);

    private volatile ThreadPool.Cancellable cancellable;

@@ -47,8 +47,8 @@ public class UpdateJobProcessNotifier extends AbstractComponent implements Local
        });
    }

    boolean submitJobUpdate(JobUpdate jobUpdate) {
        return orderedJobUpdates.offer(jobUpdate);
    boolean submitJobUpdate(UpdateParams updateParams) {
        return orderedJobUpdates.offer(updateParams);
    }

    @Override

@@ -82,24 +82,17 @@ public class UpdateJobProcessNotifier extends AbstractComponent implements Local

    private void processNextUpdate() {
        try {
            JobUpdate jobUpdate = orderedJobUpdates.poll();
            if (jobUpdate != null) {
                executeRemoteJobIfNecessary(jobUpdate);
            UpdateParams updateParams = orderedJobUpdates.poll();
            if (updateParams != null) {
                executeRemoteJob(updateParams);
            }
        } catch (Exception e) {
            logger.error("Error while processing next job update", e);
        }
    }

    void executeRemoteJobIfNecessary(JobUpdate update) {
        // Do nothing if the fields that the C++ needs aren't being updated
        if (update.isAutodetectProcessUpdate()) {
            executeRemoteJob(update);
        }
    }

    void executeRemoteJob(JobUpdate update) {
        Request request = new Request(update.getJobId(), update.getModelPlotConfig(), update.getDetectorUpdates(),
    void executeRemoteJob(UpdateParams update) {
        Request request = new Request(update.getJobId(), update.getModelPlotConfig(), update.getDetectorUpdates(), update.getFilter(),
                update.isUpdateScheduledEvents());

        executeAsyncWithOrigin(client, ML_ORIGIN, UpdateProcessAction.INSTANCE, request,

@@ -126,5 +119,4 @@ public class UpdateJobProcessNotifier extends AbstractComponent implements Local
            }
        });
    }

}

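Editor's note: the notifier keeps ordering guarantees by funneling all updates through one bounded queue with a non-blocking offer on the producer side and a single-item poll on the consumer side. A minimal JDK-only sketch of that pattern, assuming the same 1000-item bound as the diff:

import java.util.concurrent.LinkedBlockingQueue;

// Minimal stand-in for the UpdateJobProcessNotifier queueing pattern:
// producers enqueue with a non-blocking offer() (dropping on overflow),
// while a single consumer drains one update per tick.
final class UpdateQueueSketch {
    private final LinkedBlockingQueue<String> updates = new LinkedBlockingQueue<>(1000);

    boolean submit(String update) {
        return updates.offer(update); // returns false when the queue is full
    }

    void processNextUpdate() {
        String update = updates.poll(); // null when nothing is queued
        if (update != null) {
            System.out.println("sending update: " + update);
        }
    }

    public static void main(String[] args) {
        UpdateQueueSketch q = new UpdateQueueSketch();
        q.submit("filter-update");
        q.processNextUpdate();
    }
}
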
@@ -22,7 +22,6 @@ import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzer;
import org.elasticsearch.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.xpack.ml.job.config.CategorizationAnalyzerConfig;
import org.elasticsearch.xpack.ml.job.config.DataDescription;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;

@@ -45,7 +44,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;

@@ -58,7 +56,6 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;

public class AutodetectCommunicator implements Closeable {

@@ -215,35 +212,23 @@ public class AutodetectCommunicator implements Closeable {
                autodetectProcess.writeUpdateModelPlotMessage(updateParams.getModelPlotConfig());
            }

            List<DetectionRule> eventsAsRules = Collections.emptyList();
            if (scheduledEvents.isEmpty() == false) {
                eventsAsRules = scheduledEvents.stream()
                        .map(e -> e.toDetectionRule(job.getAnalysisConfig().getBucketSpan()))
                        .collect(Collectors.toList());
            }

            // All detection rules for a detector must be updated together as the update
            // wipes any previously set rules.
            // Build a single list of rules for events and detection rules.
            List<List<DetectionRule>> rules = new ArrayList<>(job.getAnalysisConfig().getDetectors().size());
            for (int i = 0; i < job.getAnalysisConfig().getDetectors().size(); i++) {
                List<DetectionRule> detectorRules = new ArrayList<>(eventsAsRules);
                rules.add(detectorRules);
            // Filters have to be written before detectors
            if (updateParams.getFilter() != null) {
                autodetectProcess.writeUpdateFiltersMessage(Collections.singletonList(updateParams.getFilter()));
            }

            // Add detector rules
            if (updateParams.getDetectorUpdates() != null) {
                for (JobUpdate.DetectorUpdate update : updateParams.getDetectorUpdates()) {
                    if (update.getRules() != null) {
                        rules.get(update.getDetectorIndex()).addAll(update.getRules());
                        autodetectProcess.writeUpdateDetectorRulesMessage(update.getDetectorIndex(), update.getRules());
                    }
                }
            }

            for (int i = 0; i < job.getAnalysisConfig().getDetectors().size(); i++) {
                if (!rules.get(i).isEmpty()) {
                    autodetectProcess.writeUpdateDetectorRulesMessage(i, rules.get(i));
                }
            // Add scheduled events; null means there's no update but an empty list means we should clear any events in the process
            if (scheduledEvents != null) {
                autodetectProcess.writeUpdateScheduledEventsMessage(scheduledEvents, job.getAnalysisConfig().getBucketSpan());
            }

            return null;

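Editor's note: the rewritten writeUpdateProcessMessage() above establishes an explicit ordering contract: filters before detector rules, scheduled events last, with a null events list meaning "no change" and an empty list meaning "clear". A condensed, runnable restatement of just that contract:

import java.util.List;

// Condensed restatement of the ordering in the rewritten
// writeUpdateProcessMessage(): filters first, then per-detector rules,
// then scheduled events; a null events list means "leave events alone",
// an empty list means "clear events in the process".
final class UpdateMessageOrderSketch {
    static void writeUpdate(List<String> filters, List<String> detectorRules, List<String> scheduledEvents) {
        if (filters != null) {
            System.out.println("write filters: " + filters); // must precede detectors
        }
        if (detectorRules != null) {
            System.out.println("write detector rules: " + detectorRules);
        }
        if (scheduledEvents != null) { // null: no scheduled-events update requested
            System.out.println(scheduledEvents.isEmpty()
                    ? "clear scheduled events"
                    : "write scheduled events: " + scheduledEvents);
        }
    }

    public static void main(String[] args) {
        writeUpdate(List.of("safe_domains"), null, List.of()); // filters updated, events cleared
    }
}
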
@@ -5,7 +5,10 @@
 */
package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams;

@@ -74,6 +77,23 @@ public interface AutodetectProcess extends Closeable {
    void writeUpdateDetectorRulesMessage(int detectorIndex, List<DetectionRule> rules)
            throws IOException;

    /**
     * Write message to update the filters
     *
     * @param filters the filters to update
     * @throws IOException If the write fails
     */
    void writeUpdateFiltersMessage(List<MlFilter> filters) throws IOException;

    /**
     * Write message to update the scheduled events
     *
     * @param events Scheduled events
     * @param bucketSpan The job bucket span
     * @throws IOException If the write fails
     */
    void writeUpdateScheduledEventsMessage(List<ScheduledEvent> events, TimeValue bucketSpan) throws IOException;

    /**
     * Flush the job pushing any stale data into autodetect.
     * Every flush command generates a unique flush Id which will be output

@@ -268,7 +268,7 @@ public class AutodetectProcessManager extends AbstractComponent {

        ActionListener<QueryPage<ScheduledEvent>> eventsListener = ActionListener.wrap(
                events -> {
                    communicator.writeUpdateProcessMessage(updateParams, events.results(), (aVoid, e) -> {
                    communicator.writeUpdateProcessMessage(updateParams, events == null ? null : events.results(), (aVoid, e) -> {
                        if (e == null) {
                            handler.accept(null);
                        } else {

@@ -283,7 +283,7 @@ public class AutodetectProcessManager extends AbstractComponent {
            ScheduledEventsQueryBuilder query = new ScheduledEventsQueryBuilder().start(Long.toString(new Date().getTime()));
            jobProvider.scheduledEventsForJob(jobTask.getJobId(), job.getGroups(), query, eventsListener);
        } else {
            eventsListener.onResponse(new QueryPage<>(Collections.emptyList(), 0, ScheduledEvent.RESULTS_FIELD));
            eventsListener.onResponse(null);
        }
    }

@@ -5,7 +5,10 @@
 */
package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
import org.elasticsearch.xpack.ml.job.process.autodetect.output.FlushAcknowledgement;

@@ -71,6 +74,14 @@ public class BlackHoleAutodetectProcess implements AutodetectProcess {
    public void writeUpdateDetectorRulesMessage(int detectorIndex, List<DetectionRule> rules) throws IOException {
    }

    @Override
    public void writeUpdateFiltersMessage(List<MlFilter> filters) throws IOException {
    }

    @Override
    public void writeUpdateScheduledEventsMessage(List<ScheduledEvent> events, TimeValue bucketSpan) throws IOException {
    }

    /**
     * Accept the request, do nothing with it, but write the flush acknowledgement to {@link #readAutodetectResults()}
     * @param params Should interim results be generated

@@ -8,8 +8,12 @@ package org.elasticsearch.xpack.ml.job.process.autodetect;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.ml.MachineLearningClientActionPlugin;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder;

@@ -159,6 +163,18 @@ class NativeAutodetectProcess implements AutodetectProcess {
        writer.writeUpdateDetectorRulesMessage(detectorIndex, rules);
    }

    @Override
    public void writeUpdateFiltersMessage(List<MlFilter> filters) throws IOException {
        ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields);
        writer.writeUpdateFiltersMessage(filters);
    }

    @Override
    public void writeUpdateScheduledEventsMessage(List<ScheduledEvent> events, TimeValue bucketSpan) throws IOException {
        ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields);
        writer.writeUpdateScheduledEventsMessage(events, bucketSpan);
    }

    @Override
    public String flushJob(FlushJobParams params) throws IOException {
        ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields);

@@ -7,33 +7,105 @@ package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;

import java.util.List;
import java.util.Objects;

public final class UpdateParams {

    private final String jobId;
    private final ModelPlotConfig modelPlotConfig;
    private final List<JobUpdate.DetectorUpdate> detectorUpdates;
    private final MlFilter filter;
    private final boolean updateScheduledEvents;

    public UpdateParams(@Nullable ModelPlotConfig modelPlotConfig,
                        @Nullable List<JobUpdate.DetectorUpdate> detectorUpdates,
                        boolean updateScheduledEvents) {
    private UpdateParams(String jobId, @Nullable ModelPlotConfig modelPlotConfig, @Nullable List<JobUpdate.DetectorUpdate> detectorUpdates,
                         @Nullable MlFilter filter, boolean updateScheduledEvents) {
        this.jobId = Objects.requireNonNull(jobId);
        this.modelPlotConfig = modelPlotConfig;
        this.detectorUpdates = detectorUpdates;
        this.filter = filter;
        this.updateScheduledEvents = updateScheduledEvents;
    }

    public String getJobId() {
        return jobId;
    }

    @Nullable
    public ModelPlotConfig getModelPlotConfig() {
        return modelPlotConfig;
    }

    @Nullable
    public List<JobUpdate.DetectorUpdate> getDetectorUpdates() {
        return detectorUpdates;
    }

    @Nullable
    public MlFilter getFilter() {
        return filter;
    }

    public boolean isUpdateScheduledEvents() {
        return updateScheduledEvents;
    }

    public static UpdateParams fromJobUpdate(JobUpdate jobUpdate) {
        return new Builder(jobUpdate.getJobId())
                .modelPlotConfig(jobUpdate.getModelPlotConfig())
                .detectorUpdates(jobUpdate.getDetectorUpdates())
                .build();
    }

    public static UpdateParams filterUpdate(String jobId, MlFilter filter) {
        return new Builder(jobId).filter(filter).build();
    }

    public static UpdateParams scheduledEventsUpdate(String jobId) {
        return new Builder(jobId).updateScheduledEvents(true).build();
    }

    public static Builder builder(String jobId) {
        return new Builder(jobId);
    }

    public static class Builder {

        private String jobId;
        private ModelPlotConfig modelPlotConfig;
        private List<JobUpdate.DetectorUpdate> detectorUpdates;
        private MlFilter filter;
        private boolean updateScheduledEvents;

        public Builder(String jobId) {
            this.jobId = Objects.requireNonNull(jobId);
        }

        public Builder modelPlotConfig(ModelPlotConfig modelPlotConfig) {
            this.modelPlotConfig = modelPlotConfig;
            return this;
        }

        public Builder detectorUpdates(List<JobUpdate.DetectorUpdate> detectorUpdates) {
            this.detectorUpdates = detectorUpdates;
            return this;
        }

        public Builder filter(MlFilter filter) {
            this.filter = filter;
            return this;
        }

        public Builder updateScheduledEvents(boolean updateScheduledEvents) {
            this.updateScheduledEvents = updateScheduledEvents;
            return this;
        }

        public UpdateParams build() {
            return new UpdateParams(jobId, modelPlotConfig, detectorUpdates, filter, updateScheduledEvents);
        }
    }
}

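Editor's note: UpdateParams above switches from a public constructor to a private constructor plus builder and factory methods, so callers can no longer construct half-specified updates. A self-contained, JDK-only sketch of the same immutable-object-with-builder pattern (names are illustrative, not the x-pack types):

import java.util.Objects;

// Simplified stand-in for the UpdateParams builder: an immutable
// parameter object with a required jobId validated once and optional
// fields set through a fluent builder.
final class ProcessUpdate {
    private final String jobId;
    private final boolean updateScheduledEvents;

    private ProcessUpdate(String jobId, boolean updateScheduledEvents) {
        this.jobId = Objects.requireNonNull(jobId);
        this.updateScheduledEvents = updateScheduledEvents;
    }

    static Builder builder(String jobId) {
        return new Builder(jobId);
    }

    static final class Builder {
        private final String jobId;
        private boolean updateScheduledEvents;

        Builder(String jobId) {
            this.jobId = Objects.requireNonNull(jobId);
        }

        Builder updateScheduledEvents(boolean b) {
            this.updateScheduledEvents = b;
            return this;
        }

        ProcessUpdate build() {
            return new ProcessUpdate(jobId, updateScheduledEvents);
        }
    }

    public static void main(String[] args) {
        ProcessUpdate update = ProcessUpdate.builder("my-job").updateScheduledEvents(true).build();
        System.out.println(update.jobId + " -> updateScheduledEvents=" + update.updateScheduledEvents);
    }
}
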
@@ -5,11 +5,14 @@
 */
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams;

@@ -211,6 +214,24 @@ public class ControlMsgToProcessWriter {
        writeMessage(stringBuilder.toString());
    }

    public void writeUpdateFiltersMessage(List<MlFilter> filters) throws IOException {
        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append(UPDATE_MESSAGE_CODE).append("[filters]\n");
        new MlFilterWriter(filters, stringBuilder).write();
        writeMessage(stringBuilder.toString());
    }

    public void writeUpdateScheduledEventsMessage(List<ScheduledEvent> events, TimeValue bucketSpan) throws IOException {
        StringBuilder stringBuilder = new StringBuilder();
        stringBuilder.append(UPDATE_MESSAGE_CODE).append("[scheduledEvents]\n");
        if (events.isEmpty()) {
            stringBuilder.append("clear = true\n");
        } else {
            new ScheduledEventsWriter(events, bucketSpan, stringBuilder).write();
        }
        writeMessage(stringBuilder.toString());
    }

    /**
     * Transform the supplied control message to length encoded values and
     * write to the OutputStream.

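Editor's note: the two new writer methods frame each control message as a code, a bracketed section name, and a newline-terminated body, with "clear = true" signalling an empty scheduled-events list. A sketch of just that framing; the actual value of UPDATE_MESSAGE_CODE is not shown in this diff, so it is stubbed here as an assumption:

// Illustrative framing only; UPDATE_MESSAGE_CODE is a placeholder, not
// the real constant. The [section]\n framing and the "clear = true"
// convention for an empty events list come from the hunk above.
final class ControlMessageSketch {
    private static final String UPDATE_MESSAGE_CODE = "u"; // assumed placeholder

    static String scheduledEventsMessage(boolean eventsEmpty, String eventsBody) {
        StringBuilder sb = new StringBuilder();
        sb.append(UPDATE_MESSAGE_CODE).append("[scheduledEvents]\n");
        if (eventsEmpty) {
            sb.append("clear = true\n"); // empty list: clear events in the process
        } else {
            sb.append(eventsBody);
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.print(scheduledEventsMessage(true, ""));
    }
}
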
@@ -22,11 +22,9 @@ import org.elasticsearch.xpack.ml.utils.MlStrings;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.ml.job.process.autodetect.writer.WriterConstants.EQUALS;

@@ -37,10 +35,6 @@ public class FieldConfigWriter {
    private static final String INFLUENCER_PREFIX = "influencer.";
    private static final String CATEGORIZATION_FIELD_OPTION = " categorizationfield=";
    private static final String CATEGORIZATION_FILTER_PREFIX = "categorizationfilter.";
    private static final String FILTER_PREFIX = "filter.";
    private static final String SCHEDULED_EVENT_PREFIX = "scheduledevent.";
    private static final String SCHEDULED_EVENT_DESCRIPTION_SUFFIX = ".description";

    // Note: for the Engine API summarycountfield is currently passed as a
    // command line option to autodetect rather than in the field config file

@@ -68,8 +62,9 @@ public class FieldConfigWriter {
    public void write() throws IOException {
        StringBuilder contents = new StringBuilder();

        writeDetectors(contents);
        // Filters have to be written before the detectors
        writeFilters(contents);
        writeDetectors(contents);
        writeScheduledEvents(contents);

        if (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA == false) {

@@ -141,46 +136,12 @@ public class FieldConfigWriter {
    }

    private void writeFilters(StringBuilder buffer) throws IOException {
        for (MlFilter filter : filters) {

            StringBuilder filterAsJson = new StringBuilder();
            filterAsJson.append('[');
            boolean first = true;
            for (String item : filter.getItems()) {
                if (first) {
                    first = false;
                } else {
                    filterAsJson.append(',');
                }
                filterAsJson.append('"');
                filterAsJson.append(item);
                filterAsJson.append('"');
            }
            filterAsJson.append(']');
            buffer.append(FILTER_PREFIX).append(filter.getId()).append(EQUALS).append(filterAsJson)
                    .append(NEW_LINE);
        }
        new MlFilterWriter(filters, buffer).write();
    }

    private void writeScheduledEvents(StringBuilder contents) throws IOException {
        if (scheduledEvents.isEmpty()) {
            return;
        }

        int eventIndex = 0;
        for (ScheduledEvent event: scheduledEvents) {

            contents.append(SCHEDULED_EVENT_PREFIX).append(eventIndex)
                    .append(SCHEDULED_EVENT_DESCRIPTION_SUFFIX).append(EQUALS)
                    .append(event.getDescription())
                    .append(NEW_LINE);

            contents.append(SCHEDULED_EVENT_PREFIX).append(eventIndex)
                    .append(DETECTOR_RULES_SUFFIX).append(EQUALS);
            writeDetectionRulesJson(Collections.singletonList(event.toDetectionRule(config.getBucketSpan())), contents);
            contents.append(NEW_LINE);

            ++eventIndex;
    private void writeScheduledEvents(StringBuilder buffer) throws IOException {
        if (scheduledEvents.isEmpty() == false) {
            new ScheduledEventsWriter(scheduledEvents, config.getBucketSpan(), buffer).write();
        }
    }

@@ -0,0 +1,49 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.xpack.ml.job.config.MlFilter;

import java.io.IOException;
import java.util.Collection;
import java.util.Objects;

import static org.elasticsearch.xpack.ml.job.process.autodetect.writer.WriterConstants.EQUALS;
import static org.elasticsearch.xpack.ml.job.process.autodetect.writer.WriterConstants.NEW_LINE;

public class MlFilterWriter {

    private static final String FILTER_PREFIX = "filter.";

    private final Collection<MlFilter> filters;
    private final StringBuilder buffer;

    public MlFilterWriter(Collection<MlFilter> filters, StringBuilder buffer) {
        this.filters = Objects.requireNonNull(filters);
        this.buffer = Objects.requireNonNull(buffer);
    }

    public void write() throws IOException {
        for (MlFilter filter : filters) {

            StringBuilder filterAsJson = new StringBuilder();
            filterAsJson.append('[');
            boolean first = true;
            for (String item : filter.getItems()) {
                if (first) {
                    first = false;
                } else {
                    filterAsJson.append(',');
                }
                filterAsJson.append('"');
                filterAsJson.append(item);
                filterAsJson.append('"');
            }
            filterAsJson.append(']');
            buffer.append(FILTER_PREFIX).append(filter.getId()).append(EQUALS).append(filterAsJson).append(NEW_LINE);
        }
    }
}

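Editor's note: MlFilterWriter emits one "filter.<id>" line per filter, with the items rendered as a JSON string array. A self-contained sketch of that line format (a plain '=' stands in for WriterConstants.EQUALS, whose exact spelling is not shown in this diff):

import java.util.List;

// Self-contained sketch of the line format MlFilterWriter produces.
// The filter id and items below are made-up example values.
final class FilterLineSketch {
    static String filterLine(String id, List<String> items) {
        StringBuilder json = new StringBuilder("[");
        for (int i = 0; i < items.size(); i++) {
            if (i > 0) {
                json.append(',');
            }
            json.append('"').append(items.get(i)).append('"');
        }
        json.append(']');
        return "filter." + id + "=" + json + "\n";
    }

    public static void main(String[] args) {
        // Prints: filter.safe_domains=["elastic.co","example.com"]
        System.out.print(filterLine("safe_domains", List.of("elastic.co", "example.com")));
    }
}
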
@@ -0,0 +1,60 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;

import java.io.IOException;
import java.util.Collection;
import java.util.Objects;

import static org.elasticsearch.xpack.ml.job.process.autodetect.writer.WriterConstants.EQUALS;
import static org.elasticsearch.xpack.ml.job.process.autodetect.writer.WriterConstants.NEW_LINE;

public class ScheduledEventsWriter {

    private static final String SCHEDULED_EVENT_PREFIX = "scheduledevent.";
    private static final String SCHEDULED_EVENT_DESCRIPTION_SUFFIX = ".description";
    private static final String RULES_SUFFIX = ".rules";

    private final Collection<ScheduledEvent> scheduledEvents;
    private final TimeValue bucketSpan;
    private final StringBuilder buffer;

    public ScheduledEventsWriter(Collection<ScheduledEvent> scheduledEvents, TimeValue bucketSpan, StringBuilder buffer) {
        this.scheduledEvents = Objects.requireNonNull(scheduledEvents);
        this.bucketSpan = Objects.requireNonNull(bucketSpan);
        this.buffer = Objects.requireNonNull(buffer);
    }

    public void write() throws IOException {
        int eventIndex = 0;
        for (ScheduledEvent event: scheduledEvents) {

            StringBuilder eventContent = new StringBuilder();
            eventContent.append(SCHEDULED_EVENT_PREFIX).append(eventIndex)
                    .append(SCHEDULED_EVENT_DESCRIPTION_SUFFIX).append(EQUALS)
                    .append(event.getDescription())
                    .append(NEW_LINE);

            eventContent.append(SCHEDULED_EVENT_PREFIX).append(eventIndex).append(RULES_SUFFIX).append(EQUALS);
            try (XContentBuilder contentBuilder = XContentFactory.jsonBuilder()) {
                contentBuilder.startArray();
                event.toDetectionRule(bucketSpan).toXContent(contentBuilder, null);
                contentBuilder.endArray();
                eventContent.append(contentBuilder.string());
            }

            eventContent.append(NEW_LINE);
            buffer.append(eventContent.toString());

            ++eventIndex;
        }
    }
}

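Editor's note: per event, the writer above emits two keys, an indexed description and an indexed rules array serialized via XContentBuilder. A sketch of just that key layout (plain '=' stands in for WriterConstants.EQUALS, and the rules JSON is a dummy placeholder, not a real serialized DetectionRule):

// Minimal sketch of the per-event key layout ScheduledEventsWriter
// produces; rulesJson is a placeholder for the DetectionRule array.
final class ScheduledEventLinesSketch {
    static String eventLines(int index, String description, String rulesJson) {
        return "scheduledevent." + index + ".description=" + description + "\n"
                + "scheduledevent." + index + ".rules=" + rulesJson + "\n";
    }

    public static void main(String[] args) {
        // Prints two lines for event 0: its description, then its rules array.
        System.out.print(eventLines(0, "Black Friday", "[{}]"));
    }
}
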
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.rest.calendar;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;

@@ -34,8 +35,9 @@ public class RestPostCalendarEventAction extends BaseRestHandler {
    protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        String calendarId = restRequest.param(Calendar.ID.getPreferredName());

        XContentParser parser = restRequest.contentOrSourceParamParser();
        PostCalendarEventsAction.Request request =
                PostCalendarEventsAction.Request.parseRequest(calendarId, restRequest.requiredContent(), restRequest.getXContentType());
                PostCalendarEventsAction.Request.parseRequest(calendarId, parser);
        return channel -> client.execute(PostCalendarEventsAction.INSTANCE, request, new RestToXContentListener<>(channel));
    }
}

@@ -207,7 +207,6 @@ import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;

@@ -755,29 +754,22 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin, Clus
        }

        final boolean indexAuditingEnabled = Security.indexAuditLoggingEnabled(settings);
        final String auditIndex;
        if (indexAuditingEnabled) {
            auditIndex = "," + IndexAuditTrail.INDEX_NAME_PREFIX + "*";
        } else {
            auditIndex = "";
        }
        String securityIndices = SecurityLifecycleService.indexNames().stream()
                .collect(Collectors.joining(","));
        String errorMessage = LoggerMessageFormat.format(
                "the [action.auto_create_index] setting value [{}] is too" +
                        " restrictive. disable [action.auto_create_index] or set it to " +
                        "[{}{}]", (Object) value, securityIndices, auditIndex);
        if (Booleans.isFalse(value)) {
            throw new IllegalArgumentException(errorMessage);
        }
        String auditIndex = IndexAuditTrail.INDEX_NAME_PREFIX + "*";
        String errorMessage = LoggerMessageFormat.format(
                "the [action.auto_create_index] setting value [{}] is too" +
                        " restrictive. disable [action.auto_create_index] or set it to include " +
                        "[{}]", (Object) value, auditIndex);
        if (Booleans.isFalse(value)) {
            throw new IllegalArgumentException(errorMessage);
        }

        if (Booleans.isTrue(value)) {
            return;
        }
        if (Booleans.isTrue(value)) {
            return;
        }

        String[] matches = Strings.commaDelimitedListToStringArray(value);
        List<String> indices = new ArrayList<>(SecurityLifecycleService.indexNames());
        if (indexAuditingEnabled) {
            String[] matches = Strings.commaDelimitedListToStringArray(value);
            List<String> indices = new ArrayList<>();
            DateTime now = new DateTime(DateTimeZone.UTC);
            // just use daily rollover
            indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY));

@@ -788,34 +780,32 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin, Clus
            indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(4), IndexNameResolver.Rollover.DAILY));
            indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(5), IndexNameResolver.Rollover.DAILY));
            indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(6), IndexNameResolver.Rollover.DAILY));
        }

        for (String index : indices) {
            boolean matched = false;
            for (String match : matches) {
                char c = match.charAt(0);
                if (c == '-') {
                    if (Regex.simpleMatch(match.substring(1), index)) {
                        throw new IllegalArgumentException(errorMessage);
                    }
                } else if (c == '+') {
                    if (Regex.simpleMatch(match.substring(1), index)) {
                        matched = true;
                        break;
                    }
                } else {
                    if (Regex.simpleMatch(match, index)) {
                        matched = true;
                        break;
            for (String index : indices) {
                boolean matched = false;
                for (String match : matches) {
                    char c = match.charAt(0);
                    if (c == '-') {
                        if (Regex.simpleMatch(match.substring(1), index)) {
                            throw new IllegalArgumentException(errorMessage);
                        }
                    } else if (c == '+') {
                        if (Regex.simpleMatch(match.substring(1), index)) {
                            matched = true;
                            break;
                        }
                    } else {
                        if (Regex.simpleMatch(match, index)) {
                            matched = true;
                            break;
                        }
                    }
                }
                if (!matched) {
                    throw new IllegalArgumentException(errorMessage);
                }
            }
            if (!matched) {
                throw new IllegalArgumentException(errorMessage);
            }
        }

        if (indexAuditingEnabled) {
            logger.warn("the [action.auto_create_index] setting is configured to be restrictive [{}]. " +
                    " for the next 6 months audit indices are allowed to be created, but please make sure" +
                    " that any future history indices after 6 months with the pattern " +

@@ -904,17 +894,8 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin, Clus

    public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
        return templates -> {
            final byte[] securityTemplate = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json",
                    Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8);
            templates.remove(SECURITY_TEMPLATE_NAME);
            final XContent xContent = XContentFactory.xContent(XContentType.JSON);

            try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, securityTemplate)) {
                templates.put(SECURITY_TEMPLATE_NAME, IndexTemplateMetaData.Builder.fromXContent(parser, SECURITY_TEMPLATE_NAME));
            } catch (IOException e) {
                // TODO: should we handle this with a thrown exception?
                logger.error("Error loading template [{}] as part of metadata upgrading", SECURITY_TEMPLATE_NAME);
            }

            final byte[] auditTemplate = TemplateUtils.loadTemplate("/" + IndexAuditTrail.INDEX_TEMPLATE_NAME + ".json",
                    Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8);

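Editor's note: the auto_create_index validation above walks a comma-separated pattern list where a leading '-' forbids a match, a leading '+' allows one, and a bare pattern also allows one; every required index must end up matched. A standalone sketch of that semantics (simpleMatch() here is a one-wildcard simplification of Elasticsearch's Regex.simpleMatch, and the example values are made up):

import java.util.List;

// Standalone sketch of the [action.auto_create_index] pattern check.
final class AutoCreateIndexCheckSketch {
    // Simplified matcher: supports at most one '*' wildcard.
    static boolean simpleMatch(String pattern, String value) {
        int star = pattern.indexOf('*');
        if (star < 0) {
            return pattern.equals(value);
        }
        return value.startsWith(pattern.substring(0, star))
                && value.endsWith(pattern.substring(star + 1));
    }

    static void check(String settingValue, List<String> requiredIndices) {
        String[] matches = settingValue.split(",");
        for (String index : requiredIndices) {
            boolean matched = false;
            for (String match : matches) {
                char c = match.charAt(0);
                if (c == '-') {
                    if (simpleMatch(match.substring(1), index)) { // explicitly forbidden
                        throw new IllegalArgumentException("setting too restrictive for " + index);
                    }
                } else if (c == '+') {
                    if (simpleMatch(match.substring(1), index)) { // explicitly allowed
                        matched = true;
                        break;
                    }
                } else if (simpleMatch(match, index)) { // bare pattern also allows
                    matched = true;
                    break;
                }
            }
            if (!matched) {
                throw new IllegalArgumentException("setting too restrictive for " + index);
            }
        }
    }

    public static void main(String[] args) {
        check("+logs-*,-secret-*", List.of("logs-2018.01.01")); // passes: allowed by +logs-*
    }
}
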
@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;

@@ -28,6 +27,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;

/**

@@ -64,7 +64,7 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
        this.settings = settings;
        this.threadPool = threadPool;
        this.indexAuditTrail = indexAuditTrail;
        this.securityIndex = new IndexLifecycleManager(settings, client, SECURITY_INDEX_NAME, SECURITY_TEMPLATE_NAME);
        this.securityIndex = new IndexLifecycleManager(settings, client, SECURITY_INDEX_NAME);
        clusterService.addListener(this);
        clusterService.addLifecycleListener(new LifecycleListener() {
            @Override

@@ -114,20 +114,34 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
        return securityIndex;
    }

    /**
     * Returns {@code true} if the security index exists
     */
    public boolean isSecurityIndexExisting() {
        return securityIndex.indexExists();
    }

    /**
     * Returns <code>true</code> if the security index does not exist or it exists and has the current
     * value for the <code>index.format</code> index setting
     */
    public boolean isSecurityIndexUpToDate() {
        return securityIndex.isIndexUpToDate();
    }

    /**
     * Returns <code>true</code> if the security index exists and all primary shards are active
     */
    public boolean isSecurityIndexAvailable() {
        return securityIndex.isAvailable();
    }

    public boolean isSecurityIndexWriteable() {
        return securityIndex.isWritable();
    /**
     * Returns <code>true</code> if the security index does not exist or the mappings are up to date
     * based on the version in the <code>_meta</code> field
     */
    public boolean isSecurityIndexMappingUpToDate() {
        return securityIndex().isMappingUpToDate();
    }

    /**

@@ -170,22 +184,16 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
        }
    }

    public static boolean securityIndexMappingAndTemplateSufficientToRead(ClusterState clusterState,
                                                                          Logger logger) {
        return checkTemplateAndMappingVersions(clusterState, logger, MIN_READ_VERSION::onOrBefore);
    public static boolean securityIndexMappingSufficientToRead(ClusterState clusterState, Logger logger) {
        return checkMappingVersions(clusterState, logger, MIN_READ_VERSION::onOrBefore);
    }

    public static boolean securityIndexMappingAndTemplateUpToDate(ClusterState clusterState,
                                                                  Logger logger) {
        return checkTemplateAndMappingVersions(clusterState, logger, Version.CURRENT::equals);
    static boolean securityIndexMappingUpToDate(ClusterState clusterState, Logger logger) {
        return checkMappingVersions(clusterState, logger, Version.CURRENT::equals);
    }

    private static boolean checkTemplateAndMappingVersions(ClusterState clusterState, Logger logger,
                                                           Predicate<Version> versionPredicate) {
        return IndexLifecycleManager.checkTemplateExistsAndVersionMatches(SECURITY_TEMPLATE_NAME,
                clusterState, logger, versionPredicate) &&
                IndexLifecycleManager.checkIndexMappingVersionMatches(SECURITY_INDEX_NAME,
                        clusterState, logger, versionPredicate);
    private static boolean checkMappingVersions(ClusterState clusterState, Logger logger, Predicate<Version> versionPredicate) {
        return IndexLifecycleManager.checkIndexMappingVersionMatches(SECURITY_INDEX_NAME, clusterState, logger, versionPredicate);
    }

    public static List<String> indexNames() {

@@ -193,17 +201,11 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
    }

    /**
     * Creates the security index, if it does not already exist, then runs the given
     * action on the security index.
     * Prepares the security index by creating it if it doesn't exist or updating the mappings if the mappings are
     * out of date. After any tasks have been executed, the runnable is then executed.
     */
    public <T> void createIndexIfNeededThenExecute(final ActionListener<T> listener, final Runnable andThen) {
        if (!isSecurityIndexExisting() || isSecurityIndexUpToDate()) {
            securityIndex.createIndexIfNeededThenExecute(listener, andThen);
        } else {
            listener.onFailure(new IllegalStateException(
                    "Security index is not on the current version - the native realm will not be operational until " +
                    "the upgrade API is run on the security index"));
        }
    public void prepareIndexIfNeededThenExecute(final Consumer<Exception> consumer, final Runnable andThen) {
        securityIndex.prepareIndexIfNeededThenExecute(consumer, andThen);
    }

    /**

@@ -6,7 +6,9 @@
package org.elasticsearch.xpack.security.authc;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.Loggers;
@@ -14,8 +16,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.index.reindex.ScrollableHitSource;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
@@ -56,8 +60,10 @@ final class ExpiredTokenRemover extends AbstractRunnable {
.filter(QueryBuilders.termQuery("doc_type", TokenService.DOC_TYPE))
.filter(QueryBuilders.rangeQuery("expiration_time").lte(Instant.now().toEpochMilli())));
executeAsyncWithOrigin(client, SECURITY_ORIGIN, DeleteByQueryAction.INSTANCE, dbq,
ActionListener.wrap(r -> markComplete(),
e -> {
ActionListener.wrap(r -> {
debugDbqResponse(r);
markComplete();
}, e -> {
if (isShardNotAvailableException(e) == false) {
logger.error("failed to delete expired tokens", e);
}
@@ -71,6 +77,21 @@ final class ExpiredTokenRemover extends AbstractRunnable {
}
}

private void debugDbqResponse(BulkByScrollResponse response) {
if (logger.isDebugEnabled()) {
logger.debug("delete by query of tokens finished with [{}] deletions, [{}] bulk failures, [{}] search failures",
response.getDeleted(), response.getBulkFailures().size(), response.getSearchFailures().size());
for (BulkItemResponse.Failure failure : response.getBulkFailures()) {
logger.debug(new ParameterizedMessage("deletion failed for index [{}], type [{}], id [{}]",
failure.getIndex(), failure.getType(), failure.getId()), failure.getCause());
}
for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) {
logger.debug(new ParameterizedMessage("search failed for index [{}], shard [{}] on node [{}]",
failure.getIndex(), failure.getShardId(), failure.getNodeId()), failure.getReason());
}
}
}

boolean isExpirationInProgress() {
return inProgress.get();
}

@@ -271,14 +271,7 @@ public final class TokenService extends AbstractComponent {
*/
public void invalidateToken(String tokenString, ActionListener<Boolean> listener) {
ensureEnabled();
if (lifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (lifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("cannot write to the tokens index"));
} else if (Strings.isNullOrEmpty(tokenString)) {
if (Strings.isNullOrEmpty(tokenString)) {
listener.onFailure(new IllegalArgumentException("token must be provided"));
} else {
maybeStartTokenRemover();
@@ -291,7 +284,7 @@ public final class TokenService extends AbstractComponent {
listener.onResponse(false);
} else {
final String id = getDocumentId(userToken);
lifecycleService.createIndexIfNeededThenExecute(listener, () -> {
lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, id)
.setOpType(OpType.CREATE)
@@ -338,47 +331,38 @@ public final class TokenService extends AbstractComponent {
* have been explicitly cleared.
*/
private void checkIfTokenIsRevoked(UserToken userToken, ActionListener<UserToken> listener) {
if (lifecycleService.isSecurityIndexAvailable()) {
if (lifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getDocumentId(userToken)).request(),
new ActionListener<GetResponse>() {

@Override
public void onResponse(GetResponse response) {
if (response.isExists()) {
// this token is explicitly expired!
listener.onFailure(expiredTokenException());
} else {
listener.onResponse(userToken);
}
}

@Override
public void onFailure(Exception e) {
// if the index or the shard is not there / available we assume that
// the token is not valid
if (TransportActions.isShardNotAvailableException(e)) {
logger.warn("failed to get token [{}] since index is not available", userToken.getId());
listener.onResponse(null);
} else {
logger.error(new ParameterizedMessage("failed to get token [{}]", userToken.getId()), e);
listener.onFailure(e);
}
}
}, client::get);
} else if (lifecycleService.isSecurityIndexExisting()) {
// index exists but the index isn't available, do not trust the token
logger.warn("could not validate token as the security index is not available");
listener.onResponse(null);
} else {
if (lifecycleService.isSecurityIndexExisting() == false) {
// index doesn't exist so the token is considered valid.
listener.onResponse(userToken);
} else {
lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getDocumentId(userToken)).request(),
new ActionListener<GetResponse>() {

@Override
public void onResponse(GetResponse response) {
if (response.isExists()) {
// this token is explicitly expired!
listener.onFailure(expiredTokenException());
} else {
listener.onResponse(userToken);
}
}

@Override
public void onFailure(Exception e) {
// if the index or the shard is not there / available we assume that
// the token is not valid
if (TransportActions.isShardNotAvailableException(e)) {
logger.warn("failed to get token [{}] since index is not available", userToken.getId());
listener.onResponse(null);
} else {
logger.error(new ParameterizedMessage("failed to get token [{}]", userToken.getId()), e);
listener.onFailure(e);
}
}
}, client::get));
}
}

@@ -17,6 +17,7 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
@@ -113,25 +114,23 @@ public class NativeUsersStore extends AbstractComponent {
listener.onFailure(t);
}
};
if (userNames.length == 1) { // optimization for single user lookup

if (securityLifecycleService.isSecurityIndexExisting() == false) {
// TODO remove this short circuiting and fix tests that fail without this!
listener.onResponse(Collections.emptyList());
} else if (userNames.length == 1) { // optimization for single user lookup
final String username = userNames[0];
getUserAndPassword(username, ActionListener.wrap(
(uap) -> listener.onResponse(uap == null ? Collections.emptyList() : Collections.singletonList(uap.user())),
handleException::accept));
handleException));
} else {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational " +
"until the upgrade API is run on the security index"));
return;
}
try {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
final QueryBuilder query;
if (userNames == null || userNames.length == 0) {
query = QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE);
} else {
final String[] users = Arrays.asList(userNames).stream()
.map(s -> getIdForUser(USER_DOC_TYPE, s)).toArray(String[]::new);
.map(s -> getIdForUser(USER_DOC_TYPE, s)).toArray(String[]::new);
query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(INDEX_TYPE).addIds(users));
}
final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
@@ -148,10 +147,7 @@ public class NativeUsersStore extends AbstractComponent {
return u != null ? u.user() : null;
});
}
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to retrieve users {}", Arrays.toString(userNames)), e);
listener.onFailure(e);
}
});
}
}

@@ -159,43 +155,34 @@ public class NativeUsersStore extends AbstractComponent {
* Async method to retrieve a user and their password
*/
private void getUserAndPassword(final String user, final ActionListener<UserAndPassword> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
try {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
listener.onResponse(transformUser(response.getId(), response.getSource()));
}

@Override
public void onFailure(Exception t) {
if (t instanceof IndexNotFoundException) {
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve user [{}] because security index does not exist", user), t);
} else {
logger.error(new ParameterizedMessage("failed to retrieve user [{}]", user), t);
}
// We don't invoke the onFailure listener here, instead
// we call the response with a null user
listener.onResponse(null);
}
}, client::get);
} catch (IndexNotFoundException infe) {
logger.trace((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("could not retrieve user [{}] because security index does not exist", user));
if (securityLifecycleService.isSecurityIndexExisting() == false) {
// TODO remove this short circuiting and fix tests that fail without this!
listener.onResponse(null);
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to retrieve user [{}]", user), e);
listener.onFailure(e);
} else {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
listener.onResponse(transformUser(response.getId(), response.getSource()));
}

@Override
public void onFailure(Exception t) {
if (t instanceof IndexNotFoundException) {
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve user [{}] because security index does not exist", user), t);
} else {
logger.error(new ParameterizedMessage("failed to retrieve user [{}]", user), t);
}
// We don't invoke the onFailure listener here, instead
// we call the response with a null user
listener.onResponse(null);
}
}, client::get));
}
}

@@ -208,55 +195,46 @@ public class NativeUsersStore extends AbstractComponent {
assert SystemUser.NAME.equals(username) == false && XPackUser.NAME.equals(username) == false : username + "is internal!";
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("password cannot be changed as user service cannot write until template and " +
"mappings are up to date"));
return;
}

final String docType;
if (ClientReservedRealm.isReserved(username, settings)) {
docType = RESERVED_USER_TYPE;
} else {
docType = USER_DOC_TYPE;
}
final String docType;
if (ClientReservedRealm.isReserved(username, settings)) {
docType = RESERVED_USER_TYPE;
} else {
docType = USER_DOC_TYPE;
}

securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(docType, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(), String.valueOf(request.passwordHash()))
.setRefreshPolicy(request.getRefreshPolicy()).request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED;
clearRealmCache(request.username(), listener, null);
}

@Override
public void onFailure(Exception e) {
if (isIndexNotFoundOrDocumentMissing(e)) {
if (docType.equals(RESERVED_USER_TYPE)) {
createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener);
} else {
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to change password for user [{}]", request.username()), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("user must exist in order to change password");
listener.onFailure(validationException);
}
} else {
listener.onFailure(e);
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(docType, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(),
String.valueOf(request.passwordHash()))
.setRefreshPolicy(request.getRefreshPolicy()).request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED;
clearRealmCache(request.username(), listener, null);
}
}
}, client::update);
});

@Override
public void onFailure(Exception e) {
if (isIndexNotFoundOrDocumentMissing(e)) {
if (docType.equals(RESERVED_USER_TYPE)) {
createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener);
} else {
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to change password for user [{}]", request.username()), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("user must exist in order to change password");
listener.onFailure(validationException);
}
} else {
listener.onFailure(e);
}
}
}, client::update);
});
}
}

/**
@@ -264,13 +242,7 @@ public class NativeUsersStore extends AbstractComponent {
* has been indexed
*/
private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener<Void> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username))
@@ -301,27 +273,10 @@ public class NativeUsersStore extends AbstractComponent {
public void putUser(final PutUserRequest request, final ActionListener<Boolean> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("user cannot be created or changed as the user service cannot write until " +
"template and mappings are up to date"));
return;
}

try {
if (request.passwordHash() == null) {
updateUserWithoutPassword(request, listener);
} else {
indexUser(request, listener);
}
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to put user [{}]", request.username()), e);
listener.onFailure(e);
} else if (request.passwordHash() == null) {
updateUserWithoutPassword(request, listener);
} else {
indexUser(request, listener);
}
}

@@ -330,9 +285,8 @@ public class NativeUsersStore extends AbstractComponent {
*/
private void updateUserWithoutPassword(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
assert putUserRequest.passwordHash() == null;
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
// We must have an existing document
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, putUserRequest.username()))
@@ -375,8 +329,7 @@ public class NativeUsersStore extends AbstractComponent {

private void indexUser(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
assert putUserRequest.passwordHash() != null;
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, putUserRequest.username()))
@@ -413,19 +366,7 @@ public class NativeUsersStore extends AbstractComponent {
final ActionListener<Void> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("users may not be created or modified using a tribe node"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("enabled status cannot be changed as user service cannot write until template " +
"and mappings are up to date"));
return;
}

if (ClientReservedRealm.isReserved(username, settings)) {
} else if (ClientReservedRealm.isReserved(username, settings)) {
setReservedUserEnabled(username, enabled, refreshPolicy, true, listener);
} else {
setRegularUserEnabled(username, enabled, refreshPolicy, listener);
@@ -434,115 +375,92 @@ public class NativeUsersStore extends AbstractComponent {

private void setRegularUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
final ActionListener<Void> listener) {
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
try {
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setRefreshPolicy(refreshPolicy)
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
clearRealmCache(username, listener, null);
}
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setRefreshPolicy(refreshPolicy)
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
clearRealmCache(username, listener, null);
}

@Override
public void onFailure(Exception e) {
Exception failure = e;
if (isIndexNotFoundOrDocumentMissing(e)) {
// if the index doesn't exist we can never update a user
// if the document doesn't exist, then this update is not valid
logger.debug((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to {} user [{}]",
enabled ? "enable" : "disable", username), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("only existing users can be " +
(enabled ? "enabled" : "disabled"));
failure = validationException;
}
listener.onFailure(failure);
@Override
public void onFailure(Exception e) {
Exception failure = e;
if (isIndexNotFoundOrDocumentMissing(e)) {
// if the index doesn't exist we can never update a user
// if the document doesn't exist, then this update is not valid
logger.debug((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to {} user [{}]",
enabled ? "enable" : "disable", username), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("only existing users can be " +
(enabled ? "enabled" : "disabled"));
failure = validationException;
}
}, client::update);
});
} catch (Exception e) {
listener.onFailure(e);
}
listener.onFailure(failure);
}
}, client::update);
});
}

private void setReservedUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
boolean clearCache, final ActionListener<Void> listener) {
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
try {
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setUpsert(XContentType.JSON,
Fields.PASSWORD.getPreferredName(), "",
Fields.ENABLED.getPreferredName(), enabled,
Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setRefreshPolicy(refreshPolicy)
.request(),
new ActionListener<UpdateResponse>() {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setUpsert(XContentType.JSON,
Fields.PASSWORD.getPreferredName(), "",
Fields.ENABLED.getPreferredName(), enabled,
Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setRefreshPolicy(refreshPolicy)
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
if (clearCache) {
clearRealmCache(username, listener, null);
} else {
listener.onResponse(null);
}
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, client::update);
});
}

public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionListener<Boolean> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("users may not be deleted using a tribe node"));
} else {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request();
request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy());
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<DeleteResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
if (clearCache) {
clearRealmCache(username, listener, null);
} else {
listener.onResponse(null);
}
public void onResponse(DeleteResponse deleteResponse) {
clearRealmCache(deleteUserRequest.username(), listener,
deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, client::update);
}, client::delete);
});
} catch (Exception e) {
listener.onFailure(e);
}
}

public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionListener<Boolean> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("users may not be deleted using a tribe node"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("user cannot be deleted as user service cannot write until template and " +
"mappings are up to date"));
return;
}

try {
DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request();
request.indicesOptions().ignoreUnavailable();
request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy());
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
clearRealmCache(deleteUserRequest.username(), listener,
deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, client::delete);
} catch (Exception e) {
logger.error("unable to remove user", e);
listener.onFailure(e);
}
}

@@ -565,62 +483,52 @@ public class NativeUsersStore extends AbstractComponent {
}

void getReservedUserInfo(String username, ActionListener<ReservedUserInfo> listener) {
if (!securityLifecycleService.isSecurityIndexExisting()) {
listener.onFailure(new IllegalStateException("Attempt to get reserved user info but the security index does not exist"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(RESERVED_USER_TYPE, username))
.request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (getResponse.isExists()) {
Map<String, Object> sourceMap = getResponse.getSourceAsMap();
String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName());
Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName());
if (password == null) {
listener.onFailure(new IllegalStateException("password hash must not be null!"));
} else if (enabled == null) {
listener.onFailure(new IllegalStateException("enabled must not be null!"));
} else if (password.isEmpty()) {
listener.onResponse((enabled ? ReservedRealm.ENABLED_DEFAULT_USER_INFO : ReservedRealm
.DISABLED_DEFAULT_USER_INFO).deepClone());
} else {
listener.onResponse(new ReservedUserInfo(password.toCharArray(), enabled, false));
}
} else {
listener.onResponse(null);
}
}
if (securityLifecycleService.isSecurityIndexExisting() == false) {
// TODO remove this short circuiting and fix tests that fail without this!
listener.onResponse(null);
} else {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username)).request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (getResponse.isExists()) {
Map<String, Object> sourceMap = getResponse.getSourceAsMap();
String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName());
Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName());
if (password == null) {
listener.onFailure(new IllegalStateException("password hash must not be null!"));
} else if (enabled == null) {
listener.onFailure(new IllegalStateException("enabled must not be null!"));
} else if (password.isEmpty()) {
listener.onResponse((enabled ? ReservedRealm.ENABLED_DEFAULT_USER_INFO : ReservedRealm
.DISABLED_DEFAULT_USER_INFO).deepClone());
} else {
listener.onResponse(new ReservedUserInfo(password.toCharArray(), enabled, false));
}
} else {
listener.onResponse(null);
}
}

@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve built in user [{}] info since security index does not exist", username), e);
listener.onResponse(null);
} else {
logger.error(new ParameterizedMessage("failed to retrieve built in user [{}] info", username), e);
listener.onFailure(null);
}
}
}, client::get);
@Override
public void onFailure(Exception e) {
if (TransportActions.isShardNotAvailableException(e)) {
logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve built in user [{}] info since security index unavailable", username),
e);
}
listener.onFailure(e);
}
}, client::get));
}
}

void getAllReservedUserInfo(ActionListener<Map<String, ReservedUserInfo>> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE))
.setFetchSource(true).request(),
@@ -661,7 +569,7 @@ public class NativeUsersStore extends AbstractComponent {
listener.onFailure(e);
}
}
}, client::search);
}, client::search));
}

private <Response> void clearRealmCache(String username, ActionListener<Response> listener, Response response) {

@@ -167,8 +167,6 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("role-mappings cannot be modified until template and mappings are up to date"));
} else {
try {
inner.accept(request, ActionListener.wrap(r -> refreshRealms(listener, r), listener::onFailure));
@@ -181,7 +179,7 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol

private void innerPutMapping(PutRoleMappingRequest request, ActionListener<Boolean> listener) {
final ExpressionRoleMapping mapping = request.getMapping();
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
final XContentBuilder xContentBuilder;
try {
xContentBuilder = mapping.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true);
@@ -270,11 +268,11 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
} else {
logger.info("The security index is not yet available - no role mappings can be loaded");
if (logger.isDebugEnabled()) {
logger.debug("Security Index [{}] [exists: {}] [available: {}] [writable: {}]",
logger.debug("Security Index [{}] [exists: {}] [available: {}] [mapping up to date: {}]",
SECURITY_INDEX_NAME,
securityLifecycleService.isSecurityIndexExisting(),
securityLifecycleService.isSecurityIndexAvailable(),
securityLifecycleService.isSecurityIndexWriteable()
securityLifecycleService.isSecurityIndexMappingUpToDate()
);
}
listener.onResponse(Collections.emptyList());

@@ -31,8 +31,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.LicenseUtils;
@@ -105,16 +103,15 @@ public class NativeRolesStore extends AbstractComponent {
* Retrieve a list of roles, if rolesToGet is null or empty, fetch all roles
*/
public void getRoleDescriptors(String[] names, final ActionListener<Collection<RoleDescriptor>> listener) {
if (names != null && names.length == 1) {
if (securityLifecycleService.isSecurityIndexExisting() == false) {
// TODO remove this short circuiting and fix tests that fail without this!
listener.onResponse(Collections.emptyList());
} else if (names != null && names.length == 1) {
getRoleDescriptor(Objects.requireNonNull(names[0]), ActionListener.wrap(roleDescriptor ->
listener.onResponse(roleDescriptor == null ? Collections.emptyList() : Collections.singletonList(roleDescriptor)),
listener::onFailure));
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
} else {
try {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
QueryBuilder query;
if (names == null || names.length == 0) {
query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE);
@@ -134,61 +131,39 @@ public class NativeRolesStore extends AbstractComponent {
ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener),
(hit) -> transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState));
}
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to retrieve roles {}", Arrays.toString(names)), e);
listener.onFailure(e);
}
});
}
}

public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionListener<Boolean> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("roles may not be deleted using a tribe node"));
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("role cannot be deleted as service cannot write until template and " +
"mappings are up to date"));
return;
}
} else {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request();
request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
clearRoleCache(deleteRoleRequest.name(), listener,
deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}

try {
DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request();
request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
clearRoleCache(deleteRoleRequest.name(), listener,
deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}

@Override
public void onFailure(Exception e) {
logger.error("failed to delete role from the index", e);
listener.onFailure(e);
}
}, client::delete);
} catch (IndexNotFoundException e) {
logger.trace("security index does not exist", e);
listener.onResponse(false);
} catch (Exception e) {
logger.error("unable to remove role", e);
listener.onFailure(e);
@Override
public void onFailure(Exception e) {
logger.error("failed to delete role from the index", e);
listener.onFailure(e);
}
}, client::delete);
});
}
}

public void putRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener<Boolean> listener) {
if (isTribeNode) {
listener.onFailure(new UnsupportedOperationException("roles may not be created or modified using a tribe node"));
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("role cannot be created or modified as service cannot write until template and " +
"mappings are up to date"));
} else if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) {
innerPutRole(request, role, listener);
} else if (role.isUsingDocumentOrFieldLevelSecurity()) {
@@ -200,44 +175,33 @@ public class NativeRolesStore extends AbstractComponent {

// pkg-private for testing
void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener<Boolean> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
try {
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
final XContentBuilder xContentBuilder;
try {
xContentBuilder = role.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true);
} catch (IOException e) {
listener.onFailure(e);
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName()))
.setSource(xContentBuilder)
.setRefreshPolicy(request.getRefreshPolicy())
.request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
clearRoleCache(role.getName(), listener, created);
}
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
final XContentBuilder xContentBuilder;
try {
xContentBuilder = role.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true);
} catch (IOException e) {
listener.onFailure(e);
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName()))
.setSource(xContentBuilder)
.setRefreshPolicy(request.getRefreshPolicy())
.request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
clearRoleCache(role.getName(), listener, created);
}

@Override
public void onFailure(Exception e) {
logger.error(new ParameterizedMessage("failed to put role [{}]", request.name()), e);
listener.onFailure(e);
}
}, client::index);
});
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to put role [{}]", request.name()), e);
listener.onFailure(e);
}
@Override
public void onFailure(Exception e) {
logger.error(new ParameterizedMessage("failed to put role [{}]", request.name()), e);
listener.onFailure(e);
}
}, client::index);
});
}

public void usageStats(ActionListener<Map<String, Object>> listener) {
@@ -248,118 +212,97 @@ public class NativeRolesStore extends AbstractComponent {
usageStats.put("dls", false);
listener.onResponse(usageStats);
} else {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareMultiSearch()
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.setSize(0))
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.must(QueryBuilders.boolQuery()
.should(existsQuery("indices.field_security.grant"))
.should(existsQuery("indices.field_security.except"))
// for backwardscompat with 2.x
.should(existsQuery("indices.fields"))))
.setSize(0)
.setTerminateAfter(1))
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.filter(existsQuery("indices.query")))
.setSize(0)
.setTerminateAfter(1))
.request(),
new ActionListener<MultiSearchResponse>() {
@Override
public void onResponse(MultiSearchResponse items) {
Item[] responses = items.getResponses();
if (responses[0].isFailure()) {
usageStats.put("size", 0);
} else {
usageStats.put("size", responses[0].getResponse().getHits().getTotalHits());
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareMultiSearch()
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.setSize(0))
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.must(QueryBuilders.boolQuery()
.should(existsQuery("indices.field_security.grant"))
.should(existsQuery("indices.field_security.except"))
// for backwardscompat with 2.x
.should(existsQuery("indices.fields"))))
.setSize(0)
.setTerminateAfter(1))
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.boolQuery()
.must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
.filter(existsQuery("indices.query")))
.setSize(0)
.setTerminateAfter(1))
.request(),
new ActionListener<MultiSearchResponse>() {
@Override
public void onResponse(MultiSearchResponse items) {
Item[] responses = items.getResponses();
if (responses[0].isFailure()) {
usageStats.put("size", 0);
} else {
usageStats.put("size", responses[0].getResponse().getHits().getTotalHits());
}

if (responses[1].isFailure()) {
usageStats.put("fls", false);
} else {
usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L);
}

if (responses[2].isFailure()) {
usageStats.put("dls", false);
} else {
usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L);
}
listener.onResponse(usageStats);
}

if (responses[1].isFailure()) {
usageStats.put("fls", false);
} else {
usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L);
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}

if (responses[2].isFailure()) {
usageStats.put("dls", false);
} else {
usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L);
}
listener.onResponse(usageStats);
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}, client::multiSearch);
}, client::multiSearch));
}
}

private void getRoleDescriptor(final String roleId, ActionListener<RoleDescriptor> roleActionListener) {
if (securityLifecycleService.isSecurityIndexExisting() == false) {
// TODO remove this short circuiting and fix tests that fail without this!
roleActionListener.onResponse(null);
} else {
executeGetRoleRequest(roleId, new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
final RoleDescriptor descriptor = transformRole(response);
roleActionListener.onResponse(descriptor);
}
securityLifecycleService.prepareIndexIfNeededThenExecute(roleActionListener::onFailure, () ->
executeGetRoleRequest(roleId, new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
final RoleDescriptor descriptor = transformRole(response);
roleActionListener.onResponse(descriptor);
}

@Override
public void onFailure(Exception e) {
// if the index or the shard is not there / available we just claim the role is not there
if (TransportActions.isShardNotAvailableException(e)) {
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to load role [{}] index not available", roleId), e);
roleActionListener.onResponse(null);
} else {
logger.error(new ParameterizedMessage("failed to load role [{}]", roleId), e);
roleActionListener.onFailure(e);
}
}
});
@Override
public void onFailure(Exception e) {
// if the index or the shard is not there / available we just claim the role is not there
if (TransportActions.isShardNotAvailableException(e)) {
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to load role [{}] index not available", roleId), e);
roleActionListener.onResponse(null);
} else {
logger.error(new ParameterizedMessage("failed to load role [{}]", roleId), e);
roleActionListener.onFailure(e);
}
}
}));
}
}

private void executeGetRoleRequest(String role, ActionListener<GetResponse> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}

try {
securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () ->
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(role)).request(),
listener,
client::get);
} catch (IndexNotFoundException e) {
logger.trace(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"unable to retrieve role [{}] since security index does not exist", role), e);
listener.onResponse(new GetResponse(
new GetResult(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE,
getIdForUser(role), -1, false, null, null)));
} catch (Exception e) {
logger.error("unable to retrieve role", e);
listener.onFailure(e);
}
client::get));
}

private <Response> void clearRoleCache(final String role, ActionListener<Response> listener, Response response) {

@@ -16,6 +16,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;

@@ -25,18 +29,22 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.xpack.template.TemplateUtils;
import org.elasticsearch.xpack.upgrade.IndexUpgradeCheck;

import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

@@ -45,6 +53,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETT
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_TEMPLATE_NAME;

/**
* Manages the lifecycle of a single index, its template, mapping and data upgrades/migrations.

@@ -58,7 +67,6 @@ public class IndexLifecycleManager extends AbstractComponent {
Pattern.quote("${security.template.version}");

private final String indexName;
private final String templateName;
private final Client client;

private final List<BiConsumer<ClusterIndexHealth, ClusterIndexHealth>> indexHealthChangeListeners = new CopyOnWriteArrayList<>();

@@ -66,11 +74,10 @@ public class IndexLifecycleManager extends AbstractComponent {

private volatile State indexState = new State(false, false, false, false, null);

public IndexLifecycleManager(Settings settings, Client client, String indexName, String templateName) {
public IndexLifecycleManager(Settings settings, Client client, String indexName) {
super(settings);
this.client = client;
this.indexName = indexName;
this.templateName = templateName;
}

public boolean checkMappingVersion(Predicate<Version> requiredVersion) {

@@ -95,8 +102,8 @@ public class IndexLifecycleManager extends AbstractComponent {
return this.indexState.indexAvailable;
}

public boolean isWritable() {
return this.indexState.canWriteToIndex;
public boolean isMappingUpToDate() {
return this.indexState.mappingUpToDate;
}

/**

@@ -133,12 +140,9 @@ public class IndexLifecycleManager extends AbstractComponent {
final boolean isIndexUpToDate = indexExists == false ||
INDEX_FORMAT_SETTING.get(securityIndex.getSettings()).intValue() == INTERNAL_INDEX_FORMAT;
final boolean indexAvailable = checkIndexAvailable(clusterState);
final boolean templateIsUpToDate = TemplateUtils.checkTemplateExistsAndIsUpToDate(templateName,
SECURITY_VERSION_STRING, clusterState, logger);
final boolean mappingIsUpToDate = checkIndexMappingUpToDate(clusterState);
final boolean canWriteToIndex = templateIsUpToDate && (mappingIsUpToDate || isIndexUpToDate);
final boolean mappingIsUpToDate = indexExists == false || checkIndexMappingUpToDate(clusterState);
final Version mappingVersion = oldestIndexMappingVersion(clusterState);
this.indexState = new State(indexExists, isIndexUpToDate, indexAvailable, canWriteToIndex, mappingVersion);
this.indexState = new State(indexExists, isIndexUpToDate, indexAvailable, mappingIsUpToDate, mappingVersion);
}

private void checkIndexHealthChange(ClusterChangedEvent event) {

@@ -284,15 +288,23 @@ public class IndexLifecycleManager extends AbstractComponent {
}

/**
* Creates the security index, if it does not already exist, then runs the given
* action on the security index.
* Prepares the index by creating it if it doesn't exist or updating the mappings if the mappings are
* out of date. After any tasks have been executed, the runnable is then executed.
*/
public <T> void createIndexIfNeededThenExecute(final ActionListener<T> listener, final Runnable andThen) {
if (this.indexState.indexExists) {
andThen.run();
} else {
CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX);
request.alias(new Alias(SECURITY_INDEX_NAME));
public void prepareIndexIfNeededThenExecute(final Consumer<Exception> consumer, final Runnable andThen) {
final State indexState = this.indexState; // use a local copy so all checks execute against the same state!
// TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings)
if (indexState.indexExists && indexState.isIndexUpToDate == false) {
consumer.accept(new IllegalStateException(
"Security index is not on the current version. Security features relying on the index will not be available until " +
"the upgrade API is run on the security index"));
} else if (indexState.indexExists == false) {
Tuple<String, Settings> mappingAndSettings = loadMappingAndSettingsSourceFromTemplate();
CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX)
.alias(new Alias(SECURITY_INDEX_NAME))
.mapping("doc", mappingAndSettings.v1(), XContentType.JSON)
.waitForActiveShards(ActiveShardCount.ALL)
.settings(mappingAndSettings.v2());
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<CreateIndexResponse>() {
@Override

@@ -300,7 +312,7 @@ public class IndexLifecycleManager extends AbstractComponent {
if (createIndexResponse.isAcknowledged()) {
andThen.run();
} else {
listener.onFailure(new ElasticsearchException("Failed to create security index"));
consumer.accept(new ElasticsearchException("Failed to create security index"));
}
}

@@ -312,13 +324,33 @@ public class IndexLifecycleManager extends AbstractComponent {
// node hasn't yet received the cluster state update with the index
andThen.run();
} else {
listener.onFailure(e);
consumer.accept(e);
}
}
}, client.admin().indices()::create);
} else if (indexState.mappingUpToDate == false) {
PutMappingRequest request = new PutMappingRequest(INTERNAL_SECURITY_INDEX)
.source(loadMappingAndSettingsSourceFromTemplate().v1(), XContentType.JSON)
.type("doc");
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
ActionListener.<PutMappingResponse>wrap(putMappingResponse -> {
if (putMappingResponse.isAcknowledged()) {
andThen.run();
} else {
consumer.accept(new IllegalStateException("put mapping request was not acknowledged"));
}
}, consumer), client.admin().indices()::putMapping);
} else {
andThen.run();
}
}
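
// The mapping and settings for the security index are derived from the bundled
// template file, so index creation and the template source cannot drift apart.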
private Tuple<String, Settings> loadMappingAndSettingsSourceFromTemplate() {
final byte[] template = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json",
Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8);
PutIndexTemplateRequest request = new PutIndexTemplateRequest(SECURITY_TEMPLATE_NAME).source(template, XContentType.JSON);
return new Tuple<>(request.mappings().get("doc"), request.settings());
}
/**
* Holder class so we can update all values at once
*/

@@ -326,15 +358,15 @@ public class IndexLifecycleManager extends AbstractComponent {
private final boolean indexExists;
private final boolean isIndexUpToDate;
private final boolean indexAvailable;
private final boolean canWriteToIndex;
private final boolean mappingUpToDate;
private final Version mappingVersion;

private State(boolean indexExists, boolean isIndexUpToDate, boolean indexAvailable,
boolean canWriteToIndex, Version mappingVersion) {
boolean mappingUpToDate, Version mappingVersion) {
this.indexExists = indexExists;
this.isIndexUpToDate = isIndexUpToDate;
this.indexAvailable = indexAvailable;
this.canWriteToIndex = canWriteToIndex;
this.mappingUpToDate = mappingUpToDate;
this.mappingVersion = mappingVersion;
}
}

@@ -37,6 +37,7 @@ import org.elasticsearch.xpack.security.Security;
import org.elasticsearch.xpack.security.action.user.GetUsersResponse;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.client.SecurityClient;
import org.junit.After;
import org.junit.Before;

import java.util.ArrayList;

@@ -119,6 +120,11 @@ public class LicensingTests extends SecurityIntegTestCase {
enableLicensing();
}
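
// The @After hook below removes the .security index so each test starts from a clean slate.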
@After
public void cleanupSecurityIndex() {
deleteSecurityIndex();
}

public void testEnableDisableBehaviour() throws Exception {
IndexResponse indexResponse = index("test", "type", jsonBuilder()
.startObject()

@@ -58,8 +58,7 @@ import static org.elasticsearch.test.SecuritySettingsSource.TEST_PASSWORD_SECURE
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingAndTemplateSufficientToRead;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingAndTemplateUpToDate;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingSufficientToRead;
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.core.IsCollectionContaining.hasItem;

@@ -450,33 +449,9 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState();
assertFalse(clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK));
XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint().startObject();
assertTrue("security index mapping and template not sufficient to read:\n" +
assertTrue("security index mapping not sufficient to read:\n" +
clusterState.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject().string(),
securityIndexMappingAndTemplateSufficientToRead(clusterState, logger));
Index securityIndex = resolveSecurityIndex(clusterState.metaData());
if (securityIndex != null) {
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(securityIndex);
if (indexRoutingTable != null) {
assertTrue(indexRoutingTable.allPrimaryShardsActive());
}
}
}, 30L, TimeUnit.SECONDS);
}
}

public void assertSecurityIndexWriteable() throws Exception {
assertSecurityIndexWriteable(cluster());
}

public void assertSecurityIndexWriteable(TestCluster testCluster) throws Exception {
for (Client client : testCluster.getClients()) {
assertBusy(() -> {
ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState();
assertFalse(clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK));
XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint().startObject();
assertTrue("security index mapping and template not up to date:\n" +
clusterState.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject().string(),
securityIndexMappingAndTemplateUpToDate(clusterState, logger));
securityIndexMappingSufficientToRead(clusterState, logger));
Index securityIndex = resolveSecurityIndex(clusterState.metaData());
if (securityIndex != null) {
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(securityIndex);

@@ -18,6 +18,7 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo.FeatureSet;

import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Locale;

@@ -53,7 +54,7 @@ public class TransportXPackInfoActionTests extends ESTestCase {
}

TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null, null);
x -> null, null, Collections.emptySet());
TransportXPackInfoAction action = new TransportXPackInfoAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), licenseService, featureSets);

@@ -7,14 +7,13 @@ package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.calendars.ScheduledEventTests;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

@@ -47,13 +46,16 @@ public class PostCalendarEventActionRequestTests extends AbstractStreamableTestC
PostCalendarEventsAction.Request sourceRequest = createTestInstance();

StringBuilder requestString = new StringBuilder();
requestString.append("{\"events\": [");
for (ScheduledEvent event: sourceRequest.getScheduledEvents()) {
requestString.append(Strings.toString(event)).append("\r\n");
requestString.append(Strings.toString(event)).append(',');
}
requestString.replace(requestString.length() -1, requestString.length(), "]");
requestString.append('}');

BytesArray data = new BytesArray(requestString.toString().getBytes(StandardCharsets.UTF_8), 0, requestString.length());
XContentParser parser = createParser(XContentType.JSON.xContent(), requestString.toString());
PostCalendarEventsAction.Request parsedRequest = PostCalendarEventsAction.Request.parseRequest(
sourceRequest.getCalendarId(), data, XContentType.JSON);
sourceRequest.getCalendarId(), parser);

assertEquals(sourceRequest, parsedRequest);
}

@@ -62,15 +64,17 @@ public class PostCalendarEventActionRequestTests extends AbstractStreamableTestC
PostCalendarEventsAction.Request sourceRequest = createTestInstance("foo");
PostCalendarEventsAction.Request request = new PostCalendarEventsAction.Request("bar", sourceRequest.getScheduledEvents());

StringBuilder requestString = new StringBuilder();
requestString.append("{\"events\": [");
for (ScheduledEvent event: sourceRequest.getScheduledEvents()) {
requestString.append(Strings.toString(event)).append("\r\n");
requestString.append(Strings.toString(event)).append(',');
}
requestString.replace(requestString.length() -1, requestString.length(), "]");
requestString.append('}');

BytesArray data = new BytesArray(requestString.toString().getBytes(StandardCharsets.UTF_8), 0, requestString.length());
XContentParser parser = createParser(XContentType.JSON.xContent(), requestString.toString());
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
() -> PostCalendarEventsAction.Request.parseRequest(request.getCalendarId(), data, XContentType.JSON));
() -> PostCalendarEventsAction.Request.parseRequest("bar", parser));
assertEquals("Inconsistent calendar_id; 'foo' specified in the body differs from 'bar' specified as a URL argument",
e.getMessage());
}

@@ -17,6 +17,7 @@ import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.JobState;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;

import java.util.Collections;
import java.util.Date;

import static org.elasticsearch.xpack.ml.action.OpenJobActionTests.addJobTask;

@@ -87,7 +88,7 @@ public class StartDatafeedActionTests extends ESTestCase {
StartDatafeedAction.DatafeedParams params,
DatafeedManager datafeedManager) {
TransportStartDatafeedAction.DatafeedTask task = new TransportStartDatafeedAction.DatafeedTask(id, type, action, parentTaskId,
params);
params, Collections.emptyMap());
task.datafeedManager = datafeedManager;
return task;
}

@@ -7,6 +7,8 @@ package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.MlFilterTests;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;

import java.util.ArrayList;

@@ -29,7 +31,11 @@ public class UpdateProcessActionRequestTests extends AbstractStreamableTestCase<
updates.add(new JobUpdate.DetectorUpdate(randomInt(), randomAlphaOfLength(10), null));
}
}
return new UpdateProcessAction.Request(randomAlphaOfLength(10), config, updates, randomBoolean());
MlFilter filter = null;
if (randomBoolean()) {
filter = MlFilterTests.createTestFilter();
}
return new UpdateProcessAction.Request(randomAlphaOfLength(10), config, updates, filter, randomBoolean());
}

@Override

@@ -28,25 +28,37 @@ import org.elasticsearch.xpack.ml.action.util.QueryPage;
import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzerTests;
import org.elasticsearch.xpack.ml.job.config.AnalysisConfig;
import org.elasticsearch.xpack.ml.job.config.DataDescription;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.Detector;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.JobState;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.RuleCondition;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import org.mockito.Matchers;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;

import static org.elasticsearch.xpack.ml.action.OpenJobActionTests.addJobTask;
import static org.elasticsearch.xpack.ml.job.config.JobTests.buildJobBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class JobManagerTests extends ESTestCase {

@@ -57,6 +69,7 @@ public class JobManagerTests extends ESTestCase {
private ClusterService clusterService;
private JobProvider jobProvider;
private Auditor auditor;
private UpdateJobProcessNotifier updateJobProcessNotifier;

@Before
public void setup() throws Exception {

@@ -67,6 +80,7 @@ public class JobManagerTests extends ESTestCase {
clusterService = mock(ClusterService.class);
jobProvider = mock(JobProvider.class);
auditor = mock(Auditor.class);
updateJobProcessNotifier = mock(UpdateJobProcessNotifier.class);
}

public void testGetJobOrThrowIfUnknown_GivenUnknownJob() {

@@ -160,6 +174,98 @@ public class JobManagerTests extends ESTestCase {
});
}
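
// Only jobs that are OPENED and whose detector rules reference the changed filter
// should be pushed an update; job 3 references the filter but is not open.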
public void testUpdateProcessOnFilterChanged() {
Detector.Builder detectorReferencingFilter = new Detector.Builder("count", null);
detectorReferencingFilter.setByFieldName("foo");
RuleCondition.createCategorical("foo", "foo_filter");
DetectionRule filterRule = new DetectionRule.Builder(Collections.singletonList(
RuleCondition.createCategorical("foo", "foo_filter"))).build();
detectorReferencingFilter.setRules(Collections.singletonList(filterRule));
AnalysisConfig.Builder filterAnalysisConfig = new AnalysisConfig.Builder(Collections.singletonList(
detectorReferencingFilter.build()));

Job.Builder jobReferencingFilter1 = buildJobBuilder("job-referencing-filter-1");
jobReferencingFilter1.setAnalysisConfig(filterAnalysisConfig);
Job.Builder jobReferencingFilter2 = buildJobBuilder("job-referencing-filter-2");
jobReferencingFilter2.setAnalysisConfig(filterAnalysisConfig);
Job.Builder jobReferencingFilter3 = buildJobBuilder("job-referencing-filter-3");
jobReferencingFilter3.setAnalysisConfig(filterAnalysisConfig);
Job.Builder jobWithoutFilter = buildJobBuilder("job-without-filter");

MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
mlMetadata.putJob(jobReferencingFilter1.build(), false);
mlMetadata.putJob(jobReferencingFilter2.build(), false);
mlMetadata.putJob(jobReferencingFilter3.build(), false);
mlMetadata.putJob(jobWithoutFilter.build(), false);

PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask(jobReferencingFilter1.getId(), "node_id", JobState.OPENED, tasksBuilder);
addJobTask(jobReferencingFilter2.getId(), "node_id", JobState.OPENED, tasksBuilder);
addJobTask(jobWithoutFilter.getId(), "node_id", JobState.OPENED, tasksBuilder);

ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
.metaData(MetaData.builder()
.putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())
.putCustom(MLMetadataField.TYPE, mlMetadata.build()))
.build();
when(clusterService.state()).thenReturn(clusterState);

JobManager jobManager = createJobManager();

MlFilter filter = new MlFilter("foo_filter", Arrays.asList("a", "b"));

jobManager.updateProcessOnFilterChanged(filter);

ArgumentCaptor<UpdateParams> updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class);
verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture());

List<UpdateParams> capturedUpdateParams = updateParamsCaptor.getAllValues();
assertThat(capturedUpdateParams.size(), equalTo(2));
assertThat(capturedUpdateParams.get(0).getJobId(), equalTo(jobReferencingFilter1.getId()));
assertThat(capturedUpdateParams.get(0).getFilter(), equalTo(filter));
assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(jobReferencingFilter2.getId()));
assertThat(capturedUpdateParams.get(1).getFilter(), equalTo(filter));
}
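
// Only jobs that are both open and named in the calendar update are told to refresh
// their scheduled events; job-4 is in the calendar but has no open task.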
public void testUpdateProcessOnCalendarChanged() {
Job.Builder job1 = buildJobBuilder("job-1");
Job.Builder job2 = buildJobBuilder("job-2");
Job.Builder job3 = buildJobBuilder("job-3");
Job.Builder job4 = buildJobBuilder("job-4");

MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
mlMetadata.putJob(job1.build(), false);
mlMetadata.putJob(job2.build(), false);
mlMetadata.putJob(job3.build(), false);
mlMetadata.putJob(job4.build(), false);

PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask(job1.getId(), "node_id", JobState.OPENED, tasksBuilder);
addJobTask(job2.getId(), "node_id", JobState.OPENED, tasksBuilder);
addJobTask(job3.getId(), "node_id", JobState.OPENED, tasksBuilder);

ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
.metaData(MetaData.builder()
.putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())
.putCustom(MLMetadataField.TYPE, mlMetadata.build()))
.build();
when(clusterService.state()).thenReturn(clusterState);

JobManager jobManager = createJobManager();

jobManager.updateProcessOnCalendarChanged(Arrays.asList("job-1", "job-3", "job-4"));

ArgumentCaptor<UpdateParams> updateParamsCaptor = ArgumentCaptor.forClass(UpdateParams.class);
verify(updateJobProcessNotifier, times(2)).submitJobUpdate(updateParamsCaptor.capture());

List<UpdateParams> capturedUpdateParams = updateParamsCaptor.getAllValues();
assertThat(capturedUpdateParams.size(), equalTo(2));
assertThat(capturedUpdateParams.get(0).getJobId(), equalTo(job1.getId()));
assertThat(capturedUpdateParams.get(0).isUpdateScheduledEvents(), is(true));
assertThat(capturedUpdateParams.get(1).getJobId(), equalTo(job3.getId()));
assertThat(capturedUpdateParams.get(1).isUpdateScheduledEvents(), is(true));
}

private Job.Builder createJob() {
Detector.Builder d1 = new Detector.Builder("info_content", "domain");
d1.setOverFieldName("client");

@@ -176,8 +282,7 @@ public class JobManagerTests extends ESTestCase {
ClusterSettings clusterSettings = new ClusterSettings(environment.settings(),
Collections.singleton(MachineLearningClientActionPlugin.MAX_MODEL_MEMORY_LIMIT));
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
UpdateJobProcessNotifier notifier = mock(UpdateJobProcessNotifier.class);
return new JobManager(environment, environment.settings(), jobProvider, clusterService, auditor, client, notifier);
return new JobManager(environment, environment.settings(), jobProvider, clusterService, auditor, client, updateJobProcessNotifier);
}

private ClusterState createClusterState() {

@@ -32,8 +32,6 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import org.mockito.Mockito;

import java.io.ByteArrayInputStream;

@@ -96,38 +94,20 @@ public class AutodetectCommunicatorTests extends ESTestCase {
List<RuleCondition> conditions = Collections.singletonList(
RuleCondition.createCategorical("foo", "bar"));

DetectionRule updatedRule = new DetectionRule.Builder(conditions).build();
List<JobUpdate.DetectorUpdate> detectorUpdates = Collections.singletonList(
new JobUpdate.DetectorUpdate(0, "updated description",
Collections.singletonList(new DetectionRule.Builder(conditions).build())));
new JobUpdate.DetectorUpdate(0, "updated description", Collections.singletonList(updatedRule)));

UpdateParams updateParams = new UpdateParams(null, detectorUpdates, true);
List<ScheduledEvent> events = Collections.singletonList(ScheduledEventTests.createScheduledEvent(randomAlphaOfLength(10)));
UpdateParams updateParams = UpdateParams.builder("foo").detectorUpdates(detectorUpdates).build();
List<ScheduledEvent> events = Collections.singletonList(
ScheduledEventTests.createScheduledEvent(randomAlphaOfLength(10)));

communicator.writeUpdateProcessMessage(updateParams, events, ((aVoid, e) -> {}));

// There are 2 detectors; both will be updated with the rule for the scheduled event.
// The first has an additional update rule
ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
InOrder inOrder = Mockito.inOrder(process);
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(0), captor.capture());
assertEquals(2, captor.getValue().size());
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(1), captor.capture());
assertEquals(1, captor.getValue().size());
verify(process).writeUpdateDetectorRulesMessage(eq(0), eq(Collections.singletonList(updatedRule)));
verify(process).writeUpdateScheduledEventsMessage(events, AnalysisConfig.Builder.DEFAULT_BUCKET_SPAN);
verify(process).isProcessAlive();
verifyNoMoreInteractions(process);

// This time there is a single detector update and no scheduled events
detectorUpdates = Collections.singletonList(
new JobUpdate.DetectorUpdate(1, "updated description",
Collections.singletonList(new DetectionRule.Builder(conditions).build())));
updateParams = new UpdateParams(null, detectorUpdates, true);
communicator.writeUpdateProcessMessage(updateParams, Collections.emptyList(), ((aVoid, e) -> {}));

inOrder = Mockito.inOrder(process);
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(1), captor.capture());
assertEquals(1, captor.getValue().size());
verify(process, times(2)).isProcessAlive();
}

public void testFlushJob() throws IOException {

@@ -475,7 +475,7 @@ public class AutodetectProcessManagerTests extends ESTestCase {
verify(manager).setJobState(any(), eq(JobState.FAILED));
}

public void testwriteUpdateProcessMessage() {
public void testWriteUpdateProcessMessage() {
AutodetectCommunicator communicator = mock(AutodetectCommunicator.class);
AutodetectProcessManager manager = createManagerAndCallProcessData(communicator, "foo");
ModelPlotConfig modelConfig = mock(ModelPlotConfig.class);

@@ -483,9 +483,9 @@ public class AutodetectProcessManagerTests extends ESTestCase {
List<JobUpdate.DetectorUpdate> detectorUpdates = Collections.singletonList(new JobUpdate.DetectorUpdate(2, null, rules));
JobTask jobTask = mock(JobTask.class);
when(jobTask.getJobId()).thenReturn("foo");
UpdateParams updateParams = new UpdateParams(modelConfig, detectorUpdates, false);
UpdateParams updateParams = UpdateParams.builder("foo").modelPlotConfig(modelConfig).detectorUpdates(detectorUpdates).build();
manager.writeUpdateProcessMessage(jobTask, updateParams, e -> {});
verify(communicator).writeUpdateProcessMessage(same(updateParams), eq(Collections.emptyList()), any());
verify(communicator).writeUpdateProcessMessage(same(updateParams), eq(null), any());
}

public void testJobHasActiveAutodetectProcess() {

@@ -5,10 +5,13 @@
*/
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;
import org.elasticsearch.xpack.ml.job.config.Condition;
import org.elasticsearch.xpack.ml.job.config.Connective;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.config.Operator;
import org.elasticsearch.xpack.ml.job.config.RuleCondition;

@@ -21,6 +24,7 @@ import org.mockito.InOrder;
import org.mockito.Mockito;

import java.io.IOException;
import java.time.ZonedDateTime;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

@@ -205,6 +209,66 @@ public class ControlMsgToProcessWriterTests extends ESTestCase {
verifyNoMoreInteractions(lengthEncodedWriter);
}
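
// Control messages are length-encoded as two fields: an empty spacer field followed
// by the "u[...]" update payload itself.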
public void testWriteUpdateFiltersMessage() throws IOException {
ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(lengthEncodedWriter, 2);

MlFilter filter1 = new MlFilter("filter_1", Arrays.asList("a"));
MlFilter filter2 = new MlFilter("filter_2", Arrays.asList("b", "c"));

writer.writeUpdateFiltersMessage(Arrays.asList(filter1, filter2));

InOrder inOrder = inOrder(lengthEncodedWriter);
inOrder.verify(lengthEncodedWriter).writeNumFields(2);
inOrder.verify(lengthEncodedWriter, times(1)).writeField("");
inOrder.verify(lengthEncodedWriter).writeField("u[filters]\nfilter.filter_1 = [\"a\"]\nfilter.filter_2 = [\"b\",\"c\"]\n");
verifyNoMoreInteractions(lengthEncodedWriter);
}

public void testWriteUpdateScheduledEventsMessage() throws IOException {
ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(lengthEncodedWriter, 2);

ScheduledEvent.Builder event1 = new ScheduledEvent.Builder();
event1.calendarId("moon");
event1.description("new year");
event1.startTime(ZonedDateTime.parse("2018-01-01T00:00:00Z"));
event1.endTime(ZonedDateTime.parse("2018-01-02T00:00:00Z"));

ScheduledEvent.Builder event2 = new ScheduledEvent.Builder();
event2.calendarId("moon");
event2.description("Jan maintenance day");
event2.startTime(ZonedDateTime.parse("2018-01-06T00:00:00Z"));
event2.endTime(ZonedDateTime.parse("2018-01-07T00:00:00Z"));

writer.writeUpdateScheduledEventsMessage(Arrays.asList(event1.build(), event2.build()), TimeValue.timeValueHours(1));

InOrder inOrder = inOrder(lengthEncodedWriter);
inOrder.verify(lengthEncodedWriter).writeNumFields(2);
inOrder.verify(lengthEncodedWriter, times(1)).writeField("");
inOrder.verify(lengthEncodedWriter).writeField("u[scheduledEvents]\n"
+ "scheduledevent.0.description = new year\n"
+ "scheduledevent.0.rules = [{\"actions\":[\"filter_results\",\"skip_sampling\"],\"conditions_connective\":\"and\","
+ "\"conditions\":[{\"type\":\"time\",\"condition\":{\"operator\":\"gte\",\"value\":\"1514764800\"}},"
+ "{\"type\":\"time\",\"condition\":{\"operator\":\"lt\",\"value\":\"1514851200\"}}]}]\n"
+ "scheduledevent.1.description = Jan maintenance day\n"
+ "scheduledevent.1.rules = [{\"actions\":[\"filter_results\",\"skip_sampling\"],\"conditions_connective\":\"and\","
+ "\"conditions\":[{\"type\":\"time\",\"condition\":{\"operator\":\"gte\",\"value\":\"1515196800\"}},"
+ "{\"type\":\"time\",\"condition\":{\"operator\":\"lt\",\"value\":\"1515283200\"}}]}]\n"
);
verifyNoMoreInteractions(lengthEncodedWriter);
}

public void testWriteUpdateScheduledEventsMessage_GivenEmpty() throws IOException {
ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(lengthEncodedWriter, 2);

writer.writeUpdateScheduledEventsMessage(Collections.emptyList(), TimeValue.timeValueHours(1));

InOrder inOrder = inOrder(lengthEncodedWriter);
inOrder.verify(lengthEncodedWriter).writeNumFields(2);
inOrder.verify(lengthEncodedWriter, times(1)).writeField("");
inOrder.verify(lengthEncodedWriter).writeField("u[scheduledEvents]\nclear = true\n");
verifyNoMoreInteractions(lengthEncodedWriter);
}

private static List<RuleCondition> createRule(String value) {
Condition condition = new Condition(Operator.GT, value);
return Collections.singletonList(RuleCondition.createNumerical(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition));

@@ -228,9 +228,9 @@ public class FieldConfigWriterTests extends ESTestCase {

createFieldConfigWriter().write();

verify(writer).write("detector.0.clause = count\n" +
"filter.filter_1 = [\"a\",\"b\"]\n" +
"filter.filter_2 = [\"c\",\"d\"]\n");
verify(writer).write("filter.filter_1 = [\"a\",\"b\"]\n" +
"filter.filter_2 = [\"c\",\"d\"]\n" +
"detector.0.clause = count\n");
verifyNoMoreInteractions(writer);
}

@@ -0,0 +1,39 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.ml.job.config.MlFilter;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class MlFilterWriterTests extends ESTestCase {

public void testWrite_GivenEmpty() throws IOException {
StringBuilder buffer = new StringBuilder();
new MlFilterWriter(Collections.emptyList(), buffer).write();

assertThat(buffer.toString().isEmpty(), is(true));
}

public void testWrite() throws IOException {
List<MlFilter> filters = new ArrayList<>();
filters.add(new MlFilter("filter_1", Arrays.asList("a", "b")));
filters.add(new MlFilter("filter_2", Arrays.asList("c", "d")));

StringBuilder buffer = new StringBuilder();
new MlFilterWriter(filters, buffer).write();

assertThat(buffer.toString(), equalTo("filter.filter_1 = [\"a\",\"b\"]\nfilter.filter_2 = [\"c\",\"d\"]\n"));
}
}

@@ -0,0 +1,57 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.process.autodetect.writer;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.ml.calendars.ScheduledEvent;

import java.io.IOException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class ScheduledEventsWriterTests extends ESTestCase {

public void testWrite_GivenEmpty() throws IOException {
StringBuilder buffer = new StringBuilder();
new ScheduledEventsWriter(Collections.emptyList(), TimeValue.timeValueHours(1), buffer).write();

assertThat(buffer.toString().isEmpty(), is(true));
}
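
// Each scheduled event serializes as a description plus one rule with paired
// time conditions: gte the start epoch second and lt the end epoch second.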
public void testWrite() throws IOException {
List<ScheduledEvent> events = new ArrayList<>();
events.add(new ScheduledEvent.Builder().description("Black Friday")
.startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(1511395200000L), ZoneOffset.UTC))
.endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(1515369600000L), ZoneOffset.UTC))
.calendarId("calendar_id").build());
events.add(new ScheduledEvent.Builder().description("Blue Monday")
.startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(1519603200000L), ZoneOffset.UTC))
.endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(1519862400000L), ZoneOffset.UTC))
.calendarId("calendar_id").build());

StringBuilder buffer = new StringBuilder();
new ScheduledEventsWriter(events, TimeValue.timeValueHours(1), buffer).write();

String expectedString = "scheduledevent.0.description = Black Friday\n" +
"scheduledevent.0.rules = [{\"actions\":[\"filter_results\",\"skip_sampling\"],\"conditions_connective\":\"and\"," +
"\"conditions\":[{\"type\":\"time\",\"condition\":{\"operator\":\"gte\",\"value\":\"1511395200\"}}," +
"{\"type\":\"time\",\"condition\":{\"operator\":\"lt\",\"value\":\"1515369600\"}}]}]\n" +
"scheduledevent.1.description = Blue Monday\n" +
"scheduledevent.1.rules = [{\"actions\":[\"filter_results\",\"skip_sampling\"],\"conditions_connective\":\"and\"," +
"\"conditions\":[{\"type\":\"time\",\"condition\":{\"operator\":\"gte\",\"value\":\"1519603200\"}}," +
"{\"type\":\"time\",\"condition\":{\"operator\":\"lt\",\"value\":\"1519862400\"}}]}]" +
"\n";
assertThat(buffer.toString(), equalTo(expectedString));
}
}

@@ -21,11 +21,14 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;

@@ -47,6 +50,23 @@ import static org.mockito.Mockito.when;

public class PersistentTasksNodeServiceTests extends ESTestCase {
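
// TaskManager now takes a ThreadPool and task headers, so each test creates a real
// TestThreadPool in setUp and terminates it in tearDown.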
private ThreadPool threadPool;

@Override
@Before
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getClass().getName());
}

@Override
@After
public void tearDown() throws Exception {
terminate(threadPool);
super.tearDown();
}

private ClusterState createInitialClusterState(int nonLocalNodesCount, Settings settings) {
ClusterState.Builder state = ClusterState.builder(new ClusterName("PersistentActionExecutorTests"));
state.metaData(MetaData.builder().generateClusterUuidIfNeeded());

@@ -70,14 +90,14 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
// need to account for 5 original tasks on each node and their relocations
for (int i = 0; i < (nonLocalNodesCount + 1) * 10; i++) {
TaskId parentId = new TaskId("cluster", i);
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any())).thenReturn(
new TestPersistentTasksPlugin.TestTask(i, "persistent", "test", "", parentId));
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn(
new TestPersistentTasksPlugin.TestTask(i, "persistent", "test", "", parentId, Collections.emptyMap()));
}
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));

MockExecutor executor = new MockExecutor();
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, new TaskManager(Settings.EMPTY), executor);
registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor);

ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY);

@@ -161,13 +181,14 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
when(action.getExecutor()).thenReturn(ThreadPool.Names.SAME);
when(action.getTaskName()).thenReturn(TestPersistentTasksExecutor.NAME);
TaskId parentId = new TaskId("cluster", 1);
AllocatedPersistentTask nodeTask = new TestPersistentTasksPlugin.TestTask(0, "persistent", "test", "", parentId);
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any())).thenReturn(nodeTask);
AllocatedPersistentTask nodeTask =
new TestPersistentTasksPlugin.TestTask(0, "persistent", "test", "", parentId, Collections.emptyMap());
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn(nodeTask);
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));

MockExecutor executor = new MockExecutor();
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, new TaskManager(Settings.EMPTY), executor);
registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor);

ClusterState state = createInitialClusterState(1, Settings.EMPTY);

@@ -209,13 +230,14 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
@SuppressWarnings("unchecked") PersistentTasksExecutor<TestParams> action = mock(PersistentTasksExecutor.class);
when(action.getExecutor()).thenReturn(ThreadPool.Names.SAME);
when(action.getTaskName()).thenReturn("test");
when(action.createTask(anyLong(), anyString(), anyString(), any(), any()))
.thenReturn(new TestPersistentTasksPlugin.TestTask(1, "persistent", "test", "", new TaskId("cluster", 1)));
when(action.createTask(anyLong(), anyString(), anyString(), any(), any(), any()))
.thenReturn(new TestPersistentTasksPlugin.TestTask(1, "persistent", "test", "", new TaskId("cluster", 1),
Collections.emptyMap()));
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));

int nonLocalNodesCount = randomInt(10);
MockExecutor executor = new MockExecutor();
TaskManager taskManager = new TaskManager(Settings.EMPTY);
TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet());
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, taskManager, executor);

@@ -57,6 +57,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

@@ -370,8 +371,8 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin {

@Override
protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
PersistentTask<TestParams> task) {
return new TestTask(id, type, action, getDescription(task), parentTaskId);
PersistentTask<TestParams> task, Map<String, String> headers) {
return new TestTask(id, type, action, getDescription(task), parentTaskId, headers);
}
}

@@ -399,8 +400,8 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin {
public static class TestTask extends AllocatedPersistentTask {
private volatile String operation;

public TestTask(long id, String type, String action, String description, TaskId parentTask) {
super(id, type, action, description, parentTask);
public TestTask(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) {
super(id, type, action, description, parentTask, headers);
}

public String getOperation() {

@@ -1,84 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security;

import org.apache.http.HttpEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Response;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;
import org.junit.Before;

import java.nio.charset.StandardCharsets;
import java.util.Map;

import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_TEMPLATE_NAME;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

/**
* A base {@link ESClientYamlSuiteTestCase} test class for the security module,
* which depends on security template and mappings being up to date before any writes
* to the {@code .security} index can take place.
*/
public abstract class SecurityClusterClientYamlTestCase extends ESClientYamlSuiteTestCase {

public SecurityClusterClientYamlTestCase(ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}

@Before
public void waitForSecuritySetup() throws Exception {
waitForSecurity();
}

public static void waitForSecurity() throws Exception {
String masterNode = null;
HttpEntity entity = client().performRequest("GET", "/_cat/nodes?h=id,master").getEntity();
String catNodesResponse = EntityUtils.toString(entity, StandardCharsets.UTF_8);
for (String line : catNodesResponse.split("\n")) {
int indexOfStar = line.indexOf('*'); // * in the node's output denotes it is master
if (indexOfStar != -1) {
masterNode = line.substring(0, indexOfStar).trim();
break;
}
}
assertNotNull(masterNode);
final String masterNodeId = masterNode;

assertBusy(() -> {
try {
Response nodesResponse = client().performRequest("GET", "/_nodes");
ObjectPath nodesPath = ObjectPath.createFromResponse(nodesResponse);
Map<String, Object> nodes = nodesPath.evaluate("nodes");
Version masterVersion = null;
for (String nodeId : nodes.keySet()) {
// get the ES version number master is on
if (nodeId.startsWith(masterNodeId)) {
masterVersion = Version.fromString(nodesPath.evaluate("nodes." + nodeId + ".version"));
break;
}
}
assertNotNull(masterVersion);

Response response = client().performRequest("GET", "/_cluster/state/metadata");
ObjectPath objectPath = ObjectPath.createFromResponse(response);
String mappingsPath = "metadata.templates." + SECURITY_TEMPLATE_NAME + ".mappings";
Map<String, Object> mappings = objectPath.evaluate(mappingsPath);
assertNotNull(mappings);
assertThat(mappings.size(), greaterThanOrEqualTo(1));
for (String key : mappings.keySet()) {
String templatePath = mappingsPath + "." + key + "._meta.security-version";
Version templateVersion = Version.fromString(objectPath.evaluate(templatePath));
assertEquals(masterVersion, templateVersion);
}
} catch (Exception e) {
throw new AssertionError("failed to get cluster state", e);
}
});
}
}

@@ -44,7 +44,7 @@ import org.junit.Before;

import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_TEMPLATE_NAME;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingAndTemplateUpToDate;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingUpToDate;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

@@ -135,7 +135,7 @@ public class SecurityLifecycleServiceTests extends ESTestCase {
ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString);
final ClusterState clusterState = clusterStateBuilder.build();
IllegalStateException exception = expectThrows(IllegalStateException.class,
() -> securityIndexMappingAndTemplateUpToDate(clusterState, logger));
() -> securityIndexMappingUpToDate(clusterState, logger));
assertEquals("Cannot read security-version string in index " + SECURITY_INDEX_NAME,
exception.getMessage());
}

@ -141,34 +141,13 @@ public class SecuritySettingsTests extends ESTestCase {
    public void testValidAutoCreateIndex() {
        Security.validateAutoCreateIndex(Settings.EMPTY);
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", true).build());

        try {
            Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build());
            fail("IllegalArgumentException expected");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString(SecurityLifecycleService.SECURITY_INDEX_NAME));
            assertThat(e.getMessage(), not(containsString(IndexAuditTrail.INDEX_NAME_PREFIX)));
        }

        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build());
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security,.security-6").build());
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security*").build());
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "*s*").build());
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".s*").build());

        try {
            Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "foo").build());
            fail("IllegalArgumentException expected");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString(SecurityLifecycleService.SECURITY_INDEX_NAME));
            assertThat(e.getMessage(), not(containsString(IndexAuditTrail.INDEX_NAME_PREFIX)));
        }

        try {
            Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security_audit_log*").build());
            fail("IllegalArgumentException expected");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString(SecurityLifecycleService.SECURITY_INDEX_NAME));
        }
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "foo").build());
        Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security_audit_log*").build());

        Security.validateAutoCreateIndex(Settings.builder()
                .put("action.auto_create_index", ".security,.security-6")

@ -183,7 +162,6 @@ public class SecuritySettingsTests extends ESTestCase {
                    .build());
            fail("IllegalArgumentException expected");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString(SecurityLifecycleService.SECURITY_INDEX_NAME));
            assertThat(e.getMessage(), containsString(IndexAuditTrail.INDEX_NAME_PREFIX));
        }

@ -19,6 +19,7 @@ import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.ArrayList;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -41,7 +42,7 @@ public class TransportDeleteRoleActionTests extends ESTestCase {
        final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names()));
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                (x) -> null, null);
                (x) -> null, null, Collections.emptySet());
        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -72,7 +73,7 @@ public class TransportDeleteRoleActionTests extends ESTestCase {
        final String roleName = randomFrom("admin", "dept_a", "restricted");
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                (x) -> null, null);
                (x) -> null, null, Collections.emptySet());
        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -116,7 +117,7 @@ public class TransportDeleteRoleActionTests extends ESTestCase {
        final String roleName = randomFrom("admin", "dept_a", "restricted");
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                (x) -> null, null);
                (x) -> null, null, Collections.emptySet());
        TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -42,7 +42,7 @@ public class TransportGetRolesActionTests extends ESTestCase {
    public void testReservedRoles() {
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore());

@ -88,7 +88,7 @@ public class TransportGetRolesActionTests extends ESTestCase {
        final List<RoleDescriptor> storeRoleDescriptors = randomRoleDescriptors();
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore());

@ -140,7 +140,7 @@ public class TransportGetRolesActionTests extends ESTestCase {

        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore());

@ -204,7 +204,7 @@ public class TransportGetRolesActionTests extends ESTestCase {
        final List<RoleDescriptor> storeRoleDescriptors = randomRoleDescriptors();
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore());

@ -20,6 +20,7 @@ import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.ArrayList;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -42,7 +43,7 @@ public class TransportPutRoleActionTests extends ESTestCase {
        final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names()));
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -73,7 +74,7 @@ public class TransportPutRoleActionTests extends ESTestCase {
        final String roleName = randomFrom("admin", "dept_a", "restricted");
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -117,7 +118,7 @@ public class TransportPutRoleActionTests extends ESTestCase {
        final String roleName = randomFrom("admin", "dept_a", "restricted");
        NativeRolesStore rolesStore = mock(NativeRolesStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), rolesStore, transportService);

@ -45,7 +45,7 @@ public class TransportGetRoleMappingsActionTests extends ESTestCase {
    public void setupMocks() {
        store = mock(NativeRoleMappingStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null);
                TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
        action = new TransportGetRoleMappingsAction(Settings.EMPTY, mock(ThreadPool.class),
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class),
                transportService, store);

@ -40,7 +40,7 @@ public class TransportPutRoleMappingActionTests extends ESTestCase {
    public void setupMocks() {
        store = mock(NativeRoleMappingStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null);
                TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
        action = new TransportPutRoleMappingAction(Settings.EMPTY, mock(ThreadPool.class),
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class),
                transportService, store);

@ -20,6 +20,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.user.XPackUser;

import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -36,7 +37,7 @@ public class TransportAuthenticateActionTests extends ESTestCase {
        SecurityContext securityContext = mock(SecurityContext.class);
        when(securityContext.getUser()).thenReturn(randomFrom(SystemUser.INSTANCE, XPackUser.INSTANCE));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext);

@ -62,7 +63,7 @@ public class TransportAuthenticateActionTests extends ESTestCase {
    public void testNullUser() {
        SecurityContext securityContext = mock(SecurityContext.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext);

@ -90,7 +91,7 @@ public class TransportAuthenticateActionTests extends ESTestCase {
        SecurityContext securityContext = mock(SecurityContext.class);
        when(securityContext.getUser()).thenReturn(user);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext);

@ -25,6 +25,7 @@ import org.elasticsearch.xpack.security.user.XPackUser;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -48,7 +49,7 @@ public class TransportChangePasswordActionTests extends ESTestCase {
        AnonymousUser anonymousUser = new AnonymousUser(settings);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportChangePasswordAction action = new TransportChangePasswordAction(settings, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -79,7 +80,7 @@ public class TransportChangePasswordActionTests extends ESTestCase {
    public void testInternalUsers() {
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -121,7 +122,7 @@ public class TransportChangePasswordActionTests extends ESTestCase {
            return null;
        }).when(usersStore).changePassword(eq(request), any(ActionListener.class));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -162,7 +163,7 @@ public class TransportChangePasswordActionTests extends ESTestCase {
            }
        }).when(usersStore).changePassword(eq(request), any(ActionListener.class));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -23,6 +23,7 @@ import org.elasticsearch.xpack.security.user.XPackUser;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -45,7 +46,7 @@ public class TransportDeleteUserActionTests extends ESTestCase {
        Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build();
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportDeleteUserAction action = new TransportDeleteUserAction(settings, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -74,7 +75,7 @@ public class TransportDeleteUserActionTests extends ESTestCase {
    public void testInternalUser() {
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -104,7 +105,7 @@ public class TransportDeleteUserActionTests extends ESTestCase {
        final User reserved = randomFrom(new ElasticUser(true), new KibanaUser(true));
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -134,7 +135,7 @@ public class TransportDeleteUserActionTests extends ESTestCase {
        final User user = new User("joe");
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -175,7 +176,7 @@ public class TransportDeleteUserActionTests extends ESTestCase {
        final User user = new User("joe");
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -80,7 +80,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
        ReservedRealm reservedRealm =
                new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, securityLifecycleService, new ThreadContext(Settings.EMPTY));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm);

@ -115,7 +115,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
    public void testInternalUser() {
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class));

@ -158,7 +158,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
        final List<User> reservedUsers = randomSubsetOf(size, allReservedUsers);
        final List<String> names = reservedUsers.stream().map(User::principal).collect(Collectors.toList());
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm);

@ -198,7 +198,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
        ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings),
                securityLifecycleService, new ThreadContext(Settings.EMPTY));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm);

@ -245,7 +245,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
        final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class));

@ -293,7 +293,7 @@ public class TransportGetUsersActionTests extends ESTestCase {
        final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class));

@ -58,7 +58,7 @@ public class TransportHasPrivilegesActionTests extends ESTestCase {
        final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        final TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService
                .NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());

        final Authentication authentication = mock(Authentication.class);
        threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication);

@ -56,7 +56,7 @@ public class TransportPutUserActionTests extends ESTestCase {
        final AnonymousUser anonymousUser = new AnonymousUser(settings);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutUserAction action = new TransportPutUserAction(settings, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -86,7 +86,7 @@ public class TransportPutUserActionTests extends ESTestCase {
    public void testSystemUser() {
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -125,7 +125,7 @@ public class TransportPutUserActionTests extends ESTestCase {
        reservedRealm.users(userFuture);
        final User reserved = randomFrom(userFuture.actionGet().toArray(new User[0]));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -155,7 +155,7 @@ public class TransportPutUserActionTests extends ESTestCase {
        final User user = new User("joe");
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -201,7 +201,7 @@ public class TransportPutUserActionTests extends ESTestCase {
        final User user = new User("joe");
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore, transportService);

@ -27,6 +27,7 @@ import org.elasticsearch.xpack.security.user.XPackUser;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.Collections;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;

@ -60,7 +61,7 @@ public class TransportSetEnabledActionTests extends ESTestCase {
        when(authentication.getUser()).thenReturn(user);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportSetEnabledAction action = new TransportSetEnabledAction(settings, threadPool, transportService, mock(ActionFilters.class),
                mock(IndexNameExpressionResolver.class), usersStore);

@ -98,7 +99,7 @@ public class TransportSetEnabledActionTests extends ESTestCase {
        when(authentication.getUser()).thenReturn(user);
        NativeUsersStore usersStore = mock(NativeUsersStore.class);
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -152,7 +153,7 @@ public class TransportSetEnabledActionTests extends ESTestCase {
        }).when(usersStore)
                .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -204,7 +205,7 @@ public class TransportSetEnabledActionTests extends ESTestCase {
        }).when(usersStore)
                .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -244,7 +245,7 @@ public class TransportSetEnabledActionTests extends ESTestCase {
        request.enabled(randomBoolean());
        request.setRefreshPolicy(randomFrom(RefreshPolicy.values()));
        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                x -> null, null);
                x -> null, null, Collections.emptySet());
        TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService,
                mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);

@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.search.SearchHit;

@ -43,6 +44,7 @@ import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.Message;
import org.elasticsearch.xpack.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.security.transport.filter.IPFilter;

@ -243,13 +245,13 @@ public class IndexAuditTrailTests extends SecurityIntegTestCase {
        }

        if (remoteCluster != null) {
            remoteCluster.wipe(Collections.<String>emptySet());
            remoteCluster.wipe(excludeTemplates());
        }
    }

    @Override
    protected Set<String> excludeTemplates() {
        return Collections.singleton(IndexAuditTrail.INDEX_TEMPLATE_NAME);
        return Sets.newHashSet(SecurityLifecycleService.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME);
    }

    @Override

@ -10,6 +10,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster;

@ -17,6 +18,7 @@ import org.elasticsearch.test.SecurityIntegTestCase;
import org.elasticsearch.test.SecuritySettingsSource;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.audit.AuditTrail;
import org.elasticsearch.xpack.security.audit.AuditTrailService;
import org.junit.After;

@ -69,7 +71,7 @@ public class RemoteIndexAuditTrailStartingTests extends SecurityIntegTestCase {

    @Override
    protected Set<String> excludeTemplates() {
        return Collections.singleton(IndexAuditTrail.INDEX_TEMPLATE_NAME);
        return Sets.newHashSet(SecurityLifecycleService.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME);
    }

    @Before

@ -123,11 +125,11 @@ public class RemoteIndexAuditTrailStartingTests extends SecurityIntegTestCase {
                .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop()));
        // first stop both audit trails otherwise we keep on indexing
        if (remoteCluster != null) {
            toStop.add(() -> StreamSupport.stream(remoteCluster.getInstances(AuditTrailService.class).spliterator(), false)
            toStop.add(() -> StreamSupport.stream(remoteCluster.getInstances(AuditTrailService.class).spliterator(), false)
                    .map(s -> s.getAuditTrails()).flatMap(List::stream)
                    .filter(t -> t.name().equals(IndexAuditTrail.NAME))
                    .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop()));
            toStop.add(() -> remoteCluster.wipe(Collections.<String>emptySet()));
            toStop.add(() -> remoteCluster.wipe(excludeTemplates()));
            toStop.add(remoteCluster::afterTest);
            toStop.add(remoteCluster);
        }

@ -62,6 +62,7 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException;
import static org.elasticsearch.xpack.security.support.Exceptions.authenticationError;

@ -865,7 +866,7 @@ public class AuthenticationServiceTests extends ESTestCase {
        User user = new User("_username", "r1");
        final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null);
        String token = tokenService.getUserTokenString(tokenService.createUserToken(expected));
        when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true);
        when(lifecycleService.isSecurityIndexExisting()).thenReturn(true);
        GetRequestBuilder getRequestBuilder = mock(GetRequestBuilder.class);
        when(client.prepareGet(eq(SecurityLifecycleService.SECURITY_INDEX_NAME), eq("doc"), any(String.class)))
                .thenReturn(getRequestBuilder);

@ -877,6 +878,11 @@ public class AuthenticationServiceTests extends ESTestCase {
            return Void.TYPE;
        }).when(client).get(any(GetRequest.class), any(ActionListener.class));

        doAnswer(invocationOnMock -> {
            ((Runnable) invocationOnMock.getArguments()[1]).run();
            return null;
        }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));

        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            threadContext.putHeader("Authorization", "Bearer " + token);
            ElasticsearchSecurityException e =

@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.common.settings.SecureString;

@ -18,6 +19,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.SecurityIntegTestCase;
import org.elasticsearch.test.SecuritySettingsSource;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.token.CreateTokenResponse;

@ -30,9 +32,11 @@ import org.junit.Before;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

@ -43,8 +47,7 @@ public class TokenAuthIntegTests extends SecurityIntegTestCase {
    public Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder()
                .put(super.nodeSettings(nodeOrdinal))
                // turn down token expiration interval and crank up the deletion interval
                .put(TokenService.TOKEN_EXPIRATION.getKey(), TimeValue.timeValueSeconds(1L))
                // crank up the deletion interval and set timeout for delete requests
                .put(TokenService.DELETE_INTERVAL.getKey(), TimeValue.timeValueSeconds(1L))
                .put(TokenService.DELETE_TIMEOUT.getKey(), TimeValue.timeValueSeconds(2L))
                .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)

@ -117,6 +120,7 @@ public class TokenAuthIntegTests extends SecurityIntegTestCase {
        }
    }

    @TestLogging("org.elasticsearch.xpack.security.authc:DEBUG")
    public void testExpiredTokensDeletedAfterExpiration() throws Exception {
        final Client client = client().filterWithHeader(Collections.singletonMap("Authorization",
                UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER,

@ -132,15 +136,25 @@ public class TokenAuthIntegTests extends SecurityIntegTestCase {

        InvalidateTokenResponse invalidateResponse = securityClient.prepareInvalidateToken(response.getTokenString()).get();
        assertTrue(invalidateResponse.isCreated());
        AtomicReference<String> docId = new AtomicReference<>();
        assertBusy(() -> {
            SearchResponse searchResponse = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
                    .setSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("doc_type", TokenService.DOC_TYPE)))
                    .setSize(0)
                    .setSize(1)
                    .setTerminateAfter(1)
                    .get();
            assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L));
            assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
            docId.set(searchResponse.getHits().getAt(0).getId());
        });

        // hack doc to modify the time to the day before
        Instant dayBefore = created.minus(1L, ChronoUnit.DAYS);
        assertTrue(Instant.now().isAfter(dayBefore));
        client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, "doc", docId.get())
                .setDoc("expiration_time", dayBefore.toEpochMilli())
                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                .get();

        AtomicBoolean deleteTriggered = new AtomicBoolean(false);
        assertBusy(() -> {
            assertTrue(Instant.now().isAfter(created.plusSeconds(1L).plusMillis(500L)));

@ -190,7 +204,7 @@ public class TokenAuthIntegTests extends SecurityIntegTestCase {

    @Before
    public void waitForSecurityIndexWritable() throws Exception {
        assertSecurityIndexWriteable();
        assertSecurityIndexActive();
    }

    @After

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authc;

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetRequestBuilder;

@ -18,6 +19,8 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.EqualsHashCodeTestUtils;

@ -38,10 +41,12 @@ import java.security.GeneralSecurityException;
import java.time.Clock;
import java.util.Base64;
import java.util.Collections;
import java.util.function.Consumer;

import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

@ -60,19 +65,22 @@ public class TokenServiceTests extends ESTestCase {
            .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();

    @Before
    public void setupClient() throws GeneralSecurityException {
    public void setupClient() {
        client = mock(Client.class);
        when(client.threadPool()).thenReturn(threadPool);
        when(client.settings()).thenReturn(settings);
        lifecycleService = mock(SecurityLifecycleService.class);
        when(lifecycleService.isSecurityIndexWriteable()).thenReturn(true);
        doAnswer(invocationOnMock -> {
            ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[2];
            GetResponse response = mock(GetResponse.class);
            when(response.isExists()).thenReturn(false);
            listener.onResponse(response);
            return Void.TYPE;
        }).when(client).execute(eq(GetAction.INSTANCE), any(GetRequest.class), any(ActionListener.class));
        }).when(client).get(any(GetRequest.class), any(ActionListener.class));
        doAnswer(invocationOnMock -> {
            ((Runnable) invocationOnMock.getArguments()[1]).run();
            return null;
        }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));
        when(client.threadPool()).thenReturn(threadPool);
        this.clusterService = new ClusterService(settings, new ClusterSettings(settings, ClusterSettings
                .BUILT_IN_CLUSTER_SETTINGS), threadPool, Collections.emptyMap());

@ -286,7 +294,7 @@ public class TokenServiceTests extends ESTestCase {
    }

    public void testInvalidatedToken() throws Exception {
        when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true);
        when(lifecycleService.isSecurityIndexExisting()).thenReturn(true);
        TokenService tokenService =
                new TokenService(tokenServiceEnabledSettings, Clock.systemUTC(), client, lifecycleService, clusterService);
        Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null);

@ -438,6 +446,13 @@ public class TokenServiceTests extends ESTestCase {
        ThreadContext requestContext = new ThreadContext(Settings.EMPTY);
        requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token));

        doAnswer(invocationOnMock -> {
            ActionListener<GetResponse> listener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1];
            listener.onFailure(new NoShardAvailableActionException(new ShardId(new Index("foo", "uuid"), 0), "shard oh shard"));
            return Void.TYPE;
        }).when(client).get(any(GetRequest.class), any(ActionListener.class));
        when(client.prepareGet(anyString(), anyString(), anyString())).thenReturn(new GetRequestBuilder(client, GetAction.INSTANCE));

        try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) {
            PlainActionFuture<UserToken> future = new PlainActionFuture<>();
            tokenService.getAndValidateToken(requestContext, future);

@ -11,6 +11,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;

@ -231,20 +232,19 @@ public class NativeUsersStoreTests extends ESTestCase {

        actionRespond(GetRequest.class, new GetResponse(getResult));
    }

    private NativeUsersStore startNativeUsersStore() {
        SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class);
        when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true);
        when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true);
        when(securityLifecycleService.isSecurityIndexWriteable()).thenReturn(true);
        when(securityLifecycleService.isSecurityIndexMappingUpToDate()).thenReturn(true);
        when(securityLifecycleService.isSecurityIndexOutOfDate()).thenReturn(false);
        when(securityLifecycleService.isSecurityIndexUpToDate()).thenReturn(true);
        doAnswer((i) -> {
            Runnable action = (Runnable) i.getArguments()[1];
            action.run();
            ActionListener listener = (ActionListener) i.getArguments()[0];
            listener.onResponse(null);
            return null;
        }).when(securityLifecycleService).createIndexIfNeededThenExecute(any(ActionListener.class), any(Runnable.class));
        }).when(securityLifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));
        return new NativeUsersStore(Settings.EMPTY, client, securityLifecycleService);
    }

@ -28,7 +28,7 @@ import static org.hamcrest.Matchers.instanceOf;
public class SecurityScrollTests extends SecurityIntegTestCase {

    public void testScrollIsPerUser() throws Exception {
        assertSecurityIndexWriteable();
        assertSecurityIndexActive();
        securityClient().preparePutRole("scrollable")
                .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null)
                .get();

@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public class IndexLifecycleManagerIntegTests extends SecurityIntegTestCase {

    public void testConcurrentOperationsTryingToCreateSecurityIndexAndAlias() throws Exception {
        assertSecurityIndexWriteable();
        assertSecurityIndexActive();
        final int processors = Runtime.getRuntime().availableProcessors();
        final int numThreads = scaledRandomIntBetween((processors + 1) / 2, 4 * processors);
        final int maxNumRequests = 100 / numThreads; // bound to a maximum of 100 requests

@ -86,7 +86,7 @@ public class IndexLifecycleManagerTests extends ESTestCase {
                actions.put(action, map);
            }
        };
        manager = new IndexLifecycleManager(Settings.EMPTY, client, INDEX_NAME, TEMPLATE_NAME);
        manager = new IndexLifecycleManager(Settings.EMPTY, client, INDEX_NAME);
    }

    public void testIndexWithUpToDateMappingAndTemplate() throws IOException {

@ -98,7 +98,7 @@ public class IndexLifecycleManagerTests extends ESTestCase {

        assertThat(manager.indexExists(), Matchers.equalTo(true));
        assertThat(manager.isAvailable(), Matchers.equalTo(true));
        assertThat(manager.isWritable(), Matchers.equalTo(true));
        assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true));
    }

    public void testIndexWithoutPrimaryShards() throws IOException {

@ -245,13 +245,13 @@ public class IndexLifecycleManagerTests extends ESTestCase {
    private void assertInitialState() {
        assertThat(manager.indexExists(), Matchers.equalTo(false));
        assertThat(manager.isAvailable(), Matchers.equalTo(false));
        assertThat(manager.isWritable(), Matchers.equalTo(false));
        assertThat(manager.isMappingUpToDate(), Matchers.equalTo(false));
    }

    private void assertIndexUpToDateButNotAvailable() {
        assertThat(manager.indexExists(), Matchers.equalTo(true));
        assertThat(manager.isAvailable(), Matchers.equalTo(false));
        assertThat(manager.isWritable(), Matchers.equalTo(true));
        assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true));
    }

    public static ClusterState.Builder createClusterState(String indexName, String templateName) throws IOException {

@ -21,7 +21,6 @@ import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.integration.MlRestTestStateCleaner;
import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.watcher.support.WatcherIndexTemplateRegistry;
import org.junit.After;
import org.junit.Before;

@ -81,7 +80,6 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase {
    private void waitForTemplates() throws Exception {
        if (installTemplates()) {
            List<String> templates = new ArrayList<>();
            templates.add(SecurityLifecycleService.SECURITY_TEMPLATE_NAME);
            templates.addAll(Arrays.asList(Auditor.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, AnomalyDetectorsIndex.jobStateIndexName(),
                    AnomalyDetectorsIndex.jobResultsIndexPrefix()));
            templates.addAll(Arrays.asList(WatcherIndexTemplateRegistry.TEMPLATE_NAMES));

@ -250,11 +250,14 @@
|
|||
- do:
|
||||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "events"
|
||||
body: >
|
||||
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "events" }
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z", "calendar_id": "events" }
|
||||
{ "description": "event 3", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-13T00:00:00Z", "calendar_id": "events" }
|
||||
{ "description": "event 4", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-15T00:00:00Z", "calendar_id": "events" }
|
||||
body: >
|
||||
{
|
||||
"events" : [
|
||||
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "events" },
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z", "calendar_id": "events" },
|
||||
{ "description": "event 3", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-13T00:00:00Z", "calendar_id": "events" },
|
||||
{ "description": "event 4", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-15T00:00:00Z", "calendar_id": "events" }]
|
||||
}
|
||||
|
||||
- do:
|
||||
xpack.ml.get_calendar_events:
|
||||
|
@ -309,15 +312,21 @@
|
|||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "events-2"
|
||||
body: >
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"}
|
||||
{ "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}
|
||||
{
|
||||
"events" : [
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"},
|
||||
{ "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}]
|
||||
}
|
||||
|
||||
- do:
|
||||
catch: bad_request
|
||||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "events-2"
|
||||
body: >
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-03T00:00:00Z", "calendar_id": "events"}
|
||||
{
|
||||
"events" : [
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-03T00:00:00Z", "calendar_id": "events"}]
|
||||
}
|
||||
|
||||
# Event is not in calendar events-2
|
||||
- do:
|
||||
|
@ -349,9 +358,12 @@
|
|||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "cal-foo"
|
||||
body: >
|
||||
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z" }
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" }
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" }
|
||||
{
|
||||
"events" : [
|
||||
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z" },
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" },
|
||||
{ "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" }]
|
||||
}
|
||||
|
||||
- do:
|
||||
xpack.ml.put_calendar:
|
||||
|
@ -361,8 +373,11 @@
|
|||
xpack.ml.post_calendar_events:
|
||||
calendar_id: "cal-bar"
|
||||
body: >
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"}
|
||||
{ "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}
|
||||
{
|
||||
"events" : [
|
||||
{ "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"},
|
||||
{ "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}]
|
||||
}
|
||||
|
||||
- do:
|
||||
xpack.ml.delete_calendar:
|
||||
|
@@ -404,8 +419,11 @@
       xpack.ml.post_calendar_events:
         calendar_id: "dave-holidays"
         body: >
-          { "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }
-          { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }
+          {
+            "events" : [
+              { "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" },
+              { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }]
+          }

   - do:
       xpack.ml.put_calendar:
@@ -415,8 +433,11 @@
       xpack.ml.post_calendar_events:
         calendar_id: "tom-holidays"
         body: >
-          { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }
-          { "description": "other", "start_time": "2017-12-27T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }
+          {
+            "events" : [
+              { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" },
+              { "description": "other", "start_time": "2017-12-27T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }]
+          }

   - do:
       xpack.ml.get_calendar_events:
@@ -450,8 +471,10 @@
       xpack.ml.post_calendar_events:
         calendar_id: "dave-holidays"
         body: >
-          { "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }
-          { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }
+          {
+            "events" : [{ "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" },
+              { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }]
+          }

   - do:
       xpack.ml.put_calendar:
@@ -465,8 +488,11 @@
       xpack.ml.post_calendar_events:
         calendar_id: "tom-holidays"
         body: >
-          { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }
-          { "description": "other", "start_time": "2018-01-15T00:00:00Z", "end_time": "2018-01-16T00:00:00Z" }
+          {
+            "events" : [
+              { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" },
+              { "description": "other", "start_time": "2018-01-15T00:00:00Z", "end_time": "2018-01-16T00:00:00Z" }]
+          }

   - do:
       xpack.ml.put_calendar:
@@ -476,8 +502,11 @@
       xpack.ml.post_calendar_events:
         calendar_id: "not-used-by-job"
        body: >
-          { "description": "random", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-26T00:00:00Z" }
-          { "description": "random2", "start_time": "2018-02-20T00:00:00Z", "end_time": "2018-02-26T00:00:00Z" }
+          {
+            "events" : [
+              { "description": "random", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-26T00:00:00Z" },
+              { "description": "random2", "start_time": "2018-02-20T00:00:00Z", "end_time": "2018-02-26T00:00:00Z" }]
+          }


   # Calendar Id must be _all if a job id is used
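The comment above points at the read-side rule exercised later in this file: when `xpack.ml.get_calendar_events` is filtered by job, the calendar id must be the wildcard `_all`. A hedged sketch of such a call; the job id is hypothetical and does not appear in this diff:

  - do:
      xpack.ml.get_calendar_events:
        calendar_id: "_all"
        job_id: "some-job"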
@@ -532,8 +561,11 @@
       xpack.ml.post_calendar_events:
         calendar_id: "ben-holidays"
         body: >
-          { "description": "ski", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-27T00:00:00Z" }
-          { "description": "snow", "start_time": "2018-01-30T00:00:00Z", "end_time": "2018-02-01T00:00:00Z" }
+          {
+            "events" : [
+              { "description": "ski", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-27T00:00:00Z" },
+              { "description": "snow", "start_time": "2018-01-30T00:00:00Z", "end_time": "2018-02-01T00:00:00Z" }]
+          }

   - do:
       xpack.ml.get_calendar_events:
@@ -21,7 +21,6 @@ import org.elasticsearch.test.StreamsUtils;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xpack.watcher.common.text.TextTemplate;
 import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
-import org.elasticsearch.xpack.security.SecurityClusterClientYamlTestCase;
 import org.elasticsearch.xpack.security.support.IndexLifecycleManager;
 import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction;
@@ -61,11 +60,6 @@ public class FullClusterRestartIT extends ESRestTestCase {
     private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
     private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));

-    @Before
-    public void waitForSecuritySetup() throws Exception {
-        SecurityClusterClientYamlTestCase.waitForSecurity();
-    }
-
     @Before
     public void waitForMlTemplates() throws Exception {
         XPackRestTestHelper.waitForMlTemplates(client());
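Note the asymmetry this hunk leaves behind: the `@Before` hook that waited for security setup is deleted outright, while the hook that waits for the ML index templates via `XPackRestTestHelper.waitForMlTemplates` is kept unchanged.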
@@ -16,7 +16,7 @@ import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

 import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;

-public class MultiClusterSearchWithSecurityYamlTestSuiteIT extends SecurityClusterClientYamlTestCase {
+public class MultiClusterSearchWithSecurityYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     private static final String USER = "test_user";
     private static final String PASS = "x-pack-test-password";
@@ -14,7 +14,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
-import org.elasticsearch.xpack.security.SecurityClusterClientYamlTestCase;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
 import org.junit.Before;

@@ -29,7 +29,7 @@ import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.is;

 @TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs
-public class UpgradeClusterClientYamlTestSuiteIT extends SecurityClusterClientYamlTestCase {
+public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

     /**
      * Waits for the Machine Learning templates to be created by {@link org.elasticsearch.plugins.MetaDataUpgrader}
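Taken together, the Java hunks share one theme: FullClusterRestartIT, MultiClusterSearchWithSecurityYamlTestSuiteIT, and UpgradeClusterClientYamlTestSuiteIT all drop their dependency on `SecurityClusterClientYamlTestCase`, extending `ESRestTestCase` or `ESClientYamlSuiteTestCase` directly instead. The retained `basicAuthHeaderValue` static import and the `USER`/`PASS` constants suggest these suites now supply credentials through their own client settings, though the methods that would do so sit outside the context shown in this diff.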