Merge branch 'master' into feature/sql

Original commit: elastic/x-pack-elasticsearch@1ada71a4f4
Nik Everett 2017-07-24 15:04:59 -04:00
commit 39b9b55e19
201 changed files with 2782 additions and 4071 deletions

View File

@@ -12,6 +12,7 @@ directly to configure and access {xpack} features.
* <<ml-apis, Machine Learning APIs>>
* <<security-api,Security APIs>>
* <<watcher-api, Watcher APIs>>
* <<migration-api, Migration APIs>>
--
@@ -20,4 +21,5 @@ include::graph/explore.asciidoc[]
include::ml-api.asciidoc[]
include::security.asciidoc[]
include::watcher.asciidoc[]
include::migration.asciidoc[]
include::defs.asciidoc[]

View File

@@ -0,0 +1,13 @@
[role="xpack"]
[[migration-api]]
== Migration APIs
The migration APIs simplify upgrading {xpack} indices from one version to another.
* <<migration-api-assistance>>
* <<migration-api-upgrade>>
* <<migration-api-deprecation>>
include::migration/assistance.asciidoc[]
include::migration/upgrade.asciidoc[]
include::migration/deprecation.asciidoc[]

View File

@@ -0,0 +1,67 @@
[role="xpack"]
[[migration-api-assistance]]
=== Migration Assistance API
The Migration Assistance API analyzes the existing indices in the cluster and returns information
about indices that require changes before the cluster can be upgraded to the next major version.
To see a list of indices that need to be upgraded or reindexed, submit a GET request to the
`/_xpack/migration/assistance` endpoint:
[source,js]
--------------------------------------------------
GET /_xpack/migration/assistance
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot create an old index in docs test]
A successful call returns a list of indices that need to be updated or reindexed:
[source,js]
--------------------------------------------------
{
"indices" : {
".watches" : {
"action_required" : "upgrade"
},
".security" : {
"action_required" : "upgrade"
},
"my_old_index": {
"action_required" : "reindex"
},
"my_other_old_index": {
"action_required" : "reindex"
}
}
}
--------------------------------------------------
// NOTCONSOLE
To check a particular index or set of indices, specify the index name or pattern as the last part of the
`/_xpack/migration/assistance/index_name` endpoint:
[source,js]
--------------------------------------------------
GET /_xpack/migration/assistance/my_*
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot create an old index in docs test]
A successful call returns a list of indices that need to be updated or reindexed and that match the
index pattern specified in the endpoint:
[source,js]
--------------------------------------------------
{
"indices" : {
"my_old_index": {
"action_required" : "reindex"
},
"my_other_old_index": {
"action_required" : "reindex"
}
}
}
--------------------------------------------------
// NOTCONSOLE

View File

@@ -0,0 +1,87 @@
[role="xpack"]
[[migration-api-deprecation]]
=== Deprecation Info APIs
The deprecation API retrieves information about cluster, node, and index level
settings that use deprecated features which will be removed or changed in the next major version.
To see the list of offenders in your cluster, submit a GET request to the `_xpack/migration/deprecations` endpoint:
[source,js]
--------------------------------------------------
GET /_xpack/migration/deprecations
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot assert tests have certain deprecations]
Example response:
[source,js]
--------------------------------------------------
{
"cluster_settings" : [
{
"level" : "info",
"message" : "Network settings changes",
"url" : "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_60_indices_changes.html#_index_templates_use_literal_index_patterns_literal_instead_of_literal_template_literal",
"details" : "templates using `template` field: watches,.monitoring-alerts,.watch-history-6,.ml-notifications,security-index-template,triggered_watches,.monitoring-es,.ml-meta,.ml-state,.monitoring-logstash,.ml-anomalies-,.monitoring-kibana"
}
],
"node_settings" : [ ],
"index_settings" : {
".monitoring-es-6-2017.07.21" : [
{
"level" : "info",
"message" : "Coercion of boolean fields",
"url" : "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_60_mappings_changes.html#_coercion_of_boolean_fields",
"details" : "[[type: doc, field: spins], [type: doc, field: mlockall], [type: doc, field: node_master], [type: doc, field: primary]]"
}
]
}
}
--------------------------------------------------
// NOTCONSOLE
The response breaks down all the specific forward-incompatible settings that your
cluster should resolve before upgrading. Any offending setting is represented as a deprecation warning.
The following is an example deprecation warning:
[source,js]
--------------------------------------------------
{
"level" : "info",
"message" : "This is the generic descriptive message of the breaking change",
"url" : "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_60_indices_changes.html",
"details" : "more information, like which nodes, indices, or settings are to blame"
}
--------------------------------------------------
// NOTCONSOLE
As shown above, the `level` property describes how significant the issue is:
[options="header"]
|=======
|level |meaning
|none |Everything is fine.
|info |An advisory note that something has changed. No action is needed.
|warning |You can upgrade directly, but you are using deprecated functionality which will not be available in the next major version.
|critical |You cannot upgrade without fixing this problem.
|=======
`message` and the optional `details` provide descriptive information about the deprecation warning, while the `url`
property provides a link to the Breaking Changes Documentation, where more information about this change can be found.
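For instance, a quick way to scan a response for blocking issues from the command line is to filter
on the `level` property. This is only a sketch, assuming `jq` is installed and the node listens on
`localhost:9200`:
[source,shell]
--------------------------------------------------
# Gather every deprecation warning and keep only the critical ones.
curl -s 'localhost:9200/_xpack/migration/deprecations' | jq '
  [ .cluster_settings[], .node_settings[], (.index_settings[] | .[]) ]
  | map(select(.level == "critical"))'
--------------------------------------------------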
Any cluster-level deprecation warnings can be found under
the `cluster_settings` key. Similarly, any node-level warnings are found under `node_settings`.
Since only a subset of your nodes may use these settings, it is important to read the
`details` section for more information about which nodes need to be updated. Index warnings are
grouped per index and can be filtered using an index pattern in the request path.
The following example request shows only the index-level deprecations of all `logstash-*` indices:
[source,js]
--------------------------------------------------
GET /logstash-*/_xpack/migration/deprecations
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot assert tests have certain deprecations]

View File

@@ -0,0 +1,108 @@
[role="xpack"]
[[migration-api-upgrade]]
=== Migration Upgrade API
The Migration Upgrade API upgrades internal indices to make them compatible with the next major version.
Indices need to be upgraded one at a time by submitting a POST request to the
`/_xpack/migration/upgrade/index_name` endpoint:
[source,js]
--------------------------------------------------
POST /_xpack/migration/upgrade/.watches
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot create an old index in docs test]
A successful call returns statistics about the upgrade process:
[source,js]
--------------------------------------------------
{
"took" : 127,
"timed_out" : false,
"total" : 4,
"updated" : 0,
"created" : 4,
"deleted" : 0,
"batches" : 1,
"version_conflicts" : 0,
"noops" : 0,
"retries" : {
"bulk" : 0,
"search" : 0
},
"throttled_millis" : 0,
"failures" : [ ]
}
--------------------------------------------------
// NOTCONSOLE
By default, the upgrade call blocks until the upgrade process is finished. For large indices the upgrade can be
performed asynchronously by specifying the `wait_for_completion=false` parameter:
[source,js]
--------------------------------------------------
POST /_xpack/migration/upgrade/.watches?wait_for_completion=false
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot create an old index in docs test]
This call should return the id of the upgrade process task:
[source,js]
--------------------------------------------------
{
"task" : "PFvgv7T6TGumRyFF3vqTFg:1137"
}
--------------------------------------------------
// NOTCONSOLE
The status of running or finished upgrade requests can be obtained using the <<tasks,Task API>>:
[source,js]
--------------------------------------------------
GET _tasks/PFvgv7T6TGumRyFF3vqTFg:1137?detailed=true
--------------------------------------------------
// CONSOLE
// TEST[skip:cannot create an old index in docs test]
[source,js]
--------------------------------------------------
{
"completed" : true, <1>
"task" : {
"node" : "PFvgv7T6TGumRyFF3vqTFg",
"id" : 1137,
"type" : "transport",
"action" : "cluster:admin/xpack/upgrade",
"description" : "",
"start_time_in_millis" : 1500650625413,
"running_time_in_nanos" : 947456819,
"cancellable" : true
},
"response" : { <2>
"took" : 212,
"timed_out" : false,
"total" : 4,
"updated" : 0,
"created" : 4,
"deleted" : 0,
"batches" : 1,
"version_conflicts" : 0,
"noops" : 0,
"retries" : {
"bulk" : 0,
"search" : 0
},
"throttled_millis" : 0,
"failures" : [ ]
}
}
--------------------------------------------------
// NOTCONSOLE
<1> `true` in the `completed` field indicates that the upgrade request has finished; `false` means that
the request is still executing.
<2> The `response` field contains the status of the upgrade request.
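When the upgrade is started with `wait_for_completion=false`, one way to wait for it from the command
line is to poll the Task API until `completed` turns `true`. A minimal sketch, reusing the host and
task id from the examples above:
[source,shell]
--------------------------------------------------
# Poll the Task API every 5 seconds until the upgrade task reports completion.
until curl -s 'localhost:9200/_tasks/PFvgv7T6TGumRyFF3vqTFg:1137' \
    | grep -q '"completed" *: *true'; do
  sleep 5
done
--------------------------------------------------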

View File

@@ -16,10 +16,12 @@ see <<managing-native-users, Managing Native Users>>.
{security} provides built-in user credentials to help you get up and running.
These users have a fixed set of privileges and cannot be authenticated until their
passwords have been set. The exception is the `elastic` user which can be authenticated
from a localhost rest request with an empty password. Until a password is set, the elastic
user is only authorized to perform change password requests.
Please read <<reset-built-in-user-passwords,Reset Built-in User Passwords>> below.
passwords have been set. In order to set these passwords, the `elastic` user must
have its password bootstrapped. To bootstrap the password, please read
<<bootstrap-elastic-passwords,Bootstrap Elastic Password>> below.
Once the `elastic` user has its password bootstrapped,
this user can be used to <<set-built-in-user-passwords,set all of the built-in user passwords>>.
.{security} Built-in Users
|========
@@ -46,13 +48,71 @@ be disabled individually, using the
{ref}/security-api-users.html[user management API].
[float]
[[reset-built-in-user-passwords]]
==== Reset Built-in User Passwords
[[bootstrap-elastic-passwords]]
==== Bootstrap Elastic Password
The `elastic` user can have its password bootstrapped by placing a password
in the keystore of at least one node. At startup, that node will pull the
password out of the keystore and set the `elastic` password to that value. The
password will only be set if the `elastic` user password has not already been set.
As the `elastic` user is stored in the native realm, the password will be
synced to all the nodes in a cluster. It is safe to bootstrap the password on
multiple nodes as long as the password is the same. If different passwords are
set on different nodes, it is unpredictable which password will be bootstrapped.
Specifically, the setting for the bootstrap password is "bootstrap.password". If
the keystore has not been created before, it must be created first.
[source,shell]
--------------------------------------------------
bin/elasticsearch-keystore create
bin/elasticsearch-keystore add "bootstrap.password"
--------------------------------------------------
After running the "add" command, you will be prompted to enter your password.
The bootstrap password is only intended to be a transient password used to help you
set all the built-in user passwords. As the password will remain accessible in the
keystore on the machine, the `elastic` user's password should be changed to a different
password when you <<set-built-in-user-passwords,set all the built-in passwords>>.
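For example, once the built-in passwords are set, the `elastic` password can be rotated with the
same change password API that appears later in this guide. The host and new password below are
placeholders:
[source,shell]
--------------------------------------------------
# Replace the bootstrapped password via the change password API.
# Host, user URL, and the new password are placeholders for this sketch.
curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/elastic/_password' \
  -H "Content-Type: application/json" -d '{ "password" : "new-elastic-password" }'
--------------------------------------------------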
[float]
[[set-built-in-user-passwords]]
==== Set Built-in User Passwords
[IMPORTANT]
=============================================================================
You must set the passwords for all built-in users.
You can update passwords from the *Management > Users* UI in Kibana or with the
{ref}/security-api-users.html#security-api-reset-user-password[Reset Password API]:
You can update passwords from the *Management > Users* UI in Kibana, using the
setup-passwords tool, or with the security user API.
The setup-passwords tool is a command line tool provided to assist with
setup. When run, it uses the `elastic` user's bootstrap password to execute
API requests that change the passwords of the `elastic`, `kibana`, and
`logstash_system` users. In "auto" mode the passwords are generated randomly and
printed to the console.
[source,shell]
--------------------------------------------------
bin/x-pack/setup-passwords auto
--------------------------------------------------
There is also an "interactive" mode that will prompt you to manually enter passwords.
[source,shell]
--------------------------------------------------
bin/x-pack/setup-passwords interactive
--------------------------------------------------
If the node is not listening at "http://localhost:9200", you will need to pass the url parameter
to tell the tool where to submit the requests:
[source,shell]
--------------------------------------------------
bin/x-pack/setup-passwords auto -u "http://localhost:9201"
--------------------------------------------------
The {ref}/security-api-users.html#security-api-reset-user-password[Reset Password API] can
also be used to change the passwords manually.
[source,js]
---------------------------------------------------------------------
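// The example body was truncated in this diff view; a plausible
// reconstruction, mirroring the change password calls elsewhere in
// this commit (the password value is a placeholder):
PUT /_xpack/security/user/elastic/_password
{
  "password" : "elasticpassword"
}
---------------------------------------------------------------------
// NOTCONSOLE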

View File

@@ -4,40 +4,46 @@
To secure a cluster, you must install {xpack} on every node in the
cluster. Basic authentication is enabled by default--to communicate
with the cluster, you must specify a username and password.
Unless you <<anonymous-access, enable anonymous access>>, all
Unless you {xpack-ref}/anonymous-access.html[enable anonymous access], all
requests that don't include a user name and password are rejected.
{security} provides a built-in `elastic` superuser you can use
to start setting things up. This `elastic` user has full access
to the cluster, including all indices and data, so make sure
you change the default password and protect the `elastic` user
credentials accordingly.
to the cluster, including all indices and data, so the `elastic` user
does not have a password set by default.
In order for the `elastic` user to be usable, its <<bootstrap-elastic-passwords,password must be bootstrapped>>
by at least one of the nodes in your cluster. The bootstrap password is intended
to be a temporary password to help you set up your cluster. The `elastic` user password
will be changed during the setup process.
To get started with {security}:
. <<installing-xpack, Install X-Pack>> and start Elasticsearch and Kibana.
. <<installing-xpack, Install X-Pack>>.
. Change the passwords of the built-in `kibana`, `logstash_system` and `elastic` users:
. On at least one of the nodes in your cluster, set the "bootstrap.password" secure setting in the keystore.
+
--
[source,shell]
----------------------------------------------------------
curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/elastic/_password' -H "Content-Type: application/json" -d '{
"password" : "elasticpassword"
}'
--------------------------------------------------
bin/elasticsearch-keystore create
bin/elasticsearch-keystore add "bootstrap.password"
--------------------------------------------------
curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/kibana/_password' -H "Content-Type: application/json" -d '{
"password" : "kibanapassword"
}'
--
curl -XPUT -u elastic 'localhost:9200/_xpack/security/user/logstash_system/_password' -H "Content-Type: application/json" -d '{
"password" : "logstashpassword"
}'
----------------------------------------------------------
// NOTCONSOLE
. Start Elasticsearch and Kibana. The Elasticsearch node with the "bootstrap.password" setting will use that
setting to set the `elastic` user password on node startup.
NOTE: By default, the `elastic` user does not have a password set. Until its password is set, the `elastic` user will only be
allowed to submit change password REST requests from localhost.
. Set the passwords of the built-in `elastic`, `kibana`, and `logstash_system` users using the provided
setup-passwords tool. In "auto" mode this tool randomly generates passwords and prints them to the console.
+
--
[source,shell]
--------------------------------------------------
bin/x-pack/setup-passwords auto
--------------------------------------------------
--
@@ -89,19 +95,19 @@ xpack.security.audit.enabled: true
By default, events are logged to a dedicated `elasticsearch-access.log` file in
`ES_HOME/logs`. You can also store the events in an Elasticsearch index for
easier analysis and control what events are logged. For more information, see
<<auditing, Configuring Auditing>>.
{xpack-ref}/auditing.html[Configuring Auditing].
--
[[moving-on]]
IMPORTANT: Once you get these basic security measures in place, we strongly
recommend that you secure communications to and from nodes by
configuring your cluster to use <<ssl-tls, SSL/TLS encryption>>.
configuring your cluster to use {xpack-ref}/ssl-tls.html[SSL/TLS encryption].
Nodes that do not have encryption enabled send passwords in plain
text!
Depending on your security requirements, you might also want to:
* Integrate with <<ldap-realm, LDAP>> or <<active-directory-realm, Active Directory>>,
or <<pki-realm, require certificates>> for authentication.
* Use <<ip-filtering, IP Filtering>> to allow or deny requests from particular
* Integrate with {xpack-ref}/ldap-realm.html[LDAP] or {xpack-ref}/active-directory-realm.html[Active Directory],
or {xpack-ref}/pki-realm.html[require certificates] for authentication.
* Use {xpack-ref}/ip-filtering.html[IP Filtering] to allow or deny requests from particular
IP addresses or address ranges.

View File

@@ -0,0 +1,100 @@
[[xpack-security]]
= Securing Elasticsearch and Kibana
[partintro]
--
{security} enables you to easily secure a cluster. With Security,
you can password-protect your data as well as implement more advanced security
measures such as encrypting communications, role-based access control,
IP filtering, and auditing. This guide describes how to configure the security
features you need, and interact with your secured cluster.
Security protects Elasticsearch clusters by:
* <<preventing-unauthorized-access, Preventing unauthorized access>>
with password protection, role-based access control, and IP filtering.
* <<preserving-data-integrity, Preserving the integrity of your data>>
with message authentication and SSL/TLS encryption.
* <<maintaining-audit-trail, Maintaining an audit trail>>
so you know who's doing what to your cluster and the data it stores.
[float]
[[preventing-unauthorized-access]]
=== Preventing Unauthorized Access
To prevent unauthorized access to your Elasticsearch cluster, you must have a
way to _authenticate_ users. This simply means that you need a way to validate
that a user is who they claim to be. For example, you have to make sure only
the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack
Security provides a standalone authentication mechanism that enables you to
quickly password-protect your cluster. If you're already using {xpack-ref}/ldap-realm.html[LDAP],
{xpack-ref}/active-directory-realm.html[Active Directory], or {xpack-ref}/pki-realm.html[PKI] to manage
users in your organization, {security} is able to integrate with those
systems to perform user authentication.
In many cases, simply authenticating users isn't enough. You also need a way to
control what data users have access to and what tasks they can perform. {security}
enables you to _authorize_ users by assigning access _privileges_ to _roles_,
and assigning those roles to users. For example, this
{xpack-ref}/authorization.html[role-based access control] mechanism (a.k.a. RBAC) enables
you to specify that the user `kandorra` can only perform read operations on the
`events` index and can't do anything at all with other indices.
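As a concrete illustration, such a policy could be set up with the role and user management APIs.
The role name and password below are invented for this sketch:
[source,shell]
--------------------------------------------------
# Sketch: a role that can only read the events index, assigned to kandorra.
curl -XPOST -u elastic 'localhost:9200/_xpack/security/role/events_reader' \
  -H "Content-Type: application/json" -d '{
  "indices" : [ { "names" : [ "events" ], "privileges" : [ "read" ] } ]
}'
# Create (or update) the user and give it only that role.
curl -XPOST -u elastic 'localhost:9200/_xpack/security/user/kandorra' \
  -H "Content-Type: application/json" -d '{
  "password" : "example-password",
  "roles" : [ "events_reader" ]
}'
--------------------------------------------------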
{security} also supports {xpack-ref}/ip-filtering.html[ IP-based authorization]. You can
whitelist and blacklist specific IP addresses or subnets to control network-level
access to a server.
[float]
[[preserving-data-integrity]]
=== Preserving Data Integrity
A critical part of security is keeping confidential data confidential.
Elasticsearch has built-in protections against accidental data loss and
corruption. However, there's nothing to stop deliberate tampering or data
interception. {security} preserves the integrity of your data by
{xpack-ref}/ssl-tls.html[encrypting communications] to and from nodes and
{xpack-ref}/enable-message-authentication.html[authenticating messages] to verify that they
have not been tampered with or corrupted in transit during node-to-node
communication. For even greater protection, you can increase the
{xpack-ref}/ciphers.html[encryption strength] and
{xpack-ref}/separating-node-client-traffic.html[separate client traffic from node-to-node communications].
[float]
[[maintaining-audit-trail]]
=== Maintaining an Audit Trail
Keeping a system secure takes vigilance. By using {security} to maintain
an audit trail, you can easily see who is accessing your cluster and what they're
doing. By analyzing access patterns and failed attempts to access your cluster,
you can gain insights into attempted attacks and data breaches. Keeping an
auditable log of the activity in your cluster can also help diagnose operational
issues.
[float]
=== Where to Go Next
* <<security-getting-started, Getting Started>>
steps through how to install and start using Security for basic authentication.
* {xpack-ref}/how-security-works.html[How Security Works]
provides more information about how Security supports user authentication,
authorization, and encryption.
* {xpack-ref}/tribe-clients-integrations.html[Integrations]
shows you how to interact with an Elasticsearch cluster protected by
X-Pack Security.
* {xpack-ref}/security-reference.html[Reference]
provides detailed information about the access privileges you can grant to
users, the settings you can configure for Security in `elasticsearch.yml`,
and the files where Security configuration information is stored.
[float]
=== Have Comments, Questions, or Feedback?
Head over to our {security-forum}[Security Discussion Forum]
to share your experience, questions, and suggestions.
--
include::getting-started.asciidoc[]

View File

@@ -162,6 +162,7 @@ keys. The CSRs should be provided to the CA in order to obtain the signed
certificates. The signed certificates will need to be in PEM format in order to
be used.
[[certgen-silent]]
===== Using `certgen` in Silent Mode
`certgen` supports a silent mode of operation to enable easier batch operations. In order
@@ -207,6 +208,57 @@ bin/x-pack/certgen -in instances.yml <1>
contained in the YAML file. The other options to the tool can be specified in addition to the `-in` option. For all of the available
options, run `bin/x-pack/certgen -h`.
[[certgen-options]]
===== Command Line Options for `certgen`
`-out <file>`::
The path to the output file (`.zip`) that should be generated.
`-in <file>`::
Input file for running in <<certgen-silent, silent mode>>.
`-csr`::
Operate in <<generating-csr, Certificate Signing Request>> mode.
`-cert <file>`::
This option causes `certgen` to generate new instance certificates and keys
using an existing CA certificate (provided in the `file` argument).
+
_Not available in `-csr` mode._
`-key <file>`::
Provides the _private-key_ file for the CA certificate.
+
_Required whenever the `-cert` option is used._
`-pass <password>`::
Specifies the password for the CA private key.
If the `-key` option is provided, then this is the password for the existing
private key file.
Otherwise, it is the password that should be applied to the generated CA key.
+
_Not available in `-csr` mode._
`-p12 <password>`::
Generate a PKCS#12 (`.p12` or `.pfx`) container file for each of the instance
certificates and keys.
The generated file is protected by the supplied password (which may be blank).
+
_Not available in `-csr` mode._
`-dn <name>`::
The _Distinguished Name_ that should be used for the generated CA certificate.
+
_Not available in `-csr` mode, or with `-cert`._
`-keysize <bits>`::
The number of bits to be used in generated RSA keys (default `2048`).
`-days <n>`::
The number of days for which generated keys should be valid (default `1095`).
+
_Not available in `-csr` mode._
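The options can be combined. A hypothetical invocation that generates a new CA and instance
certificates with 4096-bit keys, valid for two years, might look like this:
[source,shell]
--------------------------------------------------
# Hypothetical example combining the options described above.
bin/x-pack/certgen -out certs.zip -days 730 -keysize 4096 -dn "CN=My Example CA"
--------------------------------------------------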
[[enable-ssl]]
==== Enabling SSL in the Node Configuration

View File

@@ -17,12 +17,12 @@ condition is met.
[[log-add-input]]
=== Schedule the Watch and Define an Input
A watch <<trigger-schedule, schedule>> controls how often a watch is triggered.
The watch <<input, input>> gets the data that you want to evaluate.
A watch {xpack-ref}/trigger-schedule.html[schedule] controls how often a watch is triggered.
The watch {xpack-ref}/input.html[input] gets the data that you want to evaluate.
To periodically search log data and load the results into the
watch, you could use an <<schedule-interval, interval>> schedule and a
<<input-search, search>> input. For example, the following Watch searches
watch, you could use an {xpack-ref}/trigger-schedule.html#schedule-interval[interval] schedule and a
{xpack-ref}/input-search.html[search] input. For example, the following Watch searches
the `logs` index for errors every 10 seconds:
[source,js]
@@ -75,7 +75,7 @@ GET .watcher-history*/_search?pretty
[[log-add-condition]]
=== Add a Condition
A <<condition, condition>> evaluates the data you've loaded into the watch and
A {xpack-ref}/condition.html[condition] evaluates the data you've loaded into the watch and
determines if any action is required. Now that you've loaded log errors into
the watch, you can define a condition that checks to see if any errors were
found.
@@ -106,7 +106,7 @@ PUT _xpack/watcher/watch/log_error_watch
}
--------------------------------------------------
// CONSOLE
<1> The <<condition-compare, compare>> condition lets you easily compare against
<1> The {xpack-ref}/condition-compare.html[compare] condition lets you easily compare against
values in the execution context.
For this compare condition to evaluate to `true`, you need to add an event
@@ -154,7 +154,7 @@ GET .watcher-history*/_search?pretty
Recording watch records in the watch history is nice, but the real power of
{watcher} is being able to do something when the watch condition is met. A
watch's <<actions, actions>> define what to do when the watch condition
watch's {xpack-ref}/actions.html[actions] define what to do when the watch condition
evaluates to `true`. You can send emails, call third-party webhooks, write
documents to an Elasticsearch index, or log messages to the standard
Elasticsearch log files.
@@ -226,9 +226,9 @@ allowed to execute read-only watch operations.
[[next-steps]]
=== Where to Go Next
* See <<how-watcher-works, How {watcher} Works>> for more information about the
* See {xpack-ref}/how-watcher-works.html[How {watcher} Works] for more information about the
anatomy of a watch and the watch lifecycle.
* See <<example-watches, Example Watches>> for more examples of setting up
* See {xpack-ref}/example-watches.html[Example Watches] for more examples of setting up
a watch.
* See the https://github.com/elastic/examples/tree/master/Alerting[Example
Watches] in the Elastic Examples repo for additional sample watches you can use

View File

@@ -0,0 +1,67 @@
[[xpack-alerting]]
= Alerting on Cluster and Index Events
[partintro]
--
You can watch for changes or anomalies in your data and perform the necessary
actions in response. For example, you might want to:
* Monitor social media as another way to detect failures in user-facing
automated systems like ATMs or ticketing systems. When the number of tweets
and posts in an area exceeds a threshold of significance, notify a service
technician.
* Monitor your infrastructure, tracking disk usage over time. Open a helpdesk
ticket when any servers are likely to run out of free space in the next few
days.
* Track network activity to detect malicious activity, and proactively change
firewall configuration to reject the malicious user.
* Monitor Elasticsearch, and send immediate notification to the system
administrator if nodes leave the cluster or query throughput exceeds an
expected range.
* Track application response times and if page-load time exceeds SLAs for more
than 5 minutes, open a helpdesk ticket. If SLAs are exceeded for an hour,
page the administrator on duty.
All of these use-cases share a few key properties:
* The relevant data or changes in data can be identified with a periodic
Elasticsearch query.
* The results of the query can be checked against a condition.
* One or more actions are taken if the condition is true -- an email is sent, a
3rd party system is notified, or the query results are stored.
[float]
=== How Watches Work
{xpack} provides an API for creating, managing and testing _watches_. A watch
describes a single alert and can contain multiple notification actions.
A watch is constructed from four simple building blocks:
Schedule:: A schedule for running a query and checking the condition.
Query:: The query to run as input to the condition. Watches
support the full Elasticsearch query language, including
aggregations.
Condition:: A condition that determines whether or not to execute the actions.
You can use simple conditions (always true), or use scripting for
more sophisticated scenarios.
Actions:: One or more actions, such as sending email, pushing data to
3rd party systems through a webhook, or indexing the results of
the query.
A full history of all watches is maintained in an Elasticsearch index. This
history keeps track of each time a watch is triggered and records the results
from the query, whether the condition was met, and what actions were taken.
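To make these building blocks concrete, the following is a minimal sketch of a watch that combines
all four. The watch name, index, query, and threshold are invented for illustration; the getting
started guide included below walks through a full example:
[source,shell]
--------------------------------------------------
# Sketch of a watch: schedule, query input, condition, and a logging action.
curl -XPUT 'localhost:9200/_xpack/watcher/watch/example_error_watch' \
  -H "Content-Type: application/json" -d '{
  "trigger" : { "schedule" : { "interval" : "10m" } },
  "input" : {
    "search" : {
      "request" : {
        "indices" : [ "logs" ],
        "body" : { "query" : { "match" : { "message" : "error" } } }
      }
    }
  },
  "condition" : { "compare" : { "ctx.payload.hits.total" : { "gt" : 0 } } },
  "actions" : {
    "log_error" : {
      "logging" : { "text" : "Found {{ctx.payload.hits.total}} errors" }
    }
  }
}'
--------------------------------------------------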
--
include::getting-started.asciidoc[]

View File

@@ -4,101 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.ssl.CertificateTool "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.ssl.CertificateTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

View File

@@ -4,97 +4,12 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include watcher jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
declare -a args=("$@")
source "`dirname "$0"`"/x-pack-env
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool "$@"
status=$?
cd - > /dev/null
exit $status

View File

@@ -4,91 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.extensions.XPackExtensionCli "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.extensions.XPackExtensionCli "${args[@]}"
status=$?
cd - > /dev/null
exit $status

View File

@@ -4,101 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

View File

@@ -4,101 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

View File

@@ -4,101 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.common.settings.EncKeyTool $properties "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

Binary file not shown.

View File

@@ -4,101 +4,15 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
SCRIPT="$0"
source "`dirname "$0"`"/../elasticsearch-env
# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path.
while [ -h "$SCRIPT" ] ; do
ls=`ls -ld "$SCRIPT"`
# Drop everything prior to ->
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
SCRIPT="$link"
else
SCRIPT=`dirname "$SCRIPT"`/"$link"
fi
done
# determine elasticsearch home
ES_HOME=`dirname "$SCRIPT"`/../..
# make ELASTICSEARCH_HOME absolute
ES_HOME=`cd "$ES_HOME"; pwd`
# If an include wasn't specified in the environment, then search for one...
if [ "x$ES_INCLUDE" = "x" ]; then
# Locations (in order) to use when searching for an include file.
for include in /usr/share/elasticsearch/elasticsearch.in.sh \
/usr/local/share/elasticsearch/elasticsearch.in.sh \
/opt/elasticsearch/elasticsearch.in.sh \
~/.elasticsearch.in.sh \
"`dirname "$0"`"/../elasticsearch.in.sh \
"$ES_HOME/bin/elasticsearch.in.sh"; do
if [ -r "$include" ]; then
. "$include"
break
fi
done
# ...otherwise, source the specified include.
elif [ -r "$ES_INCLUDE" ]; then
. "$ES_INCLUDE"
fi
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
if [ -z "$ES_CLASSPATH" ]; then
echo "You must set the ES_CLASSPATH var" >&2
exit 1
fi
if [ -z "$CONF_DIR" ]; then
# Try to read package config files
if [ -f "/etc/sysconfig/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/sysconfig/elasticsearch"
elif [ -f "/etc/default/elasticsearch" ]; then
CONF_DIR=/etc/elasticsearch
. "/etc/default/elasticsearch"
fi
fi
export HOSTNAME=`hostname -s`
# include x-pack jars in classpath
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"
# don't let JAVA_TOOL_OPTIONS slip in (e.g. crazy agents in ubuntu)
# works around https://bugs.launchpad.net/ubuntu/+source/jayatana/+bug/1441487
if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
echo "Warning: Ignoring JAVA_TOOL_OPTIONS=$JAVA_TOOL_OPTIONS"
echo "Please pass JVM parameters via ES_JAVA_OPTS instead"
unset JAVA_TOOL_OPTIONS
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
source "`dirname "$0"`"/x-pack-env
declare -a args=("$@")
if [ -e "$CONF_DIR" ]; then
args=("${args[@]}" --path.conf "$CONF_DIR")
fi
args=("${args[@]}" --path.conf "$CONF_DIR")
cd "$ES_HOME" > /dev/null
"$JAVA" $ES_JAVA_OPTS -cp "$ES_CLASSPATH" -Des.path.home="$ES_HOME" org.elasticsearch.xpack.security.authc.file.tool.UsersTool "${args[@]}"
"$JAVA" $ES_JAVA_OPTS -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" org.elasticsearch.xpack.security.authc.file.tool.UsersTool "${args[@]}"
status=$?
cd - > /dev/null
exit $status

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/plugins/x-pack/*"

View File

@@ -1 +0,0 @@
95aa3e6fb520191a0970a73cf09f62948ee614be

View File

@@ -1 +0,0 @@
733db77aa8d9b2d68015189df76ab06304406e50

View File

@@ -1,558 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
=========================================================================
This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0
Full license text: <http://mozilla.org/MPL/2.0/>
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
View File
@ -1,6 +0,0 @@
Apache HttpComponents Client
Copyright 1999-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
View File
@ -1 +0,0 @@
e7501a1b34325abb00d17dde96150604a0658b54
View File
@ -1 +0,0 @@
f4be009e7505f6ceddf21e7960c759f413f15056
View File
@ -23,6 +23,7 @@ import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
@ -50,9 +51,17 @@ import java.util.concurrent.atomic.AtomicReference;
*/
public class LicenseService extends AbstractLifecycleComponent implements ClusterStateListener, SchedulerEngine.Listener {
// pkg private for tests
static final TimeValue TRIAL_LICENSE_DURATION = TimeValue.timeValueHours(30 * 24);
public static final Setting<String> SELF_GENERATED_LICENSE_TYPE = new Setting<>("xpack.license.self_generated.type",
(s) -> "trial", (s) -> {
if (validSelfGeneratedType(s)) {
return s;
} else {
throw new IllegalArgumentException("Illegal self generated license type [" + s + "]. Must be trial or basic.");
}
}, Setting.Property.NodeScope);
// pkg private for tests
static final TimeValue SELF_GENERATED_LICENSE_DURATION = TimeValue.timeValueHours(30 * 24);
/**
* Duration of grace period after a license has expired
*/
@ -60,7 +69,9 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
private final ClusterService clusterService;
/** The xpack feature state to update when license changes are made. */
/**
* The xpack feature state to update when license changes are made.
*/
private final XPackLicenseState licenseState;
/**
@ -83,7 +94,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
/**
* Max number of nodes licensed by generated trial license
*/
private int trialLicenseMaxNodes = 1000;
private int selfGeneratedLicenseMaxNodes = 1000;
public static final String LICENSE_JOB = "licenseJob";
@ -108,10 +119,10 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
private void logExpirationWarning(long expirationMillis, boolean expired) {
String expiredMsg = expired ? "expired" : "will expire";
String general = LoggerMessageFormat.format(null, "\n" +
"#\n" +
"# License [{}] on [{}]. If you have a new license, please update it.\n" +
"# Otherwise, please reach out to your support contact.\n" +
"# ", expiredMsg, DATE_FORMATTER.printer().print(expirationMillis));
"#\n" +
"# License [{}] on [{}]. If you have a new license, please update it.\n" +
"# Otherwise, please reach out to your support contact.\n" +
"# ", expiredMsg, DATE_FORMATTER.printer().print(expirationMillis));
if (expired) {
general = general.toUpperCase(Locale.ROOT);
}
@ -179,8 +190,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
if (!License.isAutoGeneratedLicense(currentLicense.signature()) // current license is not auto-generated
&& currentLicense.issueDate() > newLicense.issueDate()) { // and has a later issue date
acknowledgeMessages.put("license", new String[]{
"The new license is older than the currently installed license. " +
"Are you sure you want to override the current license?"});
"The new license is older than the currently installed license. " +
"Are you sure you want to override the current license?"});
}
XPackLicenseState.ACKNOWLEDGMENT_MESSAGES.forEach((feature, ackMessages) -> {
String[] messages = ackMessages.apply(currentLicense.operationMode(), newLicense.operationMode());
@ -270,42 +281,49 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
* has no signed/trial license
*/
private void registerTrialLicense() {
clusterService.submitStateUpdateTask("generate trial license for [" + TRIAL_LICENSE_DURATION + "]", new ClusterStateUpdateTask() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
LicensesMetaData licensesMetaData = newState.metaData().custom(LicensesMetaData.TYPE);
if (logger.isDebugEnabled()) {
logger.debug("registered trial license: {}", licensesMetaData);
}
}
clusterService.submitStateUpdateTask("generate trial license for [" + SELF_GENERATED_LICENSE_DURATION + "]",
new ClusterStateUpdateTask() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
LicensesMetaData licensesMetaData = newState.metaData().custom(LicensesMetaData.TYPE);
if (logger.isDebugEnabled()) {
logger.debug("registered trial license: {}", licensesMetaData);
}
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
final MetaData metaData = currentState.metaData();
final LicensesMetaData currentLicensesMetaData = metaData.custom(LicensesMetaData.TYPE);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
// do not generate a trial license if any license is present
if (currentLicensesMetaData == null) {
long issueDate = clock.millis();
License.Builder specBuilder = License.builder()
.uid(UUID.randomUUID().toString())
.issuedTo(clusterService.getClusterName().value())
.maxNodes(trialLicenseMaxNodes)
.issueDate(issueDate)
.expiryDate(issueDate + TRIAL_LICENSE_DURATION.getMillis());
License trialLicense = TrialLicense.create(specBuilder);
mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(trialLicense));
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
return currentState;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
final MetaData metaData = currentState.metaData();
final LicensesMetaData currentLicensesMetaData = metaData.custom(LicensesMetaData.TYPE);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
// do not generate a trial license if any license is present
if (currentLicensesMetaData == null) {
long issueDate = clock.millis();
String type = SELF_GENERATED_LICENSE_TYPE.get(settings);
if (validSelfGeneratedType(type) == false) {
throw new IllegalArgumentException("Illegal self generated license type [" + type +
"]. Must be trial or basic.");
}
License.Builder specBuilder = License.builder()
.uid(UUID.randomUUID().toString())
.issuedTo(clusterService.getClusterName().value())
.maxNodes(selfGeneratedLicenseMaxNodes)
.issueDate(issueDate)
.type(type)
.expiryDate(issueDate + SELF_GENERATED_LICENSE_DURATION.getMillis());
License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(selfGeneratedLicense));
return ClusterState.builder(currentState).metaData(mdBuilder).build();
}
return currentState;
}
@Override
public void onFailure(String source, @Nullable Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
@Override
public void onFailure(String source, @Nullable Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
}
});
});
}
@Override
@ -367,8 +385,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
// auto-generate license if no licenses ever existed
// this will trigger a subsequent cluster changed event
if (currentClusterState.getNodes().isLocalNodeElectedMaster() &&
prevLicensesMetaData == null &&
(currentLicensesMetaData == null || currentLicensesMetaData.getLicense() == null)) {
prevLicensesMetaData == null &&
(currentLicensesMetaData == null || currentLicensesMetaData.getLicense() == null)) {
registerTrialLicense();
}
} else if (logger.isDebugEnabled()) {
@ -385,7 +403,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
if (license != null) {
long time = clock.millis();
boolean active = time >= license.issueDate() &&
time < license.expiryDate() + GRACE_PERIOD_DURATION.getMillis();
time < license.expiryDate() + GRACE_PERIOD_DURATION.getMillis();
licenseState.update(license.operationMode(), active);
if (active) {
@ -458,7 +476,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
return license;
} else if (license != null) {
boolean autoGeneratedLicense = License.isAutoGeneratedLicense(license.signature());
if ((autoGeneratedLicense && TrialLicense.verify(license))
if ((autoGeneratedLicense && SelfGeneratedLicense.verify(license))
|| (!autoGeneratedLicense && LicenseVerifier.verifyLicense(license))) {
return license;
}
@ -466,4 +484,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
}
return null;
}
private static boolean validSelfGeneratedType(String type) {
return "basic".equals(type) || "trial".equals(type);
}
}
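The hunks above rename the trial-license machinery to "self-generated" and introduce a validated `xpack.license.self_generated.type` node setting backed by `validSelfGeneratedType`. As a minimal standalone sketch of that validation behavior (the class name and `main` driver below are illustrative, not part of x-pack):

[source,java]
--------------------------------------------------
public final class SelfGeneratedTypeCheck {

    // Mirrors validSelfGeneratedType(...) from the hunk above.
    static String validate(String type) {
        if ("basic".equals(type) || "trial".equals(type)) {
            return type;
        }
        throw new IllegalArgumentException(
                "Illegal self generated license type [" + type + "]. Must be trial or basic.");
    }

    public static void main(String[] args) {
        System.out.println(validate("trial")); // accepted
        System.out.println(validate("basic")); // accepted
        try {
            validate("gold");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // rejected, as in the setting's parser
        }
    }
}
--------------------------------------------------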
View File
@ -21,11 +21,10 @@ import java.util.Collections;
import static org.elasticsearch.license.CryptUtils.decrypt;
import static org.elasticsearch.license.CryptUtils.encrypt;
class TrialLicense {
class SelfGeneratedLicense {
public static License create(License.Builder specBuilder) {
License spec = specBuilder
.type("trial")
.issuer("elasticsearch")
.version(License.VERSION_CURRENT)
.build();
View File
@ -285,8 +285,8 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
try {
components.addAll(security.createComponents(internalClient, threadPool, clusterService, resourceWatcherService,
extensionsService.getExtensions()));
} catch (Exception e) {
throw new Error("security initialization failed", e);
} catch (final Exception e) {
throw new IllegalStateException("security initialization failed", e);
}
components.addAll(monitoring.createComponents(internalClient, threadPool, clusterService, licenseService, sslService));
@ -384,6 +384,8 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
settings.addAll(licensing.getSettings());
settings.addAll(XPackSettings.getAllSettings());
settings.add(LicenseService.SELF_GENERATED_LICENSE_TYPE);
// we add the `xpack.version` setting to all internal indices
settings.add(Setting.simpleString("index.xpack.version", Setting.Property.IndexScope));
@ -519,7 +521,11 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
@Override
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
return watcher.getIndexTemplateMetaDataUpgrader();
return templates -> {
templates = watcher.getIndexTemplateMetaDataUpgrader().apply(templates);
templates = security.getIndexTemplateMetaDataUpgrader().apply(templates);
return templates;
};
}
public void onIndexModule(IndexModule module) {
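The `getIndexTemplateMetaDataUpgrader` change above chains the watcher and security upgraders instead of returning only the watcher one. A minimal sketch of the composition pattern, substituting a plain `Map<String, String>` for `IndexTemplateMetaData` (keys are hypothetical):

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

public final class UpgraderChain {
    public static void main(String[] args) {
        // Stand-ins for the watcher and security upgraders in the hunk above.
        UnaryOperator<Map<String, String>> watcherUpgrader =
                t -> { t.put(".watches", "upgraded"); return t; };
        UnaryOperator<Map<String, String>> securityUpgrader =
                t -> { t.put(".security", "upgraded"); return t; };

        // Same shape as the replacement body: apply each upgrader in turn.
        UnaryOperator<Map<String, String>> combined = templates -> {
            templates = watcherUpgrader.apply(templates);
            templates = securityUpgrader.apply(templates);
            return templates;
        };

        // Prints both entries, e.g. {.watches=upgraded, .security=upgraded}
        System.out.println(combined.apply(new HashMap<>()));
    }
}
--------------------------------------------------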
View File
@ -5,31 +5,31 @@
*/
package org.elasticsearch.xpack.common.http;
import org.apache.http.Header;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpHost;
import org.apache.http.NameValuePair;
import org.apache.http.auth.AuthScope;
import org.apache.http.client.AuthCache;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIUtils;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.client.BasicAuthCache;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicNameValuePair;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHeaders;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.NameValuePair;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.client.AuthCache;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.client.http.client.config.RequestConfig;
import org.elasticsearch.client.http.client.methods.CloseableHttpResponse;
import org.elasticsearch.client.http.client.methods.HttpEntityEnclosingRequestBase;
import org.elasticsearch.client.http.client.methods.HttpHead;
import org.elasticsearch.client.http.client.methods.HttpRequestBase;
import org.elasticsearch.client.http.client.protocol.HttpClientContext;
import org.elasticsearch.client.http.client.utils.URIUtils;
import org.elasticsearch.client.http.client.utils.URLEncodedUtils;
import org.elasticsearch.client.http.conn.ssl.DefaultHostnameVerifier;
import org.elasticsearch.client.http.conn.ssl.NoopHostnameVerifier;
import org.elasticsearch.client.http.conn.ssl.SSLConnectionSocketFactory;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.impl.auth.BasicScheme;
import org.elasticsearch.client.http.impl.client.BasicAuthCache;
import org.elasticsearch.client.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.http.impl.client.CloseableHttpClient;
import org.elasticsearch.client.http.impl.client.HttpClientBuilder;
import org.elasticsearch.client.http.message.BasicNameValuePair;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.Streams;
View File
@ -5,8 +5,8 @@
*/
package org.elasticsearch.xpack.common.http.auth;
import org.apache.http.auth.AuthScope;
import org.apache.http.client.CredentialsProvider;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
View File
@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.common.http.auth.basic;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.auth.UsernamePasswordCredentials;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.xpack.common.http.auth.ApplicableHttpAuth;
import org.elasticsearch.xpack.security.crypto.CryptoService;
View File
@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;
@ -130,9 +131,7 @@ public class KillProcessAction extends Action<KillProcessAction.Request, KillPro
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutodetectProcessManager processManager, Auditor auditor) {
super(settings, NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
Request::new, Response::new, ThreadPool.Names.SAME, processManager);
// ThreadPool.Names.SAME
Request::new, Response::new, MachineLearning.UTILITY_THREAD_POOL_NAME, processManager);
this.auditor = auditor;
}
View File
@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
@ -56,6 +57,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.Arrays;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
@ -96,7 +98,8 @@ public class AutodetectProcessManager extends AbstractComponent {
private final JobResultsPersister jobResultsPersister;
private final JobDataCountsPersister jobDataCountsPersister;
private final ConcurrentMap<Long, AutodetectCommunicator> autoDetectCommunicatorByJob;
private final ConcurrentMap<Long, AutodetectCommunicator> autoDetectCommunicatorByOpenJob = new ConcurrentHashMap<>();
private final ConcurrentMap<Long, AutodetectCommunicator> autoDetectCommunicatorByClosingJob = new ConcurrentHashMap<>();
private final int maxAllowedRunningJobs;
@ -120,39 +123,49 @@ public class AutodetectProcessManager extends AbstractComponent {
this.jobProvider = jobProvider;
this.jobResultsPersister = jobResultsPersister;
this.jobDataCountsPersister = jobDataCountsPersister;
this.autoDetectCommunicatorByJob = new ConcurrentHashMap<>();
this.auditor = auditor;
}
public synchronized void closeAllJobsOnThisNode(String reason) throws IOException {
int numJobs = autoDetectCommunicatorByJob.size();
int numJobs = autoDetectCommunicatorByOpenJob.size();
if (numJobs != 0) {
logger.info("Closing [{}] jobs, because [{}]", numJobs, reason);
for (AutodetectCommunicator communicator : autoDetectCommunicatorByJob.values()) {
for (AutodetectCommunicator communicator : autoDetectCommunicatorByOpenJob.values()) {
closeJob(communicator.getJobTask(), false, reason);
}
}
}
public void killProcess(JobTask jobTask, boolean awaitCompletion, String reason) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.remove(jobTask.getAllocationId());
String extraInfo;
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.remove(jobTask.getAllocationId());
if (communicator == null) {
extraInfo = " while closing";
// if there isn't an open job, check for a closing job
communicator = autoDetectCommunicatorByClosingJob.remove(jobTask.getAllocationId());
} else {
extraInfo = "";
}
if (communicator != null) {
if (reason == null) {
logger.info("Killing job [{}]", jobTask.getJobId());
logger.info("Killing job [{}]{}", jobTask.getJobId(), extraInfo);
} else {
logger.info("Killing job [{}], because [{}]", jobTask.getJobId(), reason);
logger.info("Killing job [{}]{}, because [{}]", jobTask.getJobId(), extraInfo, reason);
}
killProcess(communicator, jobTask.getJobId(), awaitCompletion, true);
}
}
public void killAllProcessesOnThisNode() {
Iterator<AutodetectCommunicator> iter = autoDetectCommunicatorByJob.values().iterator();
while (iter.hasNext()) {
AutodetectCommunicator communicator = iter.next();
iter.remove();
killProcess(communicator, communicator.getJobTask().getJobId(), false, false);
// first kill open jobs, then closing jobs
for (Iterator<AutodetectCommunicator> iter : Arrays.asList(autoDetectCommunicatorByOpenJob.values().iterator(),
autoDetectCommunicatorByClosingJob.values().iterator())) {
while (iter.hasNext()) {
AutodetectCommunicator communicator = iter.next();
iter.remove();
killProcess(communicator, communicator.getJobTask().getJobId(), false, false);
}
}
}
@ -164,7 +177,6 @@ public class AutodetectProcessManager extends AbstractComponent {
}
}
/**
* Passes data to the native process.
* This is a blocking call that won't return until all the data has been
@ -186,7 +198,7 @@ public class AutodetectProcessManager extends AbstractComponent {
*/
public void processData(JobTask jobTask, InputStream input, XContentType xContentType,
DataLoadParams params, BiConsumer<DataCounts, Exception> handler) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId());
if (communicator == null) {
throw ExceptionsHelper.conflictStatusException("Cannot process data because job [" + jobTask.getJobId() + "] is not open");
}
@ -204,7 +216,7 @@ public class AutodetectProcessManager extends AbstractComponent {
*/
public void flushJob(JobTask jobTask, FlushJobParams params, ActionListener<FlushAcknowledgement> handler) {
logger.debug("Flushing job {}", jobTask.getJobId());
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId());
if (communicator == null) {
String message = String.format(Locale.ROOT, "Cannot flush because job [%s] is not open", jobTask.getJobId());
logger.debug(message);
@ -225,7 +237,7 @@ public class AutodetectProcessManager extends AbstractComponent {
public void writeUpdateProcessMessage(JobTask jobTask, List<JobUpdate.DetectorUpdate> updates, ModelPlotConfig config,
Consumer<Exception> handler) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId());
if (communicator == null) {
String message = "Cannot process update model debug config because job [" + jobTask.getJobId() + "] is not open";
logger.debug(message);
@ -263,7 +275,7 @@ public class AutodetectProcessManager extends AbstractComponent {
@Override
protected void doRun() throws Exception {
try {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.computeIfAbsent(jobTask.getAllocationId(),
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.computeIfAbsent(jobTask.getAllocationId(),
id -> create(jobTask, params, handler));
communicator.init(params.modelSnapshot());
setJobState(jobTask, JobState.OPENED);
@ -271,7 +283,7 @@ public class AutodetectProcessManager extends AbstractComponent {
// No need to log here as the persistent task framework will log it
try {
// Don't leave a partially initialised process hanging around
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.remove(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.remove(jobTask.getAllocationId());
if (communicator != null) {
communicator.killProcess(false, false);
}
@ -288,7 +300,12 @@ public class AutodetectProcessManager extends AbstractComponent {
}
AutodetectCommunicator create(JobTask jobTask, AutodetectParams autodetectParams, Consumer<Exception> handler) {
if (autoDetectCommunicatorByJob.size() == maxAllowedRunningJobs) {
// Closing jobs can still be using some or all threads in MachineLearning.AUTODETECT_THREAD_POOL_NAME
// that an open job uses, so include them too when considering if enough threads are available.
// There's a slight possibility that the same key is in both sets, hence it's not sufficient to simply
// add the two map sizes.
int currentRunningJobs = Sets.union(autoDetectCommunicatorByOpenJob.keySet(), autoDetectCommunicatorByClosingJob.keySet()).size();
if (currentRunningJobs >= maxAllowedRunningJobs) {
throw new ElasticsearchStatusException("max running job capacity [" + maxAllowedRunningJobs + "] reached",
RestStatus.TOO_MANY_REQUESTS);
}
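The comment above explains why the capacity check unions the two key sets rather than summing the map sizes: a job that is mid-close can momentarily appear in both maps. `Sets.union` is an Elasticsearch utility; a plain-JDK sketch of the same arithmetic:

[source,java]
--------------------------------------------------
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public final class RunningJobCount {
    public static void main(String[] args) {
        Map<Long, String> open = new ConcurrentHashMap<>();
        Map<Long, String> closing = new ConcurrentHashMap<>();
        open.put(1L, "communicator-1");
        closing.put(1L, "communicator-1"); // same job, caught mid-close
        closing.put(2L, "communicator-2");

        Set<Long> union = new HashSet<>(open.keySet());
        union.addAll(closing.keySet());

        System.out.println("sum of sizes = " + (open.size() + closing.size())); // 3, overcounts
        System.out.println("running jobs = " + union.size());                   // 2, correct
    }
}
--------------------------------------------------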
@ -368,45 +385,58 @@ public class AutodetectProcessManager extends AbstractComponent {
}
/**
* Stop the running job and mark it as finished.<br>
* Stop the running job and mark it as finished.
*
* @param jobTask The job to stop
* @param jobTask The job to stop
* @param restart Whether the job should be restarted by persistent tasks
* @param reason The reason for closing the job
*/
public void closeJob(JobTask jobTask, boolean restart, String reason) {
logger.debug("Attempting to close job [{}], because [{}]", jobTask.getJobId(), reason);
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.remove(jobTask.getAllocationId());
String jobId = jobTask.getJobId();
long allocationId = jobTask.getAllocationId();
logger.debug("Attempting to close job [{}], because [{}]", jobId, reason);
// don't remove the communicator immediately, because we need to ensure it's in the
// map of closing communicators before it's removed from the map of running ones
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(allocationId);
if (communicator == null) {
logger.debug("Cannot close: no active autodetect process for job {}", jobTask.getJobId());
logger.debug("Cannot close: no active autodetect process for job [{}]", jobId);
return;
}
// keep a record of the job, so that it can still be killed while closing
autoDetectCommunicatorByClosingJob.putIfAbsent(allocationId, communicator);
communicator = autoDetectCommunicatorByOpenJob.remove(allocationId);
if (communicator == null) {
// if we get here a simultaneous close request beat us to the remove() call
logger.debug("Already closing autodetect process for job [{}]", jobId);
return;
}
if (reason == null) {
logger.info("Closing job [{}]", jobTask.getJobId());
logger.info("Closing job [{}]", jobId);
} else {
logger.info("Closing job [{}], because [{}]", jobTask.getJobId(), reason);
logger.info("Closing job [{}], because [{}]", jobId, reason);
}
try {
communicator.close(restart, reason);
autoDetectCommunicatorByClosingJob.remove(allocationId);
} catch (Exception e) {
logger.warn("Exception closing stopped process input stream", e);
logger.warn("[" + jobId + "] Exception closing autodetect process", e);
setJobState(jobTask, JobState.FAILED);
throw ExceptionsHelper.serverError("Exception closing stopped process input stream", e);
throw ExceptionsHelper.serverError("Exception closing autodetect process", e);
}
}
int numberOfOpenJobs() {
return autoDetectCommunicatorByJob.size();
return autoDetectCommunicatorByOpenJob.size();
}
boolean jobHasActiveAutodetectProcess(JobTask jobTask) {
return autoDetectCommunicatorByJob.get(jobTask.getAllocationId()) != null;
return autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId()) != null;
}
public Optional<Duration> jobOpenTime(JobTask jobTask) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId());
if (communicator == null) {
return Optional.empty();
}
@ -452,7 +482,7 @@ public class AutodetectProcessManager extends AbstractComponent {
}
public Optional<Tuple<DataCounts, ModelSizeStats>> getStatistics(JobTask jobTask) {
AutodetectCommunicator communicator = autoDetectCommunicatorByJob.get(jobTask.getAllocationId());
AutodetectCommunicator communicator = autoDetectCommunicatorByOpenJob.get(jobTask.getAllocationId());
if (communicator == null) {
return Optional.empty();
}
@ -523,7 +553,7 @@ public class AutodetectProcessManager extends AbstractComponent {
try {
runnable.run();
} catch (Exception e) {
logger.error("error handeling job operation", e);
logger.error("error handling job operation", e);
}
}
}
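The `closeJob` rewrite above registers the communicator in the closing map before removing it from the open map, so a concurrent `killProcess` can find the job at every instant. A condensed sketch of that handoff, with a stand-in `Communicator` type in place of `AutodetectCommunicator`:

[source,java]
--------------------------------------------------
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public final class CloseHandoff {
    static final class Communicator { } // stand-in for AutodetectCommunicator

    private final ConcurrentMap<Long, Communicator> open = new ConcurrentHashMap<>();
    private final ConcurrentMap<Long, Communicator> closing = new ConcurrentHashMap<>();

    void closeJob(long allocationId) {
        Communicator communicator = open.get(allocationId);
        if (communicator == null) {
            return; // no active autodetect process for this job
        }
        // Record the job as closing *before* removing it from the open map,
        // so killProcess() can always locate it in one map or the other.
        closing.putIfAbsent(allocationId, communicator);
        communicator = open.remove(allocationId);
        if (communicator == null) {
            return; // a simultaneous close beat us to the remove()
        }
        // communicator.close(restart, reason) would run here and may throw;
        // the entry is only cleared after a clean close, so a failed close
        // leaves the job killable.
        closing.remove(allocationId);
    }
}
--------------------------------------------------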
View File
@ -229,7 +229,7 @@ class NativeAutodetectProcess implements AutodetectProcess {
}
}
private void deleteAssociatedFiles() throws IOException {
private synchronized void deleteAssociatedFiles() throws IOException {
if (filesToDelete == null) {
return;
}
@ -241,6 +241,8 @@ class NativeAutodetectProcess implements AutodetectProcess {
LOGGER.warn("[{}] Failed to delete file {}", jobId, fileToDelete.toString());
}
}
filesToDelete.clear();
}
@Override
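The change above makes `deleteAssociatedFiles` synchronized and clears the list afterwards, so racing `close()` and `kill()` callers delete each file at most once. A standalone sketch of that guard (the class and field names are stand-ins):

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public final class AssociatedFiles {
    private final List<Path> filesToDelete;

    AssociatedFiles(List<Path> filesToDelete) {
        this.filesToDelete = filesToDelete;
    }

    // Synchronized so concurrent callers serialize here; the clear() at the
    // end turns any second invocation into a no-op.
    synchronized void deleteAssociatedFiles() throws IOException {
        if (filesToDelete == null) {
            return; // nothing registered for deletion
        }
        for (Path fileToDelete : filesToDelete) {
            if (Files.deleteIfExists(fileToDelete) == false) {
                System.err.println("Failed to delete file " + fileToDelete);
            }
        }
        filesToDelete.clear();
    }
}
--------------------------------------------------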
View File
@ -139,18 +139,8 @@ public class AutoDetectResultProcessor {
LOGGER.error(new ParameterizedMessage("[{}] error parsing autodetect output", jobId), e);
}
} finally {
try {
if (processKilled == false) {
waitUntilRenormalizerIsIdle();
persister.commitResultWrites(jobId);
persister.commitStateWrites(jobId);
}
} catch (IndexNotFoundException e) {
LOGGER.error("[{}] Error while closing: no such index [{}]", jobId, e.getIndex().getName());
} finally {
flushListener.clear();
completionLatch.countDown();
}
flushListener.clear();
completionLatch.countDown();
}
}
@ -281,6 +271,12 @@ public class AutoDetectResultProcessor {
// Wait for any updateModelSnapshotIdOnJob calls to complete.
updateModelSnapshotIdSemaphore.acquire();
updateModelSnapshotIdSemaphore.release();
// These lines ensure that the "completion" we're awaiting includes making the results searchable
waitUntilRenormalizerIsIdle();
persister.commitResultWrites(jobId);
persister.commitStateWrites(jobId);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOGGER.info("[{}] Interrupted waiting for results processor to complete", jobId);
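The hunks above move the renormalizer wait and the result/state commits out of the processing loop's `finally` block and into the completion wait, so that "completion" also means the results are searchable. A simplified sketch of that ordering, assuming the added lines live in `awaitCompletion` (placeholder method bodies):

[source,java]
--------------------------------------------------
import java.util.concurrent.CountDownLatch;

public final class CompletionOrdering {
    private final CountDownLatch completionLatch = new CountDownLatch(1);

    void processLoop() {
        try {
            // ... parse autodetect output ...
        } finally {
            completionLatch.countDown(); // signal only; commits no longer happen here
        }
    }

    void awaitCompletion() throws InterruptedException {
        completionLatch.await();
        waitUntilRenormalizerIsIdle(); // results are final before we refresh
        commitResultWrites();          // placeholder for persister.commitResultWrites(jobId)
        commitStateWrites();           // placeholder for persister.commitStateWrites(jobId)
    }

    void waitUntilRenormalizerIsIdle() { /* assumed no-op in this sketch */ }
    void commitResultWrites() { /* assumed: refresh the results index */ }
    void commitStateWrites() { /* assumed: refresh the state index */ }
}
--------------------------------------------------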
View File
@ -58,7 +58,8 @@ public class ClusterAlertsUtil {
"elasticsearch_cluster_status",
"elasticsearch_version_mismatch",
"kibana_version_mismatch",
"logstash_version_mismatch"
"logstash_version_mismatch",
"xpack_license_expiration"
};
/**
View File
@ -1,137 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
/**
* Creates aliases for monitoring indexes created by Marvel 2.3+.
*/
public class BackwardsCompatibilityAliasesResource extends HttpResource {
private static final Logger logger = Loggers.getLogger(BackwardsCompatibilityAliasesResource.class);
private final TimeValue masterTimeout;
/**
* Create a new {@link TemplateHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
*/
public BackwardsCompatibilityAliasesResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout) {
super(resourceOwnerName);
this.masterTimeout = masterTimeout;
}
@Override
protected boolean doCheckAndPublish(RestClient client) {
boolean needNewAliases = false;
XContentBuilder request;
try {
Response response = client.performRequest("GET", "/.marvel-es-1-*", Collections.singletonMap("filter_path", "*.aliases"));
Map<String, Object> indices = XContentHelper.convertToMap(JsonXContent.jsonXContent, response.getEntity().getContent(), false);
request = JsonXContent.contentBuilder();
request.startObject().startArray("actions");
for (Map.Entry<String, Object> e : indices.entrySet()) {
String index = e.getKey();
// we add a suffix so that it will not collide with today's monitoring index following an upgrade
String alias = ".monitoring-es-2-" + index.substring(".marvel-es-1-".length()) + "-alias";
if (false == aliasesForIndex(e.getValue()).contains(alias)) {
needNewAliases = true;
addAlias(request, index, alias);
}
}
request.endArray().endObject();
} catch (ResponseException e) {
int statusCode = e.getResponse().getStatusLine().getStatusCode();
if (statusCode == RestStatus.NOT_FOUND.getStatus()) {
logger.debug("no 2.x monitoring indexes found so no need to create backwards compatibility aliases");
return true;
}
logger.error((Supplier<?>) () ->
new ParameterizedMessage("failed to check for 2.x monitoring indexes with [{}]", statusCode),
e);
return false;
} catch (IOException | RuntimeException e) {
logger.error("failed to check for 2.x monitoring indexes", e);
return false;
}
if (false == needNewAliases) {
// Hurray! Nothing to do!
return true;
}
/* Looks like we have to create some new aliases. Note that this is a race with all other exporters on other nodes of Elasticsearch
* targeting this cluster. That is fine because this request is idemopotent, meaning that if it has no work to do it'll just return
* 200 OK { "acknowledged": true }. */
try {
BytesRef bytes = request.bytes().toBytesRef();
HttpEntity body = new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON);
Response response = client.performRequest("POST", "/_aliases", parameters(), body);
Map<String, Object> aliasesResponse = XContentHelper.convertToMap(JsonXContent.jsonXContent, response.getEntity().getContent(),
false);
Boolean acked = (Boolean) aliasesResponse.get("acknowledged");
if (acked == null) {
logger.error("Unexpected response format from _aliases action {}", aliasesResponse);
return false;
}
return acked;
} catch (IOException | RuntimeException e) {
logger.error("failed to create aliases for 2.x monitoring indexes", e);
return false;
}
}
private Set<?> aliasesForIndex(Object indexInfo) {
Map<?, ?> info = (Map<?, ?>) indexInfo;
Map<?, ?> aliases = (Map<?, ?>) info.get("aliases");
return aliases.keySet();
}
/**
* Parameters to use for all requests.
*/
Map<String, String> parameters() {
Map<String, String> parameters = new HashMap<>();
if (masterTimeout != null) {
parameters.put("master_timeout", masterTimeout.getStringRep());
}
return parameters;
}
private void addAlias(XContentBuilder request, String index, String alias) throws IOException {
request.startObject().startObject("add");
{
request.field("index", index);
request.field("alias", alias);
}
request.endObject().endObject();
}
}
View File
@ -8,9 +8,9 @@ package org.elasticsearch.xpack.monitoring.exporter.http;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
View File
@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.entity.ByteArrayEntity;
import org.elasticsearch.client.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

View File
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.apache.logging.log4j.Logger;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.auth.UsernamePasswordCredentials;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.client.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.Version;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
@ -39,8 +39,6 @@ import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.ssl.SSLService;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.util.ArrayList;
@ -132,10 +130,6 @@ public class HttpExporter extends Exporter {
* ES level timeout used when checking and writing pipelines (used to speed up tests)
*/
public static final String PIPELINE_CHECK_TIMEOUT_SETTING = "index.pipeline.master_timeout";
/**
* ES level timeout used when checking and writing aliases (used to speed up tests)
*/
public static final String ALIAS_TIMEOUT_SETTING = "index.alias.master_timeout";
/**
* Minimum supported version of the remote monitoring cluster.
@ -329,10 +323,6 @@ public class HttpExporter extends Exporter {
// load the pipeline (this will get added to as the monitoring API version increases)
configurePipelineResources(config, resourceOwnerName, resources);
// alias .marvel-es-1-* indices
resources.add(new BackwardsCompatibilityAliasesResource(resourceOwnerName,
config.settings().getAsTime(ALIAS_TIMEOUT_SETTING, timeValueSeconds(30))));
// load the watches for cluster alerts if Watcher is available
configureClusterAlertsResources(config, resourceOwnerName, resources);

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.RestClient;

View File

@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.entity.ByteArrayEntity;
import org.elasticsearch.client.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.elasticsearch.client.http.HttpEntity;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

View File

@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;

View File

@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Nullable;

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.client.config.RequestConfig.Builder;
import org.elasticsearch.client.http.client.config.RequestConfig.Builder;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

View File

@ -10,10 +10,6 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
@ -253,11 +249,6 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
}
}
if (null != prepareAddAliasesTo2xIndices(clusterState)) {
logger.debug("old monitoring indexes exist without aliases, waiting for them to get new aliases");
return false;
}
logger.trace("monitoring index templates and pipelines are installed, service can start");
// everything is setup
@ -326,38 +317,6 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
}
}
IndicesAliasesRequest addAliasesTo2xIndices = prepareAddAliasesTo2xIndices(clusterState);
if (addAliasesTo2xIndices == null) {
logger.trace("there are no 2.x monitoring indices or they have all the aliases they need");
} else {
final List<String> monitoringIndices2x = addAliasesTo2xIndices.getAliasActions().stream()
.flatMap((a) -> Arrays.stream(a.indices()))
.collect(Collectors.toList());
logger.debug("there are 2.x monitoring indices {} and they are missing some aliases to make them compatible with 5.x",
monitoringIndices2x);
asyncActions.add(() -> client.execute(IndicesAliasesAction.INSTANCE, addAliasesTo2xIndices,
new ActionListener<IndicesAliasesResponse>() {
@Override
public void onResponse(IndicesAliasesResponse response) {
responseReceived(pendingResponses, true, null);
if (response.isAcknowledged()) {
logger.info("Added modern aliases to 2.x monitoring indices {}", monitoringIndices2x);
} else {
logger.info("Unable to add modern aliases to 2.x monitoring indices {}, response not acknowledged.",
monitoringIndices2x);
}
}
@Override
public void onFailure(Exception e) {
responseReceived(pendingResponses, false, null);
logger.error((Supplier<?>)
() -> new ParameterizedMessage("Unable to add modern aliases to 2.x monitoring indices {}",
monitoringIndices2x), e);
}
}));
}
// avoid constantly trying to setup Watcher, which requires a lot of overhead and avoid attempting to setup during a cluster state
// change
if (state.get() == State.RUNNING && clusterStateChange == false && canUseWatcher()) {
@ -614,23 +573,6 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
});
}
private IndicesAliasesRequest prepareAddAliasesTo2xIndices(ClusterState clusterState) {
IndicesAliasesRequest request = null;
for (IndexMetaData index : clusterState.metaData()) {
String name = index.getIndex().getName();
if (name.startsWith(".marvel-es-1-")) {
// we add a suffix so that it will not collide with today's monitoring index following an upgrade
String alias = ".monitoring-es-2-" + name.substring(".marvel-es-1-".length()) + "-alias";
if (index.getAliases().containsKey(alias)) continue;
if (request == null) {
request = new IndicesAliasesRequest();
}
request.addAliasAction(AliasActions.add().index(name).alias(alias));
}
}
return request;
}
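To make the removed naming scheme concrete, here is a minimal sketch of the transformation the method above performed (the date suffix is illustrative):

[source,java]
--------------------------------------------------
// Hypothetical example of the alias derived for a 2.x monitoring index:
String name  = ".marvel-es-1-2017.07.24";
String alias = ".monitoring-es-2-"
        + name.substring(".marvel-es-1-".length()) + "-alias";
// alias == ".monitoring-es-2-2017.07.24-alias"
--------------------------------------------------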
enum State {
INITIALIZED,
RUNNING,

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.xpack.notification.jira;
import org.apache.http.HttpStatus;
import org.elasticsearch.client.http.HttpStatus;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;

View File

@ -5,27 +5,9 @@
*/
package org.elasticsearch.xpack.security;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
@ -33,6 +15,7 @@ import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Booleans;
@ -57,6 +40,10 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.IndexModule;
@ -124,7 +111,6 @@ import org.elasticsearch.xpack.security.audit.index.IndexNameResolver;
import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail;
import org.elasticsearch.xpack.security.authc.AuthenticationFailureHandler;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authc.ContainerSettings;
import org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler;
import org.elasticsearch.xpack.security.authc.InternalRealms;
import org.elasticsearch.xpack.security.authc.Realm;
@ -149,7 +135,6 @@ import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.security.bootstrap.BootstrapElasticPassword;
import org.elasticsearch.xpack.security.bootstrap.ContainerPasswordBootstrapCheck;
import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction;
@ -168,6 +153,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestGetUsersAction;
import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction;
import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction;
import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction;
import org.elasticsearch.xpack.security.support.IndexLifecycleManager;
import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor;
import org.elasticsearch.xpack.security.transport.filter.IPFilter;
import org.elasticsearch.xpack.security.transport.netty4.SecurityNetty4HttpServerTransport;
@ -176,12 +162,34 @@ import org.elasticsearch.xpack.security.user.AnonymousUser;
import org.elasticsearch.xpack.ssl.SSLBootstrapCheck;
import org.elasticsearch.xpack.ssl.SSLConfigurationSettings;
import org.elasticsearch.xpack.ssl.SSLService;
import org.elasticsearch.xpack.template.TemplateUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.xpack.XPackSettings.HTTP_SSL_ENABLED;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_TEMPLATE_NAME;
public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
@ -310,14 +318,12 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
this.auditTrailService.set(auditTrailService);
final SecurityLifecycleService securityLifecycleService =
new SecurityLifecycleService(settings, clusterService, threadPool, client, licenseState, indexAuditTrail);
new SecurityLifecycleService(settings, clusterService, threadPool, client, indexAuditTrail);
final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, securityLifecycleService);
components.add(tokenService);
final ContainerSettings containerSettings = ContainerSettings.parseAndCreate();
// realms construction
final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityLifecycleService, containerSettings);
final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityLifecycleService);
final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityLifecycleService);
final AnonymousUser anonymousUser = new AnonymousUser(settings);
final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore,
@ -388,7 +394,7 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
securityInterceptor.set(new SecurityServerTransportInterceptor(settings, threadPool, authcService.get(), authzService, licenseState,
sslService, securityContext.get(), destructiveOperations));
BootstrapElasticPassword bootstrapElasticPassword = new BootstrapElasticPassword(settings, logger, clusterService, reservedRealm,
BootstrapElasticPassword bootstrapElasticPassword = new BootstrapElasticPassword(settings, clusterService, reservedRealm,
securityLifecycleService);
bootstrapElasticPassword.initiatePasswordBootstrap();
@ -502,8 +508,7 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
new SSLBootstrapCheck(sslService, settings, env),
new TokenPassphraseBootstrapCheck(settings),
new TokenSSLBootstrapCheck(settings),
new PkiRealmBootstrapCheck(settings, sslService),
new ContainerPasswordBootstrapCheck()
new PkiRealmBootstrapCheck(settings, sslService)
);
checks.addAll(InternalRealms.getBootstrapChecks(settings));
return checks;
@ -857,4 +862,21 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
}
return Collections.emptyList();
}
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
return templates -> {
final byte[] securityTemplate = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json",
Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8);
final XContent xContent = XContentFactory.xContent(XContentType.JSON);
try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, securityTemplate)) {
templates.put(SECURITY_TEMPLATE_NAME, IndexTemplateMetaData.Builder.fromXContent(parser, SECURITY_TEMPLATE_NAME));
} catch (IOException e) {
// TODO: should we handle this with a thrown exception?
logger.error("Error loading security template [{}] as part of metadata upgrading", SECURITY_TEMPLATE_NAME);
}
return templates;
};
}
}
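A brief sketch of how the returned operator is applied during cluster metadata upgrades; the surrounding upgrade machinery and the `security` and `existingTemplates` names are assumptions here, only the `UnaryOperator` contract is taken from the code above:

[source,java]
--------------------------------------------------
// Hypothetical application of the upgrader (the upgrade framework invokes it;
// this only illustrates the UnaryOperator contract):
UnaryOperator<Map<String, IndexTemplateMetaData>> upgrader =
        security.getIndexTemplateMetaDataUpgrader();
Map<String, IndexTemplateMetaData> upgraded = upgrader.apply(existingTemplates);
// "security-index-template" is now present (or refreshed) in the map
--------------------------------------------------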

View File

@ -19,12 +19,11 @@ import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail;
import org.elasticsearch.xpack.security.authc.esnative.NativeRealmMigrator;
import org.elasticsearch.xpack.security.support.IndexLifecycleManager;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.BiConsumer;
@ -47,6 +46,7 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
public static final String SECURITY_INDEX_NAME = ".security";
public static final String SECURITY_TEMPLATE_NAME = "security-index-template";
public static final String NEW_SECURITY_INDEX_NAME = SECURITY_INDEX_NAME + "-" + IndexLifecycleManager.NEW_INDEX_VERSION;
private static final Version MIN_READ_VERSION = Version.V_5_0_0;
@ -58,21 +58,12 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
public SecurityLifecycleService(Settings settings, ClusterService clusterService,
ThreadPool threadPool, InternalClient client,
XPackLicenseState licenseState,
@Nullable IndexAuditTrail indexAuditTrail) {
this(settings, clusterService, threadPool, client,
new NativeRealmMigrator(settings, licenseState, client), indexAuditTrail);
}
// package private for testing
SecurityLifecycleService(Settings settings, ClusterService clusterService, ThreadPool threadPool, InternalClient client,
NativeRealmMigrator migrator, @Nullable IndexAuditTrail indexAuditTrail) {
super(settings);
this.settings = settings;
this.threadPool = threadPool;
this.indexAuditTrail = indexAuditTrail;
this.securityIndex = new IndexLifecycleManager(settings, client, clusterService, threadPool, SECURITY_INDEX_NAME,
SECURITY_TEMPLATE_NAME, migrator);
this.securityIndex = new IndexLifecycleManager(settings, client, SECURITY_INDEX_NAME, SECURITY_TEMPLATE_NAME);
clusterService.addListener(this);
clusterService.addLifecycleListener(new LifecycleListener() {
@Override
@ -188,7 +179,7 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
}
public static List<String> indexNames() {
return Collections.singletonList(SECURITY_INDEX_NAME);
return Collections.unmodifiableList(Arrays.asList(SECURITY_INDEX_NAME, NEW_SECURITY_INDEX_NAME));
}
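With this change callers see both the legacy and the versioned security index name; a minimal illustration, assuming hypothetically that `IndexLifecycleManager.NEW_INDEX_VERSION` is `6`:

[source,java]
--------------------------------------------------
List<String> names = SecurityLifecycleService.indexNames();
// e.g. [".security", ".security-6"] -- the numeric suffix comes from
// IndexLifecycleManager.NEW_INDEX_VERSION; its value is an assumption here
--------------------------------------------------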
/**
@ -200,7 +191,8 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
securityIndex.createIndexIfNeededThenExecute(listener, andThen);
} else {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
}
}

View File

@ -1,65 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.xpack.security.authc.support.Hasher;
/**
* Parses and stores environment settings relevant to running Elasticsearch in a container.
*/
public final class ContainerSettings {
public static final String BOOTSTRAP_PASSWORD_ENV_VAR = "BOOTSTRAP_PWD";
public static final String CONTAINER_ENV_VAR = "ELASTIC_CONTAINER";
private final boolean inContainer;
private final char[] passwordHash;
public ContainerSettings(boolean inContainer, char[] passwordHash) {
this.inContainer = inContainer;
this.passwordHash = passwordHash;
}
/**
* Returns a boolean indicating if Elasticsearch is deployed in a container (such as Docker). The way
* we determine if Elasticsearch is deployed in a container is by reading the ELASTIC_CONTAINER env
* variable. This should be set to true if Elasticsearch is in a container.
*
* @return whether Elasticsearch is running in a container
*/
public boolean inContainer() {
return inContainer;
}
/**
* Returns the hash for the bootstrap password. This is the password passed as an environment variable
* for use when Elasticsearch is deployed in a container.
*
* @return the password hash
*/
public char[] getPasswordHash() {
return passwordHash;
}
public static ContainerSettings parseAndCreate() {
String inContainerString = System.getenv(CONTAINER_ENV_VAR);
boolean inContainer = inContainerString != null && Booleans.parseBoolean(inContainerString);
char[] passwordHash;
String passwordString = System.getenv(BOOTSTRAP_PASSWORD_ENV_VAR);
if (passwordString != null) {
SecureString password = new SecureString(passwordString.toCharArray());
passwordHash = Hasher.BCRYPT.hash(password);
password.close();
} else {
passwordHash = null;
}
return new ContainerSettings(inContainer, passwordHash);
}
}
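For context, a minimal sketch of how the class removed above was consumed, assuming the container runtime exports the two environment variables it reads:

[source,java]
--------------------------------------------------
// Hypothetical usage of the removed class (env vars set by the container):
//   ELASTIC_CONTAINER=true
//   BOOTSTRAP_PWD=<bootstrap password>
ContainerSettings containerSettings = ContainerSettings.parseAndCreate();
if (containerSettings.inContainer() && containerSettings.getPasswordHash() != null) {
    // the BCrypt hash of BOOTSTRAP_PWD stands in for the empty default password
    char[] passwordHash = containerSettings.getPasswordHash();
}
--------------------------------------------------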

View File

@ -256,7 +256,8 @@ public final class TokenService extends AbstractComponent {
ensureEnabled();
if (lifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (lifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("cannot write to the tokens index"));
@ -322,7 +323,8 @@ public final class TokenService extends AbstractComponent {
if (lifecycleService.isSecurityIndexAvailable()) {
if (lifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
internalClient.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getDocumentId(userToken))

View File

@ -1,223 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.esnative;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.client.SecurityClient;
import org.elasticsearch.xpack.security.support.IndexLifecycleManager;
import org.elasticsearch.xpack.security.user.BeatsSystemUser;
import org.elasticsearch.xpack.security.user.BuiltinUserInfo;
import org.elasticsearch.xpack.security.user.LogstashSystemUser;
import org.elasticsearch.xpack.security.user.User;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.INDEX_TYPE;
import static org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.RESERVED_USER_TYPE;
/**
* Performs migration steps for the {@link NativeRealm} and {@link ReservedRealm}.
* When upgrading an Elasticsearch/X-Pack installation from a previous version, this class is responsible for ensuring that user/role
* data stored in the security index is converted to a format that is appropriate for the newly installed version.
*/
public class NativeRealmMigrator implements IndexLifecycleManager.IndexDataMigrator {
private final XPackLicenseState licenseState;
private final Logger logger;
private InternalClient client;
private final BuiltinUserInfo[] builtinUsers = new BuiltinUserInfo[] {
LogstashSystemUser.USER_INFO,
BeatsSystemUser.USER_INFO
};
public NativeRealmMigrator(Settings settings, XPackLicenseState licenseState, InternalClient internalClient) {
this.licenseState = licenseState;
this.logger = Loggers.getLogger(getClass(), settings);
this.client = internalClient;
}
/**
* Special care must be taken because this upgrade happens <strong>before</strong> the security-mapping is updated.
* We do it in that order because the version of the security-mapping controls the behaviour of the
* reserved and native realms.
*
* @param listener A listener for the results of the upgrade. Calls {@link ActionListener#onFailure(Exception)} if a problem occurs,
* {@link ActionListener#onResponse(Object) onResponse(true)} if an upgrade is performed, or
* {@link ActionListener#onResponse(Object) onResponse(false)} if no upgrade was required.
* @see SecurityLifecycleService#securityIndexMappingAndTemplateSufficientToRead(ClusterState, Logger)
* @see SecurityLifecycleService#isSecurityIndexWriteable
* @see IndexLifecycleManager#mappingVersion
*/
@Override
public void performUpgrade(@Nullable Version previousVersion, ActionListener<Boolean> listener) {
try {
List<BiConsumer<Version, ActionListener<Void>>> tasks = collectUpgradeTasks(previousVersion);
if (tasks.isEmpty()) {
listener.onResponse(false);
} else {
final GroupedActionListener<Void> countDownListener = new GroupedActionListener<>(
ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure), tasks.size(), emptyList()
);
logger.info("Performing {} security migration task(s) from version {}", tasks.size(), previousVersion);
tasks.forEach(t -> t.accept(previousVersion, countDownListener));
}
} catch (Exception e) {
listener.onFailure(e);
}
}
private List<BiConsumer<Version, ActionListener<Void>>> collectUpgradeTasks(@Nullable Version previousVersion) {
List<BiConsumer<Version, ActionListener<Void>>> tasks = new ArrayList<>();
for (BuiltinUserInfo info : builtinUsers) {
if (isNewUser(previousVersion, info)) {
tasks.add((v,l) -> createUserAsDisabled(info, v, l));
}
}
if (shouldConvertDefaultPasswords(previousVersion)) {
tasks.add(this::doConvertDefaultPasswords);
}
return tasks;
}
/**
* If we're upgrading from a security version where the new user did not exist, then we mark the user as disabled.
* Otherwise the user will exist with a default password, which is desirable for an <em>out-of-the-box</em> experience in fresh
* installs but problematic for already-locked-down upgrades.
*/
private boolean isNewUser(@Nullable Version previousVersion, BuiltinUserInfo info) {
return previousVersion != null
&& previousVersion.before(info.getDefinedSince())
&& previousVersion.onOrAfter(Version.V_5_0_0);
}
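A worked example of the version window checked above, using hypothetical versions (assume a built-in user first defined in 5.2.0 and an upgrade from 5.1.1):

[source,java]
--------------------------------------------------
// Versions are for illustration only:
Version previousVersion = Version.fromString("5.1.1");
Version definedSince    = Version.fromString("5.2.0"); // info.getDefinedSince()
boolean isNew = previousVersion.before(definedSince)
        && previousVersion.onOrAfter(Version.V_5_0_0);  // true -> create user as disabled
--------------------------------------------------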
private void createUserAsDisabled(BuiltinUserInfo info, @Nullable Version previousVersion, ActionListener<Void> listener) {
logger.info("Upgrading security from version [{}] - new reserved user [{}] will default to disabled",
previousVersion, info.getName());
// Only clear the cache if authentication is allowed by the current license
// otherwise the license management checks will prevent it from completing successfully.
final boolean clearCache = licenseState.isAuthAllowed();
final String userName = info.getName();
client.prepareGet(SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(userName)).execute(
ActionListener.wrap(getResponse -> {
if (getResponse.isExists()) {
// the document exists - we shouldn't do anything
listener.onResponse(null);
} else {
client.prepareIndex(SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(userName))
.setSource(Requests.INDEX_CONTENT_TYPE, User.Fields.ENABLED.getPreferredName(), false,
User.Fields.PASSWORD.getPreferredName(), "",
User.Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setCreate(true)
.execute(ActionListener.wrap(r -> {
if (clearCache) {
new SecurityClient(client).prepareClearRealmCache()
.usernames(userName)
.execute(ActionListener.wrap(re -> listener.onResponse(null), listener::onFailure));
} else {
listener.onResponse(null);
}
}, listener::onFailure));
}
}, listener::onFailure));
}
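The document indexed above reduces to a small JSON source; a sketch of its shape, assuming the usual preferred field names `enabled`, `password`, and `type`:

[source,js]
--------------------------------------------------
{
  "enabled" : false,
  "password" : "",
  "type" : "reserved-user"
}
--------------------------------------------------
// NOTCONSOLE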
/**
* Old versions of X-Pack security would assign the default password content to a user if it was enabled/disabled before the
* password was explicitly set to another value. If upgrading from one of those versions, then we want to change those users to be
* flagged as having a "default password" (which is stored as blank) so that {@link ReservedRealm#ACCEPT_DEFAULT_PASSWORD_SETTING}
* does the right thing.
*/
private boolean shouldConvertDefaultPasswords(@Nullable Version previousVersion) {
return previousVersion != null
&& previousVersion.before(Version.V_6_0_0_alpha1)
&& previousVersion.onOrAfter(Version.V_5_0_0);
}
@SuppressWarnings("unused")
private void doConvertDefaultPasswords(@Nullable Version previousVersion, ActionListener<Void> listener) {
client.prepareSearch(SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(User.Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE))
.setFetchSource(true)
.execute(ActionListener.wrap(searchResponse -> {
assert searchResponse.getHits().getTotalHits() <= 10 :
"there are more than 10 reserved users we need to change this to retrieve them all!";
Set<String> toConvert = new HashSet<>();
for (SearchHit searchHit : searchResponse.getHits()) {
Map<String, Object> sourceMap = searchHit.getSourceAsMap();
if (hasOldStyleDefaultPassword(sourceMap)) {
toConvert.add(searchHit.getId());
}
}
if (toConvert.isEmpty()) {
listener.onResponse(null);
} else {
GroupedActionListener<UpdateResponse> countDownListener = new GroupedActionListener<>(
ActionListener.wrap((r) -> listener.onResponse(null), listener::onFailure), toConvert.size(), emptyList()
);
toConvert.forEach(username -> {
logger.debug("Upgrading security from version [{}] - marking reserved user [{}] as having default password",
previousVersion, username);
client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(username))
.setDoc(User.Fields.PASSWORD.getPreferredName(), "",
User.Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.execute(countDownListener);
});
}
}, listener::onFailure));
}
/**
* Determines whether the supplied user source, given as a {@link Map}, has its password explicitly set to the default password
*/
private boolean hasOldStyleDefaultPassword(Map<String, Object> userSource) {
// TODO we should store the hash as something other than a string... bytes?
final String passwordHash = (String) userSource.get(User.Fields.PASSWORD.getPreferredName());
if (passwordHash == null) {
throw new IllegalStateException("passwordHash should never be null");
} else if (passwordHash.isEmpty()) {
// we know empty is the new style
return false;
}
try (SecureString secureString = new SecureString(passwordHash.toCharArray())) {
return Hasher.BCRYPT.verify(ReservedRealm.EMPTY_PASSWORD_TEXT, secureString.getChars());
}
}
/**
* Gets the document's id field for the given user name.
*/
private static String getIdForUser(final String userName) {
return RESERVED_USER_TYPE + "-" + userName;
}
}

View File

@ -43,10 +43,8 @@ import org.elasticsearch.xpack.security.action.user.ChangePasswordRequest;
import org.elasticsearch.xpack.security.action.user.DeleteUserRequest;
import org.elasticsearch.xpack.security.action.user.PutUserRequest;
import org.elasticsearch.xpack.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.security.authc.ContainerSettings;
import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.client.SecurityClient;
import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.User.Fields;
@ -69,24 +67,21 @@ import java.util.function.Consumer;
*/
public class NativeUsersStore extends AbstractComponent {
static final String INDEX_TYPE = "doc";
private static final String USER_DOC_TYPE = "user";
static final String RESERVED_USER_TYPE = "reserved-user";
public static final String INDEX_TYPE = "doc";
static final String USER_DOC_TYPE = "user";
public static final String RESERVED_USER_TYPE = "reserved-user";
private final Hasher hasher = Hasher.BCRYPT;
private final InternalClient client;
private final boolean isTribeNode;
private volatile SecurityLifecycleService securityLifecycleService;
private final ContainerSettings containerSettings;
public NativeUsersStore(Settings settings, InternalClient client, SecurityLifecycleService securityLifecycleService,
ContainerSettings containerSettings) {
public NativeUsersStore(Settings settings, InternalClient client, SecurityLifecycleService securityLifecycleService) {
super(settings);
this.client = client;
this.isTribeNode = XPackPlugin.isTribeNode(settings);
this.securityLifecycleService = securityLifecycleService;
this.containerSettings = containerSettings;
}
/**
@ -119,7 +114,8 @@ public class NativeUsersStore extends AbstractComponent {
} else {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational " +
"until the upgrade API is run on the security index"));
return;
}
try {
@ -155,7 +151,8 @@ public class NativeUsersStore extends AbstractComponent {
private void getUserAndPassword(final String user, final ActionListener<UserAndPassword> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
try {
@ -202,7 +199,8 @@ public class NativeUsersStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("password cannot be changed as user service cannot write until template and " +
@ -254,7 +252,8 @@ public class NativeUsersStore extends AbstractComponent {
private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener<Void> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
@ -287,7 +286,8 @@ public class NativeUsersStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("user cannot be created or changed as the user service cannot write until " +
@ -389,7 +389,8 @@ public class NativeUsersStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("enabled status cannot be changed as user service cannot write until template " +
@ -476,7 +477,8 @@ public class NativeUsersStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("user cannot be deleted as user service cannot write until template and " +
@ -516,7 +518,7 @@ public class NativeUsersStore extends AbstractComponent {
void verifyPassword(String username, final SecureString password, ActionListener<AuthenticationResult> listener) {
getUserAndPassword(username, ActionListener.wrap((userAndPassword) -> {
if (userAndPassword == null || userAndPassword.passwordHash() == null) {
listener.onResponse(null);
listener.onResponse(AuthenticationResult.notHandled());
} else if (hasher.verify(password, userAndPassword.passwordHash())) {
listener.onResponse(AuthenticationResult.success(userAndPassword.user()));
} else {
@ -531,7 +533,8 @@ public class NativeUsersStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm not be operational until " +
"the upgrade API is run on the security index"));
return;
}
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(RESERVED_USER_TYPE, username))
@ -546,8 +549,6 @@ public class NativeUsersStore extends AbstractComponent {
listener.onFailure(new IllegalStateException("password hash must not be null!"));
} else if (enabled == null) {
listener.onFailure(new IllegalStateException("enabled must not be null!"));
} else if (password.isEmpty() && containerSettings.inContainer() && username.equals(ElasticUser.NAME)) {
listener.onResponse(new ReservedUserInfo(containerSettings.getPasswordHash(), enabled, false));
} else if (password.isEmpty()) {
listener.onResponse(new ReservedUserInfo(ReservedRealm.EMPTY_PASSWORD_HASH, enabled, true));
} else {
@ -577,7 +578,8 @@ public class NativeUsersStore extends AbstractComponent {
void getAllReservedUserInfo(ActionListener<Map<String, ReservedUserInfo>> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
@ -603,12 +605,6 @@ public class NativeUsersStore extends AbstractComponent {
} else if (enabled == null) {
listener.onFailure(new IllegalStateException("enabled must not be null!"));
return;
} else if (password.isEmpty() && containerSettings.inContainer() &&
ElasticUser.NAME.equals(searchHit.getId())) {
char[] passwordHash = containerSettings.getPasswordHash();
userInfos.put(searchHit.getId(), new ReservedUserInfo(passwordHash, enabled, false));
} else if (password.isEmpty()) {
userInfos.put(username, new ReservedUserInfo(ReservedRealm.EMPTY_PASSWORD_HASH, enabled, true));
} else {
userInfos.put(username, new ReservedUserInfo(password.toCharArray(), enabled, false));
}

View File

@ -27,7 +27,6 @@ import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.support.Exceptions;
import org.elasticsearch.xpack.security.user.AnonymousUser;
import org.elasticsearch.xpack.security.user.BeatsSystemUser;
import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.KibanaUser;
import org.elasticsearch.xpack.security.user.LogstashSystemUser;
@ -143,7 +142,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
case ElasticUser.NAME:
case KibanaUser.NAME:
case LogstashSystemUser.NAME:
case BeatsSystemUser.NAME:
return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings);
default:
return AnonymousUser.isAnonymousUsername(username, settings);
@ -184,8 +182,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
return new KibanaUser(userInfo.enabled);
case LogstashSystemUser.NAME:
return new LogstashSystemUser(userInfo.enabled);
case BeatsSystemUser.NAME:
return new BeatsSystemUser(userInfo.enabled);
default:
if (anonymousEnabled && anonymousUser.principal().equals(username)) {
return anonymousUser;
@ -211,9 +207,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
userInfo = reservedUserInfos.get(LogstashSystemUser.NAME);
users.add(new LogstashSystemUser(userInfo == null || userInfo.enabled));
userInfo = reservedUserInfos.get(BeatsSystemUser.NAME);
users.add(new BeatsSystemUser(userInfo == null || userInfo.enabled));
if (anonymousEnabled) {
users.add(anonymousUser);
}
@ -256,8 +249,6 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
switch (username) {
case LogstashSystemUser.NAME:
return LogstashSystemUser.DEFINED_SINCE;
case BeatsSystemUser.NAME:
return BeatsSystemUser.DEFINED_SINCE;
default:
return Version.V_5_0_0;
}

View File

@ -20,7 +20,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
import org.elasticsearch.xpack.security.user.BeatsSystemUser;
import org.elasticsearch.xpack.security.user.ElasticUser;
import org.elasticsearch.xpack.security.user.KibanaUser;
import org.elasticsearch.xpack.security.user.LogstashSystemUser;
@ -40,7 +39,7 @@ public class SetupPasswordTool extends MultiCommand {
private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" +
"~!@#$%^&*-_=+?").toCharArray();
private static final String[] USERS = new String[]{ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME};
private static final String[] USERS = new String[]{ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME};
private final Function<Environment, CommandLineHttpClient> clientFunction;
private final CheckedFunction<Environment, KeyStoreWrapper, Exception> keyStoreFunction;

View File

@ -5,6 +5,15 @@
*/
package org.elasticsearch.xpack.security.authc.ldap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import com.unboundid.ldap.sdk.LDAPException;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
@ -37,13 +46,6 @@ import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingSt
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.ssl.SSLService;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
* Authenticates username/password tokens against ldap, locates groups and maps them to roles.
@ -145,9 +147,11 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
// we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want
// network threads stuck waiting for a socket to connect. After the bind, all interaction with LDAP should be async
final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable(listener,
final CancellableLdapRunnable<AuthenticationResult> cancellableLdapRunnable = new CancellableLdapRunnable<>(listener,
ex -> AuthenticationResult.unsuccessful("Authentication against realm [" + this.toString() + "] failed", ex),
() -> sessionFactory.session(token.principal(), token.credentials(),
contextPreservingListener(new LdapSessionActionListener("authenticate", token.principal(), listener))), logger);
contextPreservingListener(new LdapSessionActionListener("authenticate", token.principal(), listener))), logger
);
threadPool.generic().execute(cancellableLdapRunnable);
threadPool.schedule(executionTimeout, Names.SAME, cancellableLdapRunnable::maybeTimeout);
}
@ -159,7 +163,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
// network threads stuck waiting for a socket to connect. After the bind, all interaction with LDAP should be async
final ActionListener<AuthenticationResult> sessionListener = ActionListener.wrap(AuthenticationResult::getUser,
userActionListener::onFailure);
final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable(userActionListener,
final CancellableLdapRunnable<User> cancellableLdapRunnable = new CancellableLdapRunnable<>(userActionListener, e -> null,
() -> sessionFactory.unauthenticatedSession(username,
contextPreservingListener(new LdapSessionActionListener("lookup", username, sessionListener))), logger);
threadPool.generic().execute(cancellableLdapRunnable);
@ -193,7 +197,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
private static void buildUser(LdapSession session, String username, ActionListener<AuthenticationResult> listener,
UserRoleMapper roleMapper) {
if (session == null) {
listener.onResponse(null);
listener.onResponse(AuthenticationResult.notHandled());
} else {
boolean loadingGroups = false;
try {
@ -250,7 +254,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
@Override
public void onResponse(LdapSession session) {
if (session == null) {
resultListener.onResponse(null);
resultListener.onResponse(AuthenticationResult.notHandled());
} else {
ldapSessionAtomicReference.set(session);
buildUser(session, username, resultListener, roleMapper);
@ -275,15 +279,17 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
* be queued and not executed for a long time or ever and this causes user requests to appear
* to hang. In these cases at least we can provide a response.
*/
static class CancellableLdapRunnable extends AbstractRunnable {
static class CancellableLdapRunnable<T> extends AbstractRunnable {
private final Runnable in;
private final ActionListener<?> listener;
private final ActionListener<T> listener;
private final Function<Exception, T> defaultValue;
private final Logger logger;
private final AtomicReference<LdapRunnableState> state = new AtomicReference<>(LdapRunnableState.AWAITING_EXECUTION);
CancellableLdapRunnable(ActionListener<?> listener, Runnable in, Logger logger) {
CancellableLdapRunnable(ActionListener<T> listener, Function<Exception, T> defaultValue, Runnable in, Logger logger) {
this.listener = listener;
this.defaultValue = Objects.requireNonNull(defaultValue);
this.in = in;
this.logger = logger;
}
@ -291,9 +297,8 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
@Override
public void onFailure(Exception e) {
logger.error("execution of ldap runnable failed", e);
// this is really an exceptional state but just call the listener and maybe another realm can authenticate, otherwise
// something as simple as a down ldap server/network error takes down auth
listener.onResponse(null);
final T result = defaultValue.apply(e);
listener.onResponse(result);
}
@Override

View File

@ -224,10 +224,10 @@ public final class LdapUtils {
boolean searching = false;
LDAPConnection ldapConnection = null;
try {
ldapConnection = ldap.getConnection();
ldapConnection = privilegedConnect(ldap::getConnection);
final LDAPConnection finalConnection = ldapConnection;
LdapSearchResultListener ldapSearchResultListener = new LdapSearchResultListener(
ldapConnection, ignoreReferralErrors,
finalConnection, ignoreReferralErrors,
ActionListener.wrap(
searchResult -> {
IOUtils.closeWhileHandlingException(
@ -523,8 +523,8 @@ public final class LdapUtils {
// in order to follow the referral we need to open a new connection and we do so using the
// referral connector on the ldap connection
final LDAPConnection referralConn = ldapConnection.getReferralConnector()
.getReferralConnection(referralURL, ldapConnection);
final LDAPConnection referralConn =
privilegedConnect(() -> ldapConnection.getReferralConnector().getReferralConnection(referralURL, ldapConnection));
final LdapSearchResultListener ldapListener = new LdapSearchResultListener(
referralConn, ignoreErrors,
ActionListener.wrap(
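The hunk above routes connection setup through `privilegedConnect`, whose body is not part of this hunk; a plausible shape, under the assumption that it simply wraps the connect call in a privileged block (the signature and helper types are assumptions):

[source,java]
--------------------------------------------------
// Assumed sketch -- the real helper is not shown in this hunk:
static LDAPConnection privilegedConnect(CheckedSupplier<LDAPConnection, LDAPException> connect)
        throws LDAPException {
    SpecialPermission.check();
    try {
        return AccessController.doPrivileged(
                (PrivilegedExceptionAction<LDAPConnection>) connect::get);
    } catch (PrivilegedActionException e) {
        throw (LDAPException) e.getException();
    }
}
--------------------------------------------------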

View File

@ -98,7 +98,8 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
final QueryBuilder query = QueryBuilders.termQuery(DOC_TYPE_FIELD, DOC_TYPE_ROLE_MAPPING);
@ -153,7 +154,8 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
listener.onFailure(new UnsupportedOperationException("role-mappings may not be modified using a tribe node"));
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("role-mappings cannot be modified until template and mappings are up to date"));
} else {
@ -198,7 +200,8 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
private void innerDeleteMapping(DeleteRoleMappingRequest request, ActionListener<Boolean> listener) throws IOException {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
client.prepareDelete(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(request.getName()))

View File

@ -50,6 +50,7 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
@ -137,7 +138,7 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper {
.createParser(queryShardContext.getXContentRegistry(), templateResult)) {
QueryBuilder queryBuilder = queryShardContext.parseInnerQueryBuilder(parser);
verifyRoleQuery(queryBuilder);
failIfQueryUsesClient(scriptService, queryBuilder, queryShardContext);
failIfQueryUsesClient(queryBuilder, queryShardContext);
ParsedQuery parsedQuery = queryShardContext.toFilter(queryBuilder);
filter.add(parsedQuery.query(), SHOULD);
}
@ -348,18 +349,13 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper {
* the DLS query until the get thread pool has been exhausted:
* https://github.com/elastic/x-plugins/issues/3145
*/
static void failIfQueryUsesClient(ScriptService scriptService, QueryBuilder queryBuilder, QueryRewriteContext original)
static void failIfQueryUsesClient(QueryBuilder queryBuilder, QueryRewriteContext original)
throws IOException {
Client client = new FilterClient(original.getClient()) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
throw new IllegalStateException("role queries are not allowed to execute additional requests");
}
};
QueryRewriteContext copy = new QueryRewriteContext(original.getIndexSettings(), original.getMapperService(), scriptService,
original.getXContentRegistry(), client, original.getIndexReader(), original::nowInMillis);
queryBuilder.rewrite(copy);
QueryRewriteContext copy = new QueryRewriteContext(
original.getXContentRegistry(), null, original::nowInMillis);
Rewriteable.rewrite(queryBuilder, copy);
if (copy.hasAsyncActions()) {
throw new IllegalStateException("role queries are not allowed to execute additional requests");
}
}
}

View File

@ -106,7 +106,8 @@ public class NativeRolesStore extends AbstractComponent {
listener::onFailure));
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else {
try {
@ -139,7 +140,8 @@ public class NativeRolesStore extends AbstractComponent {
return;
} else if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
} else if (securityLifecycleService.isSecurityIndexWriteable() == false) {
listener.onFailure(new IllegalStateException("role cannot be deleted as service cannot write until template and " +
@ -191,7 +193,8 @@ public class NativeRolesStore extends AbstractComponent {
void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener<Boolean> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
try {
@ -236,7 +239,8 @@ public class NativeRolesStore extends AbstractComponent {
} else {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
client.prepareMultiSearch()
@ -321,7 +325,8 @@ public class NativeRolesStore extends AbstractComponent {
private void executeGetRoleRequest(String role, ActionListener<GetResponse> listener) {
if (securityLifecycleService.isSecurityIndexOutOfDate()) {
listener.onFailure(new IllegalStateException(
"Security index is not on the current version - please upgrade with the upgrade api"));
"Security index is not on the current version - the native realm will not be operational until " +
"the upgrade API is run on the security index"));
return;
}
try {

View File

@ -44,7 +44,7 @@ public class ReservedRolesStore {
RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("manage", "read", "index", "delete")
.build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("monitoring_user", new RoleDescriptor("monitoring_user", null, new RoleDescriptor.IndicesPrivileges[] {
RoleDescriptor.IndicesPrivileges.builder().indices(".marvel-es-*", ".monitoring-*").privileges("read").build() },
RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("read").build() },
null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("remote_monitoring_agent", new RoleDescriptor("remote_monitoring_agent",
new String[] {
@ -54,7 +54,7 @@ public class ReservedRolesStore {
"cluster:admin/xpack/watcher/watch/delete",
},
new RoleDescriptor.IndicesPrivileges[] {
RoleDescriptor.IndicesPrivileges.builder().indices(".marvel-es-*", ".monitoring-*").privileges("all").build() },
RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("all").build() },
null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("ingest_admin", new RoleDescriptor("ingest_admin", new String[] { "manage_index_templates", "manage_pipeline" },
null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
@ -69,8 +69,6 @@ public class ReservedRolesStore {
null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("logstash_system", new RoleDescriptor("logstash_system", new String[] { "monitor", MonitoringBulkAction.NAME},
null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("beats_system", new RoleDescriptor("beats_system", new String[] { "monitor", MonitoringBulkAction.NAME},
null, null, MetadataUtils.DEFAULT_RESERVED_METADATA))
.put("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" },
new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*",
".ml-notifications").privileges("view_index_metadata", "read").build() },

View File

@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayService;
@ -38,11 +39,11 @@ public final class BootstrapElasticPassword {
private final SecurityLifecycleService lifecycleService;
private final boolean reservedRealmDisabled;
public BootstrapElasticPassword(Settings settings, Logger logger, ClusterService clusterService, ReservedRealm reservedRealm,
public BootstrapElasticPassword(Settings settings, ClusterService clusterService, ReservedRealm reservedRealm,
SecurityLifecycleService lifecycleService) {
this.reservedRealmDisabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings) == false;
this.settings = settings;
this.logger = logger;
this.logger = Loggers.getLogger(BootstrapElasticPassword.class, settings);
this.clusterService = clusterService;
this.reservedRealm = reservedRealm;
this.lifecycleService = lifecycleService;
@ -98,7 +99,9 @@ public final class BootstrapElasticPassword {
@Override
public void onResponse(Boolean passwordSet) {
cleanup();
if (passwordSet == false) {
if (passwordSet) {
logger.info("elastic password was bootstrapped successfully");
} else {
logger.warn("elastic password was not bootstrapped because its password was already set");
}
semaphore.release();

View File

@ -1,41 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.bootstrap;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.xpack.security.authc.ContainerSettings;
/**
* A bootstrap check validating container environment variables. The bootstrap password option
* cannot be present if the container environment variable is not set to true.
*/
public final class ContainerPasswordBootstrapCheck implements BootstrapCheck {
private final ContainerSettings containerSettings;
public ContainerPasswordBootstrapCheck() {
this(ContainerSettings.parseAndCreate());
}
public ContainerPasswordBootstrapCheck(ContainerSettings containerSettings) {
this.containerSettings = containerSettings;
}
@Override
public boolean check() {
if (containerSettings.getPasswordHash() != null && containerSettings.inContainer() == false) {
return true;
}
return false;
}
@Override
public String errorMessage() {
return "Cannot use bootstrap password env variable [" + ContainerSettings.BOOTSTRAP_PASSWORD_ENV_VAR + "] if " +
"Elasticsearch is not being deployed in a container.";
}
}

View File

@ -5,15 +5,11 @@
*/
package org.elasticsearch.xpack.security.support;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
@ -21,18 +17,14 @@ import java.util.stream.Collectors;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
@ -41,22 +33,15 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import org.elasticsearch.xpack.upgrade.IndexUpgradeCheck;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING;
import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
/**
* Manages the lifecycle of a single index, its template, mapping, and data upgrades/migrations.
@ -68,21 +53,11 @@ public class IndexLifecycleManager extends AbstractComponent {
private static final String SECURITY_VERSION_STRING = "security-version";
public static final String TEMPLATE_VERSION_PATTERN =
Pattern.quote("${security.template.version}");
private static final int MAX_MIGRATE_ATTEMPTS = 10;
public static int NEW_INDEX_VERSION = IndexUpgradeCheck.UPRADE_VERSION;
private final String indexName;
private final String templateName;
private final InternalClient client;
private final IndexDataMigrator migrator;
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final AtomicBoolean templateCreationPending = new AtomicBoolean(false);
private final AtomicBoolean updateMappingPending = new AtomicBoolean(false);
private final AtomicReference<UpgradeState> migrateDataState = new AtomicReference<>(UpgradeState.NOT_STARTED);
private final AtomicInteger migrateDataAttempts = new AtomicInteger(0);
private final List<BiConsumer<ClusterIndexHealth, ClusterIndexHealth>> indexHealthChangeListeners = new CopyOnWriteArrayList<>();
@ -98,47 +73,17 @@ public class IndexLifecycleManager extends AbstractComponent {
NOT_STARTED, IN_PROGRESS, COMPLETE, FAILED
}
public interface IndexDataMigrator {
void performUpgrade(@Nullable Version previousVersion, ActionListener<Boolean> listener);
}
public static final IndexDataMigrator NULL_MIGRATOR = (version, listener) -> listener.onResponse(false);
public IndexLifecycleManager(Settings settings, InternalClient client, ClusterService clusterService, ThreadPool threadPool,
String indexName, String templateName, IndexDataMigrator migrator) {
public IndexLifecycleManager(Settings settings, InternalClient client, String indexName, String templateName) {
super(settings);
this.client = client;
this.indexName = indexName;
this.templateName = templateName;
this.migrator = migrator;
this.clusterService = clusterService;
this.threadPool = threadPool;
}
public boolean isTemplateUpToDate() {
return templateIsUpToDate;
}
public boolean isTemplateCreationPending() {
return templateCreationPending.get();
}
public boolean isMappingUpToDate() {
return mappingIsUpToDate;
}
public Version getMappingVersion() {
return mappingVersion;
}
public boolean checkMappingVersion(Predicate<Version> requiredVersion) {
return this.mappingVersion == null || requiredVersion.test(this.mappingVersion);
}
public boolean isMappingUpdatePending() {
return this.updateMappingPending.get();
}
public boolean indexExists() {
return indexExists;
}
@ -155,10 +100,6 @@ public class IndexLifecycleManager extends AbstractComponent {
return canWriteToIndex;
}
public UpgradeState getMigrationState() {
return this.migrateDataState.get();
}
/**
* Adds a listener which will be notified when the security index health changes. The previous and
* current health will be provided to the listener so that the listener can determine if any action
@ -185,15 +126,6 @@ public class IndexLifecycleManager extends AbstractComponent {
this.mappingIsUpToDate = checkIndexMappingUpToDate(state);
this.canWriteToIndex = templateIsUpToDate && mappingIsUpToDate;
this.mappingVersion = oldestIndexMappingVersion(state);
if (state.nodes().isLocalNodeElectedMaster()) {
if (templateIsUpToDate == false) {
updateTemplate();
}
if (indexAvailable && mappingIsUpToDate == false) {
migrateData(state, this::updateMapping);
}
}
}
private void checkIndexHealthChange(ClusterChangedEvent event) {
@ -328,179 +260,6 @@ public class IndexLifecycleManager extends AbstractComponent {
}
}
private void updateTemplate() {
// only put the template if this is not already in progress
if (templateCreationPending.compareAndSet(false, true)) {
putTemplate();
}
}
private boolean migrateData(ClusterState state, Runnable andThen) {
// only update the data if this is not already in progress
if (migrateDataState.compareAndSet(UpgradeState.NOT_STARTED, UpgradeState.IN_PROGRESS)) {
final Version previousVersion = oldestIndexMappingVersion(state);
migrator.performUpgrade(previousVersion, new ActionListener<Boolean>() {
@Override
public void onResponse(Boolean upgraded) {
migrateDataState.set(UpgradeState.COMPLETE);
andThen.run();
}
@Override
public void onFailure(Exception e) {
migrateDataState.set(UpgradeState.FAILED);
final int attempts = migrateDataAttempts.incrementAndGet();
logger.error(new ParameterizedMessage(
"failed to upgrade security [{}] data from version [{}] (Attempt {} of {})",
indexName, previousVersion, attempts, MAX_MIGRATE_ATTEMPTS),
e);
if (attempts < MAX_MIGRATE_ATTEMPTS) {
// The first retry is (1^5)ms = 1ms
// The last retry is (9^5)ms = 59s
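// In between (attempts^5 ms): 2 -> 32ms, 3 -> 243ms, 5 -> ~3.1s, 8 -> ~33s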
final TimeValue retry = TimeValue.timeValueMillis((long) Math.pow(attempts, 5));
logger.info("Will attempt upgrade again in {}", retry);
threadPool.schedule(retry, ThreadPool.Names.SAME, IndexLifecycleManager.this::retryDataMigration);
} else {
logger.error("Security migration has failed after {} attempts. Restart the master node to try again.",
MAX_MIGRATE_ATTEMPTS);
}
}
@Override
public String toString() {
return getClass() + "{" + indexName + " migrator}";
}
});
return true;
} else {
if (migrateDataState.get() == UpgradeState.COMPLETE) {
andThen.run();
}
return false;
}
}
private void retryDataMigration() {
if (migrateDataState.compareAndSet(UpgradeState.FAILED, UpgradeState.NOT_STARTED)) {
processClusterState(clusterService.state());
}
}
private void updateMapping() {
// only update the mapping if this is not already in progress
if (updateMappingPending.compareAndSet(false, true)) {
putMappings();
}
}
private void putMappings() {
String template = TemplateUtils.loadTemplate("/" + templateName + ".json",
Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN);
Map<String, Object> typeMappingMap;
try {
typeMappingMap = convertToMap(JsonXContent.jsonXContent, template, false);
} catch (ElasticsearchParseException e) {
updateMappingPending.set(false);
logger.error(new ParameterizedMessage(
"failed to parse index template {}", templateName), e);
throw new ElasticsearchException("failed to parse index template {}", e, templateName);
}
// go over all the types found in the template and update each of them;
// we must wait for responses for all types before the update is considered done
final Map<String, PutMappingResponse> updateResults =
ConcurrentCollections.newConcurrentMap();
@SuppressWarnings("unchecked")
Map<String, Object> typeMappings = (Map<String, Object>) typeMappingMap.get("mappings");
int expectedResults = typeMappings.size();
for (String type : typeMappings.keySet()) {
// get the mappings from the template definition
@SuppressWarnings("unchecked")
Map<String, Object> typeMapping = (Map<String, Object>) typeMappings.get(type);
// update the mapping
putMapping(updateResults, expectedResults, type, typeMapping);
}
}
private void putMapping(final Map<String, PutMappingResponse> updateResults,
int expectedResults, final String type,
Map<String, Object> typeMapping) {
logger.debug("updating mapping of the [{}] index for type [{}]", indexName, type);
PutMappingRequest putMappingRequest = client.admin().indices()
.preparePutMapping(indexName).setSource(typeMapping).setType(type).request();
client.admin().indices().putMapping(putMappingRequest,
new ActionListener<PutMappingResponse>() {
@Override
public void onResponse(PutMappingResponse putMappingResponse) {
if (putMappingResponse.isAcknowledged() == false) {
updateMappingPending.set(false);
throw new ElasticsearchException("update mapping for type [{}]" +
" in index [{}] was not acknowledged", type, indexName);
} else {
updateResults.put(type, putMappingResponse);
if (updateResults.size() == expectedResults) {
updateMappingPending.set(false);
}
}
}
@Override
public void onFailure(Exception e) {
updateMappingPending.set(false);
logger.warn((Supplier<?>) () -> new ParameterizedMessage(
"failed to update mapping for type [{}] on index [{}]",
type, indexName), e);
}
@Override
public String toString() {
return getClass() + "{" + indexName + " PutMapping}";
}
});
}
private void putTemplate() {
logger.debug("putting the template [{}]", templateName);
String template = TemplateUtils.loadTemplate("/" + templateName + ".json",
Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN);
PutIndexTemplateRequest putTemplateRequest = client.admin().indices()
.preparePutTemplate(templateName)
.setSource(
new BytesArray(template.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON)
.request();
client.admin().indices().putTemplate(putTemplateRequest,
new ActionListener<PutIndexTemplateResponse>() {
@Override
public void onResponse(PutIndexTemplateResponse putIndexTemplateResponse) {
templateCreationPending.set(false);
if (putIndexTemplateResponse.isAcknowledged()) {
templateIsUpToDate = true;
} else {
throw new ElasticsearchException(
"put template [{}] was not acknowledged", templateName
);
}
}
@Override
public void onFailure(Exception e) {
templateCreationPending.set(false);
logger.warn(new ParameterizedMessage(
"failed to put template [{}]", templateName), e);
}
@Override
public String toString() {
return getClass() + "{" + indexName + " PutTemplate}";
}
});
}
/**
* Creates the security index, if it does not already exist, then runs the given
* action on the security index.
@ -514,7 +273,7 @@ public class IndexLifecycleManager extends AbstractComponent {
@Override
public void onResponse(CreateIndexResponse createIndexResponse) {
if (createIndexResponse.isAcknowledged()) {
andThen.run();
setSecurityIndexAlias(listener, andThen);
} else {
listener.onFailure(new ElasticsearchException("Failed to create security index"));
}
@ -533,4 +292,31 @@ public class IndexLifecycleManager extends AbstractComponent {
});
}
}
/**
* Sets the security index alias to .security after the index has been created. The alias
* cannot be added as part of the security index template: the same template is also used
* when the upgrade API creates a new security index, at which point the old .security index
* still exists and is being reindexed from, so the alias would clash. The alias therefore
* has to be added manually here, after index creation.
*/
private <T> void setSecurityIndexAlias(final ActionListener<T> listener, final Runnable andThen) {
client.admin().indices().prepareAliases().addAlias(INTERNAL_SECURITY_INDEX, SECURITY_INDEX_NAME)
.execute(new ActionListener<IndicesAliasesResponse>() {
@Override
public void onResponse(IndicesAliasesResponse response) {
if (response.isAcknowledged()) {
andThen.run();
} else {
listener.onFailure(new ElasticsearchException("Failed to set security index alias"));
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}

View File

@ -1,20 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.user;
import org.elasticsearch.Version;
import org.elasticsearch.xpack.security.support.MetadataUtils;
public class BeatsSystemUser extends User {
public static final String NAME = "beats_system";
private static final String ROLE_NAME = "beats_system";
public static final Version DEFINED_SINCE = Version.V_6_0_0_alpha1;
public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE);
public BeatsSystemUser(boolean enabled) {
super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled);
}
}

View File

@ -17,6 +17,7 @@ import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFilePermission;
import java.security.KeyPair;
import java.security.KeyStore;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
@ -34,6 +35,7 @@ import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import joptsimple.ArgumentAcceptingOptionSpec;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.bouncycastle.asn1.DERIA5String;
@ -110,6 +112,7 @@ public class CertificateTool extends EnvironmentAwareCommand {
private final OptionSpec<Integer> keysizeSpec;
private final OptionSpec<String> inputFileSpec;
private final OptionSpec<Integer> daysSpec;
private final ArgumentAcceptingOptionSpec<String> p12Spec;
CertificateTool() {
super(DESCRIPTION);
@ -126,11 +129,17 @@ public class CertificateTool extends EnvironmentAwareCommand {
.withOptionalArg();
caDnSpec = parser.accepts("dn", "distinguished name to use for the generated ca. defaults to " + AUTO_GEN_CA_DN)
.availableUnless(caCertPathSpec)
.availableUnless(csrSpec)
.withRequiredArg();
keysizeSpec = parser.accepts("keysize", "size in bits of RSA keys").withRequiredArg().ofType(Integer.class);
inputFileSpec = parser.accepts("in", "file containing details of the instances in yaml format").withRequiredArg();
daysSpec =
parser.accepts("days", "number of days that the generated certificates are valid").withRequiredArg().ofType(Integer.class);
daysSpec = parser.accepts("days", "number of days that the generated certificates are valid")
.availableUnless(csrSpec)
.withRequiredArg()
.ofType(Integer.class);
p12Spec = parser.accepts("p12", "output a p12 (PKCS#12) version for each certificate/key pair, with optional password")
.availableUnless(csrSpec)
.withOptionalArg();
}
public static void main(String[] args) throws Exception {
@ -152,10 +161,18 @@ public class CertificateTool extends EnvironmentAwareCommand {
final boolean prompt = options.has(caPasswordSpec);
final char[] keyPass = options.hasArgument(caPasswordSpec) ? caPasswordSpec.value(options).toCharArray() : null;
final int days = options.hasArgument(daysSpec) ? daysSpec.value(options) : DEFAULT_DAYS;
final char[] p12Password;
if (options.hasArgument(p12Spec)) {
p12Password = p12Spec.value(options).toCharArray();
} else if (options.has(p12Spec)) {
p12Password = new char[0];
} else {
p12Password = null;
}
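// e.g. "--p12 secret" -> password "secret"; bare "--p12" -> empty password; flag absent -> no .p12 output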
CAInfo caInfo = getCAInfo(terminal, dn, caCertPathSpec.value(options), caKeyPathSpec.value(options), keyPass, prompt, env,
keysize, days);
Collection<CertificateInformation> certificateInformations = getCertificateInformationList(terminal, inputFile);
generateAndWriteSignedCertificates(outputFile, certificateInformations, caInfo, keysize, days);
generateAndWriteSignedCertificates(outputFile, certificateInformations, caInfo, keysize, days, p12Password);
}
printConclusion(terminal, csrOnly, outputFile);
}
@ -348,7 +365,7 @@ public class CertificateTool extends EnvironmentAwareCommand {
* @param days the number of days that the certificate should be valid for
*/
static void generateAndWriteSignedCertificates(Path outputFile, Collection<CertificateInformation> certificateInformations,
CAInfo caInfo, int keysize, int days) throws Exception {
CAInfo caInfo, int keysize, int days, char[] pkcs12Password) throws Exception {
fullyWriteFile(outputFile, (outputStream, pemWriter) -> {
// write out the CA info first if it was generated
writeCAInfoIfGenerated(outputStream, pemWriter, caInfo);
@ -366,16 +383,28 @@ public class CertificateTool extends EnvironmentAwareCommand {
outputStream.putNextEntry(zipEntry);
// write cert
outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".crt"));
final String entryBase = dirName + certificateInformation.name.filename;
outputStream.putNextEntry(new ZipEntry(entryBase + ".crt"));
pemWriter.writeObject(certificate);
pemWriter.flush();
outputStream.closeEntry();
// write private key
outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".key"));
outputStream.putNextEntry(new ZipEntry(entryBase + ".key"));
pemWriter.writeObject(keyPair.getPrivate());
pemWriter.flush();
outputStream.closeEntry();
if (pkcs12Password != null) {
final KeyStore pkcs12 = KeyStore.getInstance("PKCS12");
pkcs12.load(null);
pkcs12.setKeyEntry(certificateInformation.name.originalName, keyPair.getPrivate(), pkcs12Password,
new Certificate[]{certificate});
outputStream.putNextEntry(new ZipEntry(entryBase + ".p12"));
pkcs12.store(outputStream, pkcs12Password);
outputStream.closeEntry();
}
}
});
}
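For reference, the PKCS#12 entry written above uses the instance's original name as the keystore alias, so a generated bundle can be read back with the standard JDK KeyStore API. A minimal sketch, assuming a hypothetical file "node1.p12" protected with the password given to --p12 (exception handling elided):
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.PrivateKey;
import java.security.cert.Certificate;
KeyStore ks = KeyStore.getInstance("PKCS12");
try (InputStream in = Files.newInputStream(Paths.get("node1.p12"))) {
ks.load(in, "changeme".toCharArray()); // p12 password is an assumption
}
PrivateKey key = (PrivateKey) ks.getKey("node1", "changeme".toCharArray()); // alias = instance name
Certificate cert = ks.getCertificate("node1");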
@ -632,6 +661,13 @@ public class CertificateTool extends EnvironmentAwareCommand {
&& ALLOWED_FILENAME_CHAR_PATTERN.matcher(resolvePath(name).toString()).matches()
&& name.startsWith(".") == false;
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "{original=[" + originalName + "] principal=[" + x500Principal
+ "] file=[" + filename + "] err=[" + error + "]}";
}
}
static class CAInfo {

View File

@ -10,6 +10,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@ -17,7 +18,6 @@ import java.util.Objects;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -28,12 +28,12 @@ import org.elasticsearch.env.Environment;
*/
public final class RestrictedTrustConfig extends TrustConfig {
public static final String RESTRICTIONS_KEY_SUBJECT_NAME = "trust.subject_name";
private static final String RESTRICTIONS_KEY_SUBJECT_NAME = "trust.subject_name";
private final Settings settings;
private final String groupConfigPath;
private final TrustConfig delegate;
public RestrictedTrustConfig(Settings settings, String groupConfigPath, TrustConfig delegate) {
RestrictedTrustConfig(Settings settings, String groupConfigPath, TrustConfig delegate) {
this.settings = settings;
this.groupConfigPath = Objects.requireNonNull(groupConfigPath);
this.delegate = Objects.requireNonNull(delegate);
@ -52,7 +52,9 @@ public final class RestrictedTrustConfig extends TrustConfig {
@Override
List<Path> filesToMonitor(@Nullable Environment environment) {
return Collections.singletonList(resolveGroupConfigPath(environment));
List<Path> files = new ArrayList<>(delegate.filesToMonitor(environment));
files.add(resolveGroupConfigPath(environment));
return Collections.unmodifiableList(files);
}
@Override

View File

@ -5,9 +5,7 @@
*/
package org.elasticsearch.xpack.ssl;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

View File

@ -5,8 +5,8 @@
*/
package org.elasticsearch.xpack.ssl;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.http.conn.ssl.NoopHostnameVerifier;
import org.elasticsearch.client.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.apache.lucene.util.SetOnce;
import org.bouncycastle.operator.OperatorCreationException;
import org.elasticsearch.ElasticsearchException;
@ -813,30 +813,36 @@ public class SSLService extends AbstractComponent {
/**
* This is an empty trust manager that is used in case a loaded trust manager is null
*/
private static final class EmptyX509TrustManager extends X509ExtendedTrustManager {
static final class EmptyX509TrustManager extends X509ExtendedTrustManager {
@Override
public void checkClientTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override
public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override
public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override
public void checkServerTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override
public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override
public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException {
throw new CertificateException("no certificates are trusted");
}
@Override

View File

@ -33,10 +33,10 @@ import java.util.function.Consumer;
* A component that performs the following upgrade procedure:
* <p>
* - Check that all data and master nodes are running the same version
* - Create a new index .{name}-v6
* - Create a new index .{name}-6
* - Make index .{name} read only
* - Reindex from .{name} to .{name}-v6 with transform
* - Delete index .{name} and add alias .{name} to .{name}-v6
* - Reindex from .{name} to .{name}-6 with transform
* - Delete index .{name} and add alias .{name} to .{name}-6
*/
public class InternalIndexReindexer<T> {
@ -75,7 +75,7 @@ public class InternalIndexReindexer<T> {
private void innerUpgrade(ParentTaskAssigningClient parentAwareClient, String index, ClusterState clusterState,
ActionListener<BulkByScrollResponse> listener) {
String newIndex = index + "_v" + version;
String newIndex = index + "-" + version;
try {
checkMasterAndDataNodeVersion(clusterState);
parentAwareClient.admin().indices().prepareCreate(newIndex).execute(ActionListener.wrap(createIndexResponse ->
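A hedged sketch of the sequence described in the class comment, using the admin client with hypothetical index names (the transform script, version checks, error handling, and the async wiring of the real implementation are elided):
String index = ".watches"; // hypothetical source index
String newIndex = index + "-6"; // new internal-format index
client.admin().indices().prepareCreate(newIndex).get();
client.admin().indices().prepareUpdateSettings(index)
.setSettings(Settings.builder().put("index.blocks.write", true).build()).get(); // make source read-only
// ... reindex from index to newIndex, applying the transform ...
client.admin().indices().prepareDelete(index).get();
client.admin().indices().prepareAliases().addAlias(newIndex, index).get(); // alias .{name} -> .{name}-6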

View File

@ -12,6 +12,10 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -19,20 +23,26 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
import org.elasticsearch.xpack.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeAction;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeInfoAction;
import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeAction;
@ -46,10 +56,17 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.INDEX_TYPE;
import static org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.RESERVED_USER_TYPE;
public class Upgrade implements ActionPlugin {
public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0;
@ -65,6 +82,7 @@ public class Upgrade implements ActionPlugin {
this.settings = settings;
this.upgradeCheckFactories = new ArrayList<>();
upgradeCheckFactories.add(getWatcherUpgradeCheckFactory(settings));
upgradeCheckFactories.add(getSecurityUpgradeCheckFactory(settings));
}
public Collection<Object> createComponents(InternalClient internalClient, ClusterService clusterService, ThreadPool threadPool,
@ -103,6 +121,104 @@ public class Upgrade implements ActionPlugin {
return indexMetaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0) == EXPECTED_INDEX_FORMAT_VERSION;
}
static BiFunction<InternalClient, ClusterService, IndexUpgradeCheck> getSecurityUpgradeCheckFactory(Settings settings) {
return (internalClient, clusterService) ->
new IndexUpgradeCheck<Void>("security",
settings,
indexMetaData -> {
if (".security".equals(indexMetaData.getIndex().getName())
|| indexMetaData.getAliases().containsKey(".security")) {
if (checkInternalIndexFormat(indexMetaData)) {
return UpgradeActionRequired.UP_TO_DATE;
} else {
return UpgradeActionRequired.UPGRADE;
}
} else {
return UpgradeActionRequired.NOT_APPLICABLE;
}
},
internalClient,
clusterService,
new String[] { "user", "reserved-user", "role", "doc" },
new Script(ScriptType.INLINE, "painless",
"ctx._source.type = ctx._type;\n" +
"if (!ctx._type.equals(\"doc\")) {\n" +
" ctx._id = ctx._type + \"-\" + ctx._id;\n" +
" ctx._type = \"doc\";" +
"}\n",
new HashMap<>()),
listener -> listener.onResponse(null),
(success, listener) -> postSecurityUpgrade(internalClient, listener));
}
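// The inline painless transform above folds the old multi-type security documents
// into the single "doc" type. Its effect on a hypothetical document (illustrative id):
//   before: _type = "user", _id = "rdeniro",      _source = { "password": "...", ... }
//   after:  _type = "doc",  _id = "user-rdeniro", _source = { "password": "...", ..., "type": "user" }
// Documents already of type "doc" only gain the "type" field; their _id is unchanged.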
private static void postSecurityUpgrade(Client client, ActionListener<TransportResponse.Empty> listener) {
// update passwords to the new style, if they are in the old default password mechanism
client.prepareSearch(SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(User.Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE))
.setFetchSource(true)
.execute(ActionListener.wrap(searchResponse -> {
assert searchResponse.getHits().getTotalHits() <= 10 :
"there are more than 10 reserved users we need to change this to retrieve them all!";
Set<String> toConvert = new HashSet<>();
for (SearchHit searchHit : searchResponse.getHits()) {
Map<String, Object> sourceMap = searchHit.getSourceAsMap();
if (hasOldStyleDefaultPassword(sourceMap)) {
toConvert.add(searchHit.getId());
}
}
if (toConvert.isEmpty()) {
listener.onResponse(TransportResponse.Empty.INSTANCE);
} else {
final BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
for (final String id : toConvert) {
final UpdateRequest updateRequest = new UpdateRequest(SECURITY_INDEX_NAME,
INDEX_TYPE, RESERVED_USER_TYPE + "-" + id);
updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.doc(User.Fields.PASSWORD.getPreferredName(), "",
User.Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE);
bulkRequestBuilder.add(updateRequest);
}
bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkItemResponses) {
if (bulkItemResponses.hasFailures()) {
final String msg = "failed to update old style reserved user passwords: " +
bulkItemResponses.buildFailureMessage();
listener.onFailure(new ElasticsearchException(msg));
} else {
listener.onResponse(TransportResponse.Empty.INSTANCE);
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}, listener::onFailure));
}
/**
* Determines whether the supplied user source, given as a {@link Map}, has its password explicitly set to the old default password
*/
private static boolean hasOldStyleDefaultPassword(Map<String, Object> userSource) {
// TODO we should store the hash as something other than a string... bytes?
final String passwordHash = (String) userSource.get(User.Fields.PASSWORD.getPreferredName());
if (passwordHash == null) {
throw new IllegalStateException("passwordHash should never be null");
} else if (passwordHash.isEmpty()) {
// we know empty is the new style
return false;
}
try (SecureString secureString = new SecureString(passwordHash.toCharArray())) {
return Hasher.BCRYPT.verify(ReservedRealm.EMPTY_PASSWORD_TEXT, secureString.getChars());
}
}
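// e.g. password "" -> false (new style); a bcrypt hash that verifies against the empty
// default password -> true (old default); any other hash -> false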
static BiFunction<InternalClient, ClusterService, IndexUpgradeCheck> getWatcherUpgradeCheckFactory(Settings settings) {
return (internalClient, clusterService) ->
new IndexUpgradeCheck<Boolean>("watcher",

View File

@ -62,6 +62,11 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
private String index = null;
/**
* Should this task store its result?
*/
private boolean shouldStoreResult;
// for serialization
public Request() {
@ -94,6 +99,18 @@ public class IndexUpgradeAction extends Action<IndexUpgradeAction.Request, BulkB
return UPGRADE_INDEX_OPTIONS;
}
/**
* Should this task store its result after it has finished?
*/
public Request setShouldStoreResult(boolean shouldStoreResult) {
this.shouldStoreResult = shouldStoreResult;
return this;
}
@Override
public boolean getShouldStoreResult() {
return shouldStoreResult;
}
@Override
public ActionRequestValidationException validate() {

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.upgrade.rest;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
@ -21,6 +22,8 @@ import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.tasks.LoggingTaskListener;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeAction;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeAction.Request;
@ -54,41 +57,63 @@ public class RestIndexUpgradeAction extends BaseRestHandler {
params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(true));
params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(true));
return channel -> client.execute(IndexUpgradeAction.INSTANCE, upgradeRequest,
new RestBuilderListener<BulkByScrollResponse>(channel) {
if (request.paramAsBoolean("wait_for_completion", true)) {
return channel -> client.execute(IndexUpgradeAction.INSTANCE, upgradeRequest,
new RestBuilderListener<BulkByScrollResponse>(channel) {
@Override
public RestResponse buildResponse(BulkByScrollResponse response, XContentBuilder builder) throws Exception {
builder.startObject();
response.toXContent(builder, new ToXContent.DelegatingMapParams(params, channel.request()));
builder.endObject();
return new BytesRestResponse(getStatus(response), builder);
}
private RestStatus getStatus(BulkByScrollResponse response) {
/*
* Return the highest numbered rest status under the assumption that higher numbered statuses are "more error"
* and thus more interesting to the user.
*/
RestStatus status = RestStatus.OK;
if (response.isTimedOut()) {
status = RestStatus.REQUEST_TIMEOUT;
@Override
public RestResponse buildResponse(BulkByScrollResponse response, XContentBuilder builder) throws Exception {
builder.startObject();
response.toXContent(builder, new ToXContent.DelegatingMapParams(params, channel.request()));
builder.endObject();
return new BytesRestResponse(getStatus(response), builder);
}
for (BulkItemResponse.Failure failure : response.getBulkFailures()) {
if (failure.getStatus().getStatus() > status.getStatus()) {
status = failure.getStatus();
private RestStatus getStatus(BulkByScrollResponse response) {
/*
* Return the highest numbered rest status under the assumption that higher numbered statuses are "more error"
* and thus more interesting to the user.
*/
RestStatus status = RestStatus.OK;
if (response.isTimedOut()) {
status = RestStatus.REQUEST_TIMEOUT;
}
}
for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) {
RestStatus failureStatus = ExceptionsHelper.status(failure.getReason());
if (failureStatus.getStatus() > status.getStatus()) {
status = failureStatus;
for (BulkItemResponse.Failure failure : response.getBulkFailures()) {
if (failure.getStatus().getStatus() > status.getStatus()) {
status = failure.getStatus();
}
}
for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) {
RestStatus failureStatus = ExceptionsHelper.status(failure.getReason());
if (failureStatus.getStatus() > status.getStatus()) {
status = failureStatus;
}
}
return status;
}
return status;
}
});
});
} else {
upgradeRequest.setShouldStoreResult(true);
/*
* Validate before forking so that request issues are caught early
*/
ActionRequestValidationException validationException = upgradeRequest.validate();
if (validationException != null) {
throw validationException;
}
Task task = client.executeLocally(IndexUpgradeAction.INSTANCE, upgradeRequest, LoggingTaskListener.instance());
// Send the task id back immediately instead of waiting for the upgrade to complete
return channel -> {
try (XContentBuilder builder = channel.newBuilder()) {
builder.startObject();
builder.field("task", client.getLocalNodeId() + ":" + task.getId());
builder.endObject();
channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
}
};
}
}
}
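When wait_for_completion=false is used, the handler above responds immediately with the task id in nodeId:taskId form, for example (hypothetical node id):
{ "task" : "oTUltX4IQMOUUVeiohTt8A:12345" }
Because setShouldStoreResult(true) was called, the finished result can later be fetched through the task management API once the upgrade completes.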

View File

@ -136,6 +136,8 @@ import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchAction;
import org.elasticsearch.xpack.watcher.transport.actions.put.TransportPutWatchAction;
import org.elasticsearch.xpack.watcher.transport.actions.service.TransportWatcherServiceAction;
import org.elasticsearch.xpack.watcher.transport.actions.service.WatcherServiceAction;
import org.elasticsearch.xpack.watcher.transport.actions.stats.OldTransportWatcherStatsAction;
import org.elasticsearch.xpack.watcher.transport.actions.stats.OldWatcherStatsAction;
import org.elasticsearch.xpack.watcher.transport.actions.stats.TransportWatcherStatsAction;
import org.elasticsearch.xpack.watcher.transport.actions.stats.WatcherStatsAction;
import org.elasticsearch.xpack.watcher.trigger.TriggerEngine;
@ -413,6 +415,8 @@ public class Watcher implements ActionPlugin {
new ActionHandler<>(DeleteWatchAction.INSTANCE, TransportDeleteWatchAction.class),
new ActionHandler<>(GetWatchAction.INSTANCE, TransportGetWatchAction.class),
new ActionHandler<>(WatcherStatsAction.INSTANCE, TransportWatcherStatsAction.class),
new ActionHandler<>(OldWatcherStatsAction.INSTANCE,
OldTransportWatcherStatsAction.class),
new ActionHandler<>(AckWatchAction.INSTANCE, TransportAckWatchAction.class),
new ActionHandler<>(ActivateWatchAction.INSTANCE, TransportActivateWatchAction.class),
new ActionHandler<>(WatcherServiceAction.INSTANCE, TransportWatcherServiceAction.class),

View File

@ -165,7 +165,9 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
if (isIndexInternalFormatTriggeredWatchIndex && isIndexInternalFormatWatchIndex) {
executor.execute(() -> start(event.state(), false));
} else {
logger.warn("Not starting watcher, the indices have not been upgraded yet. Please run the Upgrade API");
logger.warn("Not starting watcher, run the Upgrade API first.");
logger.debug("Upgrade required, matches interal index format: watches index [{}], triggered watches index [{}]",
isIndexInternalFormatWatchIndex, isIndexInternalFormatTriggeredWatchIndex);
}
}
} else {

View File

@ -5,13 +5,11 @@
*/
package org.elasticsearch.xpack.watcher.transport.actions.get;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.xpack.watcher.watch.Watch;
import java.io.IOException;
@ -61,20 +59,12 @@ public class GetWatchRequest extends MasterNodeReadRequest<GetWatchRequest> {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
in.readLong();
in.readByte();
}
id = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
out.writeLong(1);
out.writeByte(VersionType.INTERNAL.getValue());
}
out.writeString(id);
}

View File

@ -0,0 +1,100 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.watcher.WatcherLifeCycleService;
import org.elasticsearch.xpack.watcher.WatcherService;
import org.elasticsearch.xpack.watcher.execution.ExecutionService;
import org.elasticsearch.xpack.watcher.trigger.TriggerService;
/**
* Performs the stats operation required for the rolling upgrade from 5.x
*/
public class OldTransportWatcherStatsAction extends TransportMasterNodeAction<OldWatcherStatsRequest, OldWatcherStatsResponse> {
private final WatcherService watcherService;
private final ExecutionService executionService;
private final XPackLicenseState licenseState;
private final WatcherLifeCycleService lifeCycleService;
private final TriggerService triggerService;
@Inject
public OldTransportWatcherStatsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, WatcherService watcherService,
ExecutionService executionService, XPackLicenseState licenseState,
WatcherLifeCycleService lifeCycleService, TriggerService triggerService) {
super(settings, OldWatcherStatsAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, OldWatcherStatsRequest::new);
this.watcherService = watcherService;
this.executionService = executionService;
this.licenseState = licenseState;
this.lifeCycleService = lifeCycleService;
this.triggerService = triggerService;
}
@Override
protected String executor() {
// cheap operation, no need to fork into another thread
return ThreadPool.Names.SAME;
}
@Override
protected void doExecute(Task task, OldWatcherStatsRequest request, ActionListener<OldWatcherStatsResponse> listener) {
if (licenseState.isWatcherAllowed()) {
super.doExecute(task, request, listener);
} else {
listener.onFailure(LicenseUtils.newComplianceException(XPackPlugin.WATCHER));
}
}
@Override
protected OldWatcherStatsResponse newResponse() {
return new OldWatcherStatsResponse();
}
@Override
protected void masterOperation(OldWatcherStatsRequest request, ClusterState state,
ActionListener<OldWatcherStatsResponse> listener) throws ElasticsearchException {
OldWatcherStatsResponse statsResponse = new OldWatcherStatsResponse();
statsResponse.setWatcherState(watcherService.state());
statsResponse.setThreadPoolQueueSize(executionService.executionThreadPoolQueueSize());
statsResponse.setWatchesCount(triggerService.count());
statsResponse.setThreadPoolMaxSize(executionService.executionThreadPoolMaxSize());
statsResponse.setWatcherMetaData(lifeCycleService.watcherMetaData());
if (request.includeCurrentWatches()) {
statsResponse.setSnapshots(executionService.currentExecutions());
}
if (request.includeQueuedWatches()) {
statsResponse.setQueuedWatches(executionService.queuedWatches());
}
listener.onResponse(statsResponse);
}
@Override
protected ClusterBlockException checkBlock(OldWatcherStatsRequest request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
}

View File

@ -0,0 +1,32 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
/**
* This exists only for BWC with older 5.x nodes, which do not gather stats in a distributed fashion; it is kept to support rolling upgrades
*/
public class OldWatcherStatsAction extends Action<OldWatcherStatsRequest, OldWatcherStatsResponse, OldWatcherStatsRequestBuilder> {
public static final OldWatcherStatsAction INSTANCE = new OldWatcherStatsAction();
public static final String NAME = "cluster:monitor/xpack/watcher/stats";
private OldWatcherStatsAction() {
super(NAME);
}
@Override
public OldWatcherStatsResponse newResponse() {
return new OldWatcherStatsResponse();
}
@Override
public OldWatcherStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new OldWatcherStatsRequestBuilder(client);
}
}

View File

@ -0,0 +1,65 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* The Request to get the watcher stats
*/
public class OldWatcherStatsRequest extends MasterNodeReadRequest<OldWatcherStatsRequest> {
private boolean includeCurrentWatches;
private boolean includeQueuedWatches;
public OldWatcherStatsRequest() {
}
public boolean includeCurrentWatches() {
return includeCurrentWatches;
}
public void includeCurrentWatches(boolean currentWatches) {
this.includeCurrentWatches = currentWatches;
}
public boolean includeQueuedWatches() {
return includeQueuedWatches;
}
public void includeQueuedWatches(boolean includeQueuedWatches) {
this.includeQueuedWatches = includeQueuedWatches;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
includeCurrentWatches = in.readBoolean();
includeQueuedWatches = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(includeCurrentWatches);
out.writeBoolean(includeQueuedWatches);
}
@Override
public String toString() {
return "watcher_stats";
}
}

View File

@ -0,0 +1,30 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
/**
* Watcher stats request builder.
*/
public class OldWatcherStatsRequestBuilder extends MasterNodeReadOperationRequestBuilder<OldWatcherStatsRequest, OldWatcherStatsResponse,
OldWatcherStatsRequestBuilder> {
public OldWatcherStatsRequestBuilder(ElasticsearchClient client) {
super(client, OldWatcherStatsAction.INSTANCE, new OldWatcherStatsRequest());
}
public OldWatcherStatsRequestBuilder setIncludeCurrentWatches(boolean includeCurrentWatches) {
request().includeCurrentWatches(includeCurrentWatches);
return this;
}
public OldWatcherStatsRequestBuilder setIncludeQueuedWatches(boolean includeQueuedWatches) {
request().includeQueuedWatches(includeQueuedWatches);
return this;
}
}

View File

@ -0,0 +1,195 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.watcher.WatcherMetaData;
import org.elasticsearch.xpack.watcher.WatcherState;
import org.elasticsearch.xpack.watcher.execution.QueuedWatch;
import org.elasticsearch.xpack.watcher.execution.WatchExecutionSnapshot;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
public class OldWatcherStatsResponse extends ActionResponse implements ToXContentObject {
private long watchesCount;
private WatcherState watcherState;
private long threadPoolQueueSize;
private long threadPoolMaxSize;
private WatcherMetaData watcherMetaData;
private List<WatchExecutionSnapshot> snapshots;
private List<QueuedWatch> queuedWatches;
OldWatcherStatsResponse() {
}
/**
* @return The current execution thread pool queue size
*/
public long getThreadPoolQueueSize() {
return threadPoolQueueSize;
}
void setThreadPoolQueueSize(long threadPoolQueueSize) {
this.threadPoolQueueSize = threadPoolQueueSize;
}
/**
* @return The max number of threads in the execution thread pool
*/
public long getThreadPoolMaxSize() {
return threadPoolMaxSize;
}
void setThreadPoolMaxSize(long threadPoolMaxSize) {
this.threadPoolMaxSize = threadPoolMaxSize;
}
/**
* @return The number of watches currently registered in the system
*/
public long getWatchesCount() {
return watchesCount;
}
void setWatchesCount(long watchesCount) {
this.watchesCount = watchesCount;
}
/**
* @return The state of the watch service.
*/
public WatcherState getWatcherState() {
return watcherState;
}
void setWatcherState(WatcherState watcherServiceState) {
this.watcherState = watcherServiceState;
}
@Nullable
public List<WatchExecutionSnapshot> getSnapshots() {
return snapshots;
}
void setSnapshots(List<WatchExecutionSnapshot> snapshots) {
this.snapshots = snapshots;
}
@Nullable
public List<QueuedWatch> getQueuedWatches() {
return queuedWatches;
}
public void setQueuedWatches(List<QueuedWatch> queuedWatches) {
this.queuedWatches = queuedWatches;
}
public WatcherMetaData getWatcherMetaData() {
return watcherMetaData;
}
public void setWatcherMetaData(WatcherMetaData watcherMetaData) {
this.watcherMetaData = watcherMetaData;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
watchesCount = in.readLong();
threadPoolQueueSize = in.readLong();
threadPoolMaxSize = in.readLong();
watcherState = WatcherState.fromId(in.readByte());
if (in.readBoolean()) {
int size = in.readVInt();
snapshots = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
WatchExecutionSnapshot snapshot = new WatchExecutionSnapshot();
snapshot.readFrom(in);
snapshots.add(snapshot);
}
}
if (in.readBoolean()) {
int size = in.readVInt();
queuedWatches = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
QueuedWatch queuedWatch = new QueuedWatch();
queuedWatch.readFrom(in);
queuedWatches.add(queuedWatch);
}
}
watcherMetaData = new WatcherMetaData(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(watchesCount);
out.writeLong(threadPoolQueueSize);
out.writeLong(threadPoolMaxSize);
out.writeByte(watcherState.getId());
if (snapshots != null) {
out.writeBoolean(true);
out.writeVInt(snapshots.size());
for (WatchExecutionSnapshot snapshot : snapshots) {
snapshot.writeTo(out);
}
} else {
out.writeBoolean(false);
}
if (queuedWatches != null) {
out.writeBoolean(true);
out.writeVInt(queuedWatches.size());
for (QueuedWatch pending : this.queuedWatches) {
pending.writeTo(out);
}
} else {
out.writeBoolean(false);
}
watcherMetaData.writeTo(out);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("watcher_state", watcherState.toString().toLowerCase(Locale.ROOT));
builder.field("watch_count", watchesCount);
builder.startObject("execution_thread_pool");
builder.field("queue_size", threadPoolQueueSize);
builder.field("max_size", threadPoolMaxSize);
builder.endObject();
if (snapshots != null) {
builder.startArray("current_watches");
for (WatchExecutionSnapshot snapshot : snapshots) {
snapshot.toXContent(builder, params);
}
builder.endArray();
}
if (queuedWatches != null) {
builder.startArray("queued_watches");
for (QueuedWatch queuedWatch : queuedWatches) {
queuedWatch.toXContent(builder, params);
}
builder.endArray();
}
watcherMetaData.toXContent(builder, params);
builder.endObject();
return builder;
}
}
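
The writeTo/readFrom pair above relies on a common transport idiom: a boolean presence flag, then a vInt element count, then the elements themselves. A minimal sketch of that idiom in isolation; the helper names are hypothetical, the stream calls are the ones the class uses:

    // Write: an absent list costs one boolean on the wire; a present list is
    // size-prefixed so the reader knows how many elements to expect.
    static void writeOptionalList(StreamOutput out, List<QueuedWatch> items) throws IOException {
        if (items != null) {
            out.writeBoolean(true);
            out.writeVInt(items.size());
            for (QueuedWatch item : items) {
                item.writeTo(out);
            }
        } else {
            out.writeBoolean(false);
        }
    }

    // Read: mirror the writer exactly, or the stream desynchronizes.
    static List<QueuedWatch> readOptionalList(StreamInput in) throws IOException {
        if (in.readBoolean() == false) {
            return null;
        }
        int size = in.readVInt();
        List<QueuedWatch> items = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            QueuedWatch item = new QueuedWatch();
            item.readFrom(in);
            items.add(item);
        }
        return items;
    }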

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class WatcherStatsAction extends Action<WatcherStatsRequest, WatcherStatsResponse, WatcherStatsRequestBuilder> {
public static final WatcherStatsAction INSTANCE = new WatcherStatsAction();
public static final String NAME = "cluster:monitor/xpack/watcher/stats";
public static final String NAME = "cluster:monitor/xpack/watcher/stats/dist";
private WatcherStatsAction() {
super(NAME);

View File

@ -10,7 +10,6 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.watcher.transport.actions.service.WatcherServiceRequest;
import java.io.IOException;

View File

@ -5,7 +5,6 @@
*/
package org.elasticsearch.xpack.watcher.transport.actions.stats;
import org.elasticsearch.Version;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
@ -41,63 +40,14 @@ public class WatcherStatsResponse extends BaseNodesResponse<WatcherStatsResponse
@Override
public void writeTo(StreamOutput out) throws IOException {
// if (out.getVersion().after(Version.V_6_0_0_alpha1_UNRELEASED)) {
super.writeTo(out);
out.writeBoolean(watcherMetaData.manuallyStopped());
/*
} else {
// BWC layer for older versions, this is not considered exact
// this mimics the behaviour of 5.x
out.writeLong(getNodes().stream().mapToLong(Node::getWatchesCount).sum());
out.writeLong(getNodes().stream().mapToLong(Node::getThreadPoolQueueSize).sum());
out.writeLong(getNodes().stream().mapToLong(Node::getThreadPoolMaxSize).sum());
// byte, watcher state, cannot be exact, just pick the first
out.writeByte(getNodes().get(0).getWatcherState().getId());
out.writeString(Version.CURRENT.toString()); // version
out.writeString(XPackBuild.CURRENT.shortHash()); // hash
out.writeString(XPackBuild.CURRENT.shortHash()); // short hash
out.writeString(XPackBuild.CURRENT.date()); // date
List<WatchExecutionSnapshot> snapshots = getNodes().stream().map(Node::getSnapshots)
.flatMap(List::stream)
.collect(Collectors.toList());
if (snapshots != null) {
out.writeBoolean(true);
out.writeVInt(snapshots.size());
for (WatchExecutionSnapshot snapshot : snapshots) {
snapshot.writeTo(out);
}
} else {
out.writeBoolean(false);
}
List<QueuedWatch> queuedWatches = getNodes().stream().map(Node::getQueuedWatches)
.flatMap(List::stream)
.collect(Collectors.toList());
if (queuedWatches != null) {
out.writeBoolean(true);
out.writeVInt(queuedWatches.size());
for (QueuedWatch pending : queuedWatches) {
pending.writeTo(out);
}
} else {
out.writeBoolean(false);
}
watcherMetaData.writeTo(out);
}
*/
super.writeTo(out);
out.writeBoolean(watcherMetaData.manuallyStopped());
}
@Override
public void readFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
super.readFrom(in);
watcherMetaData = new WatcherMetaData(in.readBoolean());
} else {
// TODO what to do here? create another BWC helping stuff here...
}
super.readFrom(in);
watcherMetaData = new WatcherMetaData(in.readBoolean());
}
@Override

View File

@ -31,3 +31,13 @@ grant {
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.elasticsearch-rest-client-6.0.0-beta1-SNAPSHOT.jar}" {
// the REST client reads system properties to resolve the default proxy
permission java.net.NetPermission "getProxySelector";
};
grant codeBase "${codebase.httpasyncclient-4.1.2.jar}" {
// the REST client reads system properties to resolve the default proxy
permission java.net.NetPermission "getProxySelector";
};
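
Both grants exist for the same reason: under the Java security manager, ProxySelector.getDefault() checks NetPermission("getProxySelector"), and the REST client (and its async http dependency) calls it while resolving the default proxy. A minimal sketch of the call the grants permit:

    import java.net.ProxySelector;

    class ProxyProbe {
        public static void main(String[] args) {
            // With a security manager installed, this throws AccessControlException
            // unless the calling code base has NetPermission "getProxySelector".
            ProxySelector selector = ProxySelector.getDefault();
            System.out.println(selector);
        }
    }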

View File

@ -243,7 +243,7 @@
"pipelines": {
"type": "nested",
"properties": {
"name": {
"id": {
"type": "keyword"
},
"hash": {
@ -265,6 +265,9 @@
},
"duration_in_millis":{
"type": "long"
},
"queue_push_duration_in_millis": {
"type": "long"
}
}
},
@ -278,13 +281,18 @@
}
}
},
"components": {
"vertices": {
"type": "nested",
"properties": {
"id": {
"type": "keyword"
},
"long_stat": {
"pipeline_ephemeral_id": { "type": "keyword" },
"events_in": { "type": "long" },
"events_out": { "type": "long" },
"duration_in_millis": { "type": "long" },
"queue_push_duration_in_millis": { "type": "long" },
"long_counters": {
"type": "nested",
"properties": {
"name": {
@ -292,13 +300,10 @@
},
"value": {
"type": "long"
},
"metric_type": {
"type": "keyword"
}
}
},
"double_stat": {
"double_gauges": {
"type": "nested",
"properties": {
"name": {
@ -306,23 +311,6 @@
},
"value": {
"type": "double"
},
"metric_type": {
"type": "keyword"
}
}
},
"string_stat": {
"type": "nested",
"properties": {
"name": {
"type": "keyword"
},
"value": {
"type": "keyword"
},
"metric_type": {
"type": "keyword"
}
}
}
@ -376,7 +364,7 @@
},
"pipeline": {
"properties": {
"name": {
"id": {
"type": "keyword"
},
"hash": {

View File

@ -142,7 +142,7 @@
}
},
"actions": {
"trigger_alert": {
"add_to_alerts_index": {
"index": {
"index": ".monitoring-alerts-6",
"doc_type": "doc",

View File

@ -138,7 +138,7 @@
}
},
"actions": {
"trigger_alert": {
"add_to_alerts_index": {
"index": {
"index": ".monitoring-alerts-6",
"doc_type": "doc",

View File

@ -165,7 +165,7 @@
}
},
"actions": {
"trigger_alert": {
"add_to_alerts_index": {
"index": {
"index": ".monitoring-alerts-6",
"doc_type": "doc",

View File

@ -165,7 +165,7 @@
}
},
"actions": {
"trigger_alert": {
"add_to_alerts_index": {
"index": {
"index": ".monitoring-alerts-6",
"doc_type": "doc",

View File

@ -0,0 +1,155 @@
{
"metadata": {
"name": "X-Pack Monitoring: License Expiration",
"xpack": {
"link": "license",
"expires_days": [ 60, 30, 14, 7 ],
"severity": 0,
"alert_index": ".monitoring-alerts-6",
"cluster_uuid": "${monitoring.watch.cluster_uuid}",
"type": "monitoring",
"version_created": 6000026,
"watch": "${monitoring.watch.id}"
}
},
"trigger": {
"schedule": {
"interval": "1m"
}
},
"input": {
"chain": {
"inputs": [
{
"check": {
"search": {
"request": {
"indices": [
".monitoring-es-*"
],
"body": {
"size": 1,
"sort": [
{
"timestamp": {
"order": "desc"
}
}
],
"_source": [
"license.*"
],
"query": {
"bool": {
"filter": [
{
"term": {
"cluster_uuid": "{{ctx.metadata.xpack.cluster_uuid}}"
}
},
{
"term": {
"type": "cluster_stats"
}
}
]
}
}
}
}
}
}
},
{
"alert": {
"search": {
"request": {
"indices": [
".monitoring-alerts-6"
],
"body": {
"size": 1,
"terminate_after": 1,
"query": {
"bool": {
"filter": {
"term": {
"_id": "{{ctx.watch_id}}"
}
}
}
},
"sort": [
{ "timestamp": { "order": "desc" } }
]
}
}
}
}
},
{
"kibana_settings": {
"search": {
"request": {
"indices": [
".monitoring-kibana-6-*"
],
"body": {
"size": 1,
"query": {
"bool": {
"filter": {
"term": {
"type": "kibana_settings"
}
}
}
},
"sort": [
{
"timestamp": {
"order": "desc"
}
}
]
}
}
}
}
}
]
}
},
"condition": {
"script": {
"source": "if (ctx.payload.check.hits.total == 0) {return false;}def license = ctx.payload.check.hits.hits[0]._source.license;if (license == null) {return false;}ctx.vars.fails_check = false;Instant expiry = Instant.ofEpochMilli(license.expiry_date_in_millis);ctx.vars.expiry = expiry;if (license.status != 'active') {ctx.vars.expired = true;ctx.vars.fails_check = true;ctx.metadata.xpack.severity = 2001;} else {Instant now = Instant.ofEpochMilli(new Date().getTime());ctx.vars.now = now;for (int i = ctx.metadata.xpack.expires_days.length - 1;i > -1;--i) {if (license.type == 'trial' && i < 2) {break;}Instant fromNow = now.plusSeconds(ctx.metadata.xpack.expires_days[i] * 24 * 60 * 60);if (fromNow.isAfter(expiry)) {ctx.vars.fails_check = true;ctx.metadata.xpack.severity = i * 1000;break;}}}ctx.vars.not_resolved = (ctx.payload.alert.hits.total == 1 && ctx.payload.alert.hits.hits[0]._source.resolved_timestamp == null);ctx.vars.update = ctx.vars.fails_check || ctx.vars.not_resolved;"
}
},
"transform": {
"script": {
"source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.defaultAdminEmail : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def alertMessage = null;if (ctx.vars.fails_check) {alertMessage = 'Update your license.';}if (ctx.vars.not_resolved) {ctx.payload = ctx.payload.alert.hits.hits[0]._source;ctx.payload.metadata = ctx.metadata.xpack;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time,'prefix': 'This cluster\\'s license is going to expire in {{#relativeTime}}metadata.time{{/relativeTime}} at {{#absoluteTime}}metadata.time{{/absoluteTime}}.','message': alertMessage,'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.metadata.time = ctx.vars.expiry.toString();}ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;"
}
},
"actions": {
"add_to_alerts_index": {
"index": {
"index": ".monitoring-alerts-6",
"doc_type": "doc",
"doc_id": "${monitoring.watch.unique_id}"
}
},
"send_email_to_admin": {
"condition": {
"script": "return ctx.vars.email_recipient != null && (ctx.vars.is_new || ctx.vars.is_resolved)"
},
"email": {
"to": "X-Pack Admin <{{ctx.vars.email_recipient}}>",
"from": "X-Pack Admin <{{ctx.vars.email_recipient}}>",
"subject": "[{{#ctx.vars.is_new}}NEW{{/ctx.vars.is_new}}{{#ctx.vars.is_resolved}}RESOLVED{{/ctx.vars.is_resolved}}] {{ctx.metadata.name}}",
"body": {
"text": "{{#ctx.vars.is_resolved}}This cluster alert has been resolved: {{/ctx.vars.is_resolved}} This cluster's license is going to expire on {{ctx.payload.metadata.time}}. {{ctx.payload.message}}"
}
}
}
}
}
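
For readability, the condition script from the watch above (the same Painless source re-indented, with explanatory comments added):

    if (ctx.payload.check.hits.total == 0) { return false; }
    def license = ctx.payload.check.hits.hits[0]._source.license;
    if (license == null) { return false; }
    ctx.vars.fails_check = false;
    Instant expiry = Instant.ofEpochMilli(license.expiry_date_in_millis);
    ctx.vars.expiry = expiry;
    if (license.status != 'active') {
        // an already-inactive license is the highest severity
        ctx.vars.expired = true;
        ctx.vars.fails_check = true;
        ctx.metadata.xpack.severity = 2001;
    } else {
        Instant now = Instant.ofEpochMilli(new Date().getTime());
        ctx.vars.now = now;
        // walk the thresholds from nearest (7 days) to farthest (60 days);
        // the first window that already contains the expiry date wins
        for (int i = ctx.metadata.xpack.expires_days.length - 1; i > -1; --i) {
            // trial licenses only alert on the two nearest thresholds
            if (license.type == 'trial' && i < 2) { break; }
            Instant fromNow = now.plusSeconds(ctx.metadata.xpack.expires_days[i] * 24 * 60 * 60);
            if (fromNow.isAfter(expiry)) {
                ctx.vars.fails_check = true;
                ctx.metadata.xpack.severity = i * 1000;
                break;
            }
        }
    }
    ctx.vars.not_resolved = (ctx.payload.alert.hits.total == 1
        && ctx.payload.alert.hits.hits[0]._source.resolved_timestamp == null);
    ctx.vars.update = ctx.vars.fails_check || ctx.vars.not_resolved;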

View File

@ -1,5 +1,5 @@
{
"index_patterns" : ".security-*",
"index_patterns" : [ ".security-*" ],
"order" : 1000,
"settings" : {
"number_of_shards" : 1,
@ -113,8 +113,5 @@
}
}
}
},
"aliases" : {
".security": {}
}
}

View File

@ -1,188 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.action.XPackUsageRequestBuilder;
import org.elasticsearch.xpack.action.XPackUsageResponse;
import org.elasticsearch.xpack.security.SecurityFeatureSet;
import org.elasticsearch.xpack.security.action.role.ClearRolesCacheRequestBuilder;
import org.elasticsearch.xpack.security.action.role.ClearRolesCacheResponse;
import org.elasticsearch.xpack.security.action.role.GetRolesResponse;
import org.elasticsearch.xpack.security.action.role.PutRoleResponse;
import org.elasticsearch.xpack.security.action.user.GetUsersResponse;
import org.elasticsearch.xpack.security.action.user.PutUserResponse;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissionsDefinition;
import org.elasticsearch.xpack.security.client.SecurityClient;
import org.elasticsearch.xpack.security.user.User;
import java.util.Collections;
import java.util.List;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits;
import static org.elasticsearch.xpack.security.authc.support.UsernamePasswordTokenTests.basicAuthHeaderValue;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
/**
* Backwards compatibility test that loads some data from a pre-Version.CURRENT cluster and attempts to do some basic security stuff with
* it. It contains:
* <ul>
* <li>This user: {@code {"username": "bwc_test_user", "roles" : [ "bwc_test_role" ], "password" : "9876543210"}}</li>
* <li>This role: {@code {"name": "bwc_test_role", "cluster": ["all"]}, "run_as": [ "other_user" ], "indices": [{
* "names": [ "index1", "index2" ],
* "privileges": ["all"],
* "fields": [ "title", "body" ],
* "query": "{\"match\": {\"title\": \"foo\"}}"
* }]}</li>
* <li>This document in {@code index1}: {@code {
* "title": "foo",
* "body": "bwc_test_user should be able to see this field",
* "secured_body": "bwc_test_user should not be able to see this field"}}</li>
* <li>This document in {@code index1}: {@code {"title": "bwc_test_user should not be able to see this document"}}</li>
* <li>This document in {@code index2}: {@code {
* "title": "foo",
* "body": "bwc_test_user should be able to see this field",
* "secured_body": "bwc_test_user should not be able to see this field"}}</li>
* <li>This document in {@code index2}: {@code {"title": "bwc_test_user should not be able to see this document"}}</li>
* <li>This document in {@code index3}: {@code {"title": "bwc_test_user should not see this index"}}</li>
* </ul>
**/
// This will only work when the upgrade API is in place!
@AwaitsFix(bugUrl = "https://github.com/elastic/dev/issues/741")
public class OldSecurityIndexBackwardsCompatibilityTests extends AbstractOldXPackIndicesBackwardsCompatibilityTestCase {
protected void checkVersion(Version version) throws Exception {
// wait for service to start
SecurityClient securityClient = new SecurityClient(client());
assertSecurityIndexActive();
// make sure usage stats are still working even with old fls format
ClearRolesCacheResponse clearResponse = new ClearRolesCacheRequestBuilder(client()).get();
assertThat(clearResponse.failures().size(), equalTo(0));
XPackUsageResponse usageResponse = new XPackUsageRequestBuilder(client()).get();
List<XPackFeatureSet.Usage> usagesList = usageResponse.getUsages();
for (XPackFeatureSet.Usage usage : usagesList) {
if (usage instanceof SecurityFeatureSet.Usage) {
XContentBuilder builder = jsonBuilder();
usage.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertThat(builder.string(),
anyOf(containsString("\"roles\":{\"native\":{\"size\":1,\"fls\":true,\"dls\":true}"),
containsString("\"roles\":{\"native\":{\"size\":1,\"dls\":true,\"fls\":true}")));
}
}
// test that user and roles are there
logger.info("Getting roles...");
GetRolesResponse getRolesResponse = securityClient.prepareGetRoles("bwc_test_role").get();
assertThat(getRolesResponse.roles(), arrayWithSize(1));
RoleDescriptor role = getRolesResponse.roles()[0];
assertEquals("bwc_test_role", role.getName());
assertThat(role.getIndicesPrivileges(), arrayWithSize(1));
RoleDescriptor.IndicesPrivileges indicesPrivileges = role.getIndicesPrivileges()[0];
assertThat(indicesPrivileges.getIndices(), arrayWithSize(2));
assertArrayEquals(new String[] { "index1", "index2" }, indicesPrivileges.getIndices());
final FieldPermissions fieldPermissions = new FieldPermissions(
new FieldPermissionsDefinition(indicesPrivileges.getGrantedFields(), indicesPrivileges.getDeniedFields()));
assertTrue(fieldPermissions.grantsAccessTo("title"));
assertTrue(fieldPermissions.grantsAccessTo("body"));
assertArrayEquals(new String[] { "all" }, indicesPrivileges.getPrivileges());
assertEquals("{\"match\": {\"title\": \"foo\"}}", indicesPrivileges.getQuery().iterator().next().utf8ToString());
assertArrayEquals(new String[] { "all" }, role.getClusterPrivileges());
assertArrayEquals(new String[] { "other_user" }, role.getRunAs());
assertEquals("bwc_test_role", role.getName());
// check x-content is rendered in new format although it comes from an old index
XContentBuilder builder = jsonBuilder();
indicesPrivileges.toXContent(builder, null);
assertThat(builder.string(), containsString("\"field_security\":{\"grant\":[\"title\",\"body\"]}"));
logger.info("Getting users...");
assertSecurityIndexActive();
GetUsersResponse getUsersResponse = securityClient.prepareGetUsers("bwc_test_user").get();
assertThat(getUsersResponse.users(), arrayWithSize(1));
User user = getUsersResponse.users()[0];
assertArrayEquals(new String[] { "bwc_test_role" }, user.roles());
assertEquals("bwc_test_user", user.principal());
// check that documents are there
assertHitCount(client().prepareSearch("index1", "index2", "index3").get(), 5);
/* check that a search that misses all documents doesn't hit any alias starting with `-`. We have one in the backwards compatibility
* indices for versions before 5.1.0 because we can't create them any more. */
if (version.before(Version.V_5_1_1)) {
GetAliasesResponse aliasesResponse = client().admin().indices().prepareGetAliases().get();
List<AliasMetaData> aliases = aliasesResponse.getAliases().get("index3");
assertThat("alias doesn't exist", aliases, hasSize(1));
assertEquals("-index3", aliases.get(0).getAlias());
SearchResponse searchResponse = client().prepareSearch("does_not_exist_*")
.setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())).get();
assertNoSearchHits(searchResponse);
}
Client bwcTestUserClient = client().filterWithHeader(
singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("bwc_test_user", "9876543210")));
// check that index permissions work as expected
SearchResponse searchResponse = bwcTestUserClient.prepareSearch("index1", "index2").get();
assertEquals(2, searchResponse.getHits().getTotalHits());
assertEquals("foo", searchResponse.getHits().getHits()[0].getSourceAsMap().get("title"));
assertEquals("bwc_test_user should be able to see this field", searchResponse.getHits().getHits()[0].getSourceAsMap().get("body"));
assertNull(searchResponse.getHits().getHits()[0].getSourceAsMap().get("secured_body"));
assertEquals("foo", searchResponse.getHits().getHits()[1].getSourceAsMap().get("title"));
assertEquals("bwc_test_user should be able to see this field", searchResponse.getHits().getHits()[1].getSourceAsMap().get("body"));
assertNull(searchResponse.getHits().getHits()[1].getSourceAsMap().get("secured_body"));
Exception e = expectThrows(ElasticsearchSecurityException.class, () -> bwcTestUserClient.prepareSearch("index3").get());
assertEquals("action [indices:data/read/search] is unauthorized for user [bwc_test_user]", e.getMessage());
// try adding a user
PutRoleResponse roleResponse = securityClient.preparePutRole("test_role").addIndices(
new String[] { "index3" },
new String[] { "all" },
new String[] { "title", "body" },
null,
new BytesArray("{\"term\": {\"title\":\"not\"}}")).cluster("all")
.get();
assertTrue(roleResponse.isCreated());
PutUserResponse userResponse = securityClient.preparePutUser("another_bwc_test_user", "123123".toCharArray(), "test_role")
.email("a@b.c").get();
assertTrue(userResponse.created());
searchResponse = client().filterWithHeader(
Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER,
basicAuthHeaderValue("another_bwc_test_user", "123123")
)).prepareSearch("index3").get();
assertEquals(1, searchResponse.getHits().getTotalHits());
assertEquals("bwc_test_user should not see this index", searchResponse.getHits().getHits()[0].getSourceAsMap().get("title"));
userResponse = securityClient.preparePutUser("meta_bwc_test_user", "123123".toCharArray(), "test_role").email("a@b.c")
.metadata(singletonMap("test", 1)).get();
assertTrue(userResponse.created());
getUsersResponse = securityClient.prepareGetUsers("meta_bwc_test_user").get();
assertThat(getUsersResponse.users(), arrayWithSize(1));
user = getUsersResponse.users()[0];
assertArrayEquals(new String[] { "test_role" }, user.roles());
assertEquals("meta_bwc_test_user", user.principal());
}
}
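
The test authenticates each request with an HTTP Basic header built by basicAuthHeaderValue. A minimal sketch of what such a value is under the standard Basic scheme (buildBasicAuthHeader is a hypothetical stand-in, not the test helper itself):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    class BasicAuth {
        // Standard HTTP Basic scheme: base64("user:password"), prefixed with "Basic ".
        static String buildBasicAuthHeader(String user, String password) {
            String token = Base64.getEncoder()
                    .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));
            return "Basic " + token;
        }

        public static void main(String[] args) {
            // e.g. Authorization: Basic YndjX3Rlc3RfdXNlcjo5ODc2NTQzMjEw
            System.out.println(buildBasicAuthHeader("bwc_test_user", "9876543210"));
        }
    }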

View File

@ -5,12 +5,12 @@
*/
package org.elasticsearch.integration;
import org.apache.http.HttpEntity;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.settings.SecureString;

View File

@ -5,11 +5,11 @@
*/
package org.elasticsearch.integration;
import org.apache.http.Header;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;

View File

@ -5,8 +5,8 @@
*/
package org.elasticsearch.integration;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.http.util.EntityUtils;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Response;

View File

@ -5,7 +5,7 @@
*/
package org.elasticsearch.integration;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.SecureString;
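
These hunks only swap org.apache.http.* imports for the copies the low-level REST client ships shaded under org.elasticsearch.client.http.*; the call sites stay identical. A minimal sketch compiling against the shaded packages (assuming the shaded jar mirrors the Apache HttpCore/HttpClient API one-for-one, as the mechanical import swap suggests):

    import org.elasticsearch.client.http.HttpEntity;
    import org.elasticsearch.client.http.entity.ContentType;
    import org.elasticsearch.client.http.entity.StringEntity;
    import org.elasticsearch.client.http.message.BasicHeader;

    class ShadedHttpExample {
        public static void main(String[] args) {
            // Same API as org.apache.http; only the package is relocated.
            HttpEntity body = new StringEntity("{\"query\":{\"match_all\":{}}}",
                    ContentType.APPLICATION_JSON);
            BasicHeader header = new BasicHeader("Content-Type", "application/json");
            System.out.println(body.getContentLength() + " bytes, " + header);
        }
    }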

Some files were not shown because too many files have changed in this diff.