Merge branch 'master' into feature/sql_2

Original commit: elastic/x-pack-elasticsearch@348f0468b0
This commit is contained in:
Costin Leau 2018-01-24 19:19:51 +02:00
commit 3f4c789993
78 changed files with 1233 additions and 730 deletions

View File

@ -23,6 +23,7 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/rest-api/security/users.asciidoc',
'en/rest-api/security/tokens.asciidoc',
'en/rest-api/watcher/put-watch.asciidoc',
'en/rest-api/ml/delete-calendar-event.asciidoc',
'en/rest-api/ml/post-data.asciidoc',
'en/security/authentication/user-cache.asciidoc',
'en/security/authorization/field-and-document-access-control.asciidoc',
@ -434,5 +435,43 @@ setups['server_metrics_startdf'] = '''
- do:
xpack.ml.start_datafeed:
datafeed_id: "datafeed-total-requests"
'''
setups['calendar_outages'] = '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
'''
setups['calendar_outages_addevent'] = setups['calendar_outages'] + '''
- do:
xpack.ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" }
'''
setups['calendar_outages_openjob'] = setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
'''
setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + '''
- do:
xpack.ml.put_calendar:
calendar_id: "planned-outages"
body: >
{
"job_ids": ["total-requests"]
}
'''
setups['calendar_outages_addevent'] = setups['calendar_outages_addjob'] + '''
- do:
xpack.ml.post_calendar_events:
calendar_id: "planned-outages"
body: >
{ "events" : [
{ "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"},
{ "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"},
{ "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"}
]}
'''

View File

@ -32,6 +32,19 @@ The main {ml} resources can be accessed with a variety of endpoints:
* {ref}/ml-close-job.html[POST /anomaly_detectors/<job_id>/_close]: Close a job
* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/<job_id+++>+++]: Delete a job
[float]
[[ml-api-calendars]]
=== /calendars/
* {ref}/ml-put-calendar.html[PUT /calendars/<calendar_id+++>+++]: Create a calendar
* {ref}/ml-post-calendar-event.html[POST /calendars/<calendar_id+++>+++/events]: Add a scheduled event to a calendar
* {ref}/ml-put-calendar-job.html[PUT /calendars/<calendar_id+++>+++/jobs/<job_id+++>+++]: Associate a job with a calendar
* {ref}/ml-get-calendar.html[GET /calendars/<calendar_id+++>+++]: Get calendar details
* {ref}/ml-get-calendar-event.html[GET /calendars/<calendar_id+++>+++/events]: Get scheduled event details
* {ref}/ml-delete-calendar-event.html[DELETE /calendars/<calendar_id+++>+++/events/<event_id+++>+++]: Remove a scheduled event from a calendar
* {ref}/ml-delete-calendar-job.html[DELETE /calendars/<calendar_id+++>+++/jobs/<job_id+++>+++]: Disassociate a job from a calendar
* {ref}/ml-delete-calendar.html[DELETE /calendars/<calendar_id+++>+++]: Delete a calendar
[float]
[[ml-api-datafeeds]]
=== /datafeeds/

View File

@ -21,10 +21,7 @@ events appropriately.
If you want to add multiple scheduled events at once, you can import an
iCalendar (`.ics`) file in {kib} or a JSON file in the
//{ref}/ml-post-calendar-event.html[
add events to calendar API
//]
.
{ref}/ml-post-calendar-event.html[add events to calendar API].
[NOTE]
--

View File

@ -5,13 +5,17 @@
These resource definitions are used in {ml} APIs and in {kib} advanced
job configuration options.
* <<ml-calendar-resource,Calendars>>
* <<ml-datafeed-resource,{dfeeds-cap}>>
* <<ml-datafeed-counts,{dfeed-cap} counts>>
* <<ml-job-resource,Jobs>>
* <<ml-jobstats,Job statistics>>
* <<ml-snapshot-resource,Model snapshots>>
* <<ml-results-resource,Results>>
* <<ml-event-resource,Scheduled Events>>
[role="xpack"]
include::ml/calendarresource.asciidoc[]
[role="xpack"]
include::ml/datafeedresource.asciidoc[]
[role="xpack"]
@ -22,3 +26,5 @@ include::ml/jobcounts.asciidoc[]
include::ml/snapshotresource.asciidoc[]
[role="xpack"]
include::ml/resultsresource.asciidoc[]
[role="xpack"]
include::ml/eventresource.asciidoc[]

View File

@ -12,7 +12,8 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-put-calendar,Create calendar>>, <<ml-delete-calendar,Delete calendar>>
* <<ml-put-calendar-job,Add job to calendar>>, <<ml-delete-calendar-job,Delete job from calendar>>
* <<ml-get-calendar,Get calendars>>
* <<ml-post-calendar-event,Add scheduled events to calendar>>, <<ml-delete-calendar-event,Delete scheduled events from calendar>>
* <<ml-get-calendar,Get calendars>>, <<ml-get-calendar-event,Get scheduled events>>
[float]
[[ml-api-datafeed-endpoint]]
@ -61,6 +62,7 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-get-record,Get records>>
//ADD
include::ml/post-calendar-event.asciidoc[]
include::ml/put-calendar-job.asciidoc[]
//CLOSE
include::ml/close-job.asciidoc[]
@ -71,6 +73,7 @@ include::ml/put-job.asciidoc[]
//DELETE
include::ml/delete-calendar.asciidoc[]
include::ml/delete-datafeed.asciidoc[]
include::ml/delete-calendar-event.asciidoc[]
include::ml/delete-job.asciidoc[]
include::ml/delete-calendar-job.asciidoc[]
include::ml/delete-snapshot.asciidoc[]
@ -89,6 +92,7 @@ include::ml/get-influencer.asciidoc[]
include::ml/get-job.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/get-snapshot.asciidoc[]
include::ml/get-calendar-event.asciidoc[]
include::ml/get-record.asciidoc[]
//OPEN
include::ml/open-job.asciidoc[]

View File

@ -0,0 +1,14 @@
[role="xpack"]
[[ml-calendar-resource]]
=== Calendar Resources
A calendar resource has the following properties:
`calendar_id`::
(string) A string that uniquely identifies the calendar.
`job_ids`::
(array) An array of job identifiers. For example: `["total-requests"]`.
For more information, see
{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events].

View File

@ -0,0 +1,55 @@
[role="xpack"]
[[ml-delete-calendar-event]]
=== Delete Events from Calendar API
++++
<titleabbrev>Delete Events from Calendar</titleabbrev>
++++
This API enables you to delete scheduled events from a calendar.
==== Request
`DELETE _xpack/ml/calendars/<calendar_id>/events/<event_id>`
==== Description
This API removes individual events from a calendar. To remove all scheduled
events and delete the calendar, see the
<<ml-delete-calendar,delete calendar API>>.
==== Path Parameters
`calendar_id` (required)::
(string) Identifier for the calendar.
`event_id` (required)::
(string) Identifier for the scheduled event. You can obtain this identifier
by using the <<ml-get-calendar-event,get calendar events API>>.
==== Authorization
You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
==== Examples
The following example deletes a scheduled event from the `planned-outages`
calendar:
[source,js]
--------------------------------------------------
DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st
--------------------------------------------------
// CONSOLE
// TEST[skip:automatically-generated ID]
When the event is removed, you receive the following results:
[source,js]
----
{
"acknowledged": true
}
----

View File

@ -5,7 +5,7 @@
<titleabbrev>Delete Jobs from Calendar</titleabbrev>
++++
This API enables you to remove jobs from a calendar.
This API enables you to delete jobs from a calendar.
==== Request
@ -13,9 +13,6 @@ This API enables you to remove jobs from a calendar.
`DELETE _xpack/ml/calendars/<calendar_id>/jobs/<job_id>`
//==== Description
==== Path Parameters
`calendar_id` (required)::
@ -24,12 +21,32 @@ This API enables you to remove jobs from a calendar.
`job_id` (required)::
(string) Identifier for the job.
//===== Query Parameters
==== Authorization
You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//==== Examples
==== Examples
The following example removes the association between the `planned-outages`
calendar and `total-requests` job:
[source,js]
--------------------------------------------------
DELETE _xpack/ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
When the job is removed from the calendar, you receive the following
results:
[source,js]
----
{
"calendar_id": "planned-outages",
"job_ids": []
}
----
//TESTRESPONSE

View File

@ -13,18 +13,40 @@ This API enables you to delete a calendar.
`DELETE _xpack/ml/calendars/<calendar_id>`
//==== Description
==== Description
This API removes all scheduled events from the calendar then deletes the
calendar.
==== Path Parameters
`calendar_id` (required)::
(string) Identifier for the calendar.
//===== Query Parameters
==== Authorization
You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see {xpack-ref}/security-privileges.html[Security Privileges].
//==== Examples
==== Examples
The following example deletes the `planned-outages` calendar:
[source,js]
--------------------------------------------------
DELETE _xpack/ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages]
When the calendar is deleted, you receive the following results:
[source,js]
----
{
"acknowledged": true
}
----
//TESTRESPONSE

View File

@ -0,0 +1,26 @@
[role="xpack"]
[[ml-event-resource]]
=== Scheduled Event Resources
An events resource has the following properties:
`calendar_id`::
(string) An identifier for the calendar that contains the scheduled
event. This property is optional in the <<ml-post-calendar-event,add events to calendar API>>.
`description`::
(string) A description of the scheduled event.
`end_time`::
(string) The timestamp for the end of the scheduled event. The datetime string
is in ISO 8601 format.
`event_id`::
(string) An automatically-generated identifier for the scheduled event.
`start_time`::
(string) The timestamp for the beginning of the scheduled event. The datetime
string is in ISO 8601 format.
For more information, see
{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events].

View File

@ -0,0 +1,106 @@
[role="xpack"]
[[ml-get-calendar-event]]
=== Get Scheduled Events API
++++
<titleabbrev>Get Scheduled Events</titleabbrev>
++++
This API enables you to retrieve information about the scheduled events in
calendars.
==== Request
`GET _xpack/ml/calendars/<calendar_id>/events` +
`GET _xpack/ml/calendars/_all/events`
===== Description
You can get scheduled event information for a single calendar or for all
calendars by using `_all`.
==== Path Parameters
`calendar_id` (required)::
(string) Identifier for the calendar.
==== Request Body
`end`::
(string) Specifies to get events with timestamps earlier than this time.
`from`::
(integer) Skips the specified number of events.
`size`::
(integer) Specifies the maximum number of events to obtain.
`start`::
(string) Specifies to get events with timestamps after this time.
==== Results
The API returns the following information:
`events`::
(array) An array of scheduled event resources.
For more information, see <<ml-event-resource>>.
==== Authorization
You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
==== Examples
The following example gets information about the scheduled events in the
`planned-outages` calendar:
[source,js]
--------------------------------------------------
GET _xpack/ml/calendars/planned-outages/events
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addevent]
The API returns the following results:
[source,js]
----
{
"count": 3,
"events": [
{
"description": "event 1",
"start_time": 1513641600000,
"end_time": 1513728000000,
"calendar_id": "planned-outages",
"event_id": "LS8LJGEBMTCMA-qz49st"
},
{
"description": "event 2",
"start_time": 1513814400000,
"end_time": 1513900800000,
"calendar_id": "planned-outages",
"event_id": "Li8LJGEBMTCMA-qz49st"
},
{
"description": "event 3",
"start_time": 1514160000000,
"end_time": 1514246400000,
"calendar_id": "planned-outages",
"event_id": "Ly8LJGEBMTCMA-qz49st"
}
]
}
----
// TESTRESPONSE[s/LS8LJGEBMTCMA-qz49st/$body.$_path/]
// TESTRESPONSE[s/Li8LJGEBMTCMA-qz49st/$body.$_path/]
// TESTRESPONSE[s/Ly8LJGEBMTCMA-qz49st/$body.$_path/]
For more information about these properties, see <<ml-event-resource>>.

View File

@ -5,36 +5,26 @@
<titleabbrev>Get Calendars</titleabbrev>
++++
This API enables you to retrieve configuration information for
calendars.
This API enables you to retrieve configuration information for calendars.
==== Request
`GET _xpack/ml/calendars/<calendar_id>` +
`GET _xpack/ml/calendars/<calendar_id>,<calendar_id>` +
`GET _xpack/ml/calendars/` +
`GET _xpack/ml/calendars/_all`
//===== Description
===== Description
You can get information for a single calendar or for all calendars by using
`_all`.
////
You can get information for multiple jobs in a single API request by using a
group name, a comma-separated list of jobs, or a wildcard expression. You can
get information for all jobs by using `_all`, by specifying `*` as the
`<job_id>`, or by omitting the `<job_id>`.
////
==== Path Parameters
`calendar_id`::
(string) Identifier for the calendar. It can be a calendar identifier or a
wildcard expression. If you do not specify one of these options, the API
returns information for all calendars.
(string) Identifier for the calendar.
==== Request Body
@ -45,15 +35,15 @@ get information for all jobs by using `_all`, by specifying `*` as the
`size`:::
(integer) Specifies the maximum number of calendars to obtain.
//==== Results
////
==== Results
The API returns the following information:
`jobs`::
(array) An array of job resources.
For more information, see <<ml-job-resource,Job Resources>>.
////
`calendars`::
(array) An array of calendar resources.
For more information, see <<ml-calendar-resource>>.
==== Authorization
@ -62,4 +52,31 @@ privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//==== Examples
==== Examples
The following example gets configuration information for the `planned-outages`
calendar:
[source,js]
--------------------------------------------------
GET _xpack/ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
The API returns the following results:
[source,js]
----
{
"count": 1,
"calendars": [
{
"calendar_id": "planned-outages",
"job_ids": [
"total-requests"
]
}
]
}
----
//TESTRESPONSE

View File

@ -0,0 +1,87 @@
[role="xpack"]
[[ml-post-calendar-event]]
=== Add Events to Calendar API
++++
<titleabbrev>Add Events to Calendar</titleabbrev>
++++
This API enables you to post scheduled events in a calendar.
==== Request
`POST _xpack/ml/calendars/<calendar_id>/events`
==== Description
This API accepts a list of {xpack-ref}/ml-calendars.html[scheduled events], each
of which must have a start time, end time, and description.
==== Path Parameters
`calendar_id` (required)::
(string) Identifier for the calendar.
==== Request Body
`events`::
(array) A list of one or more scheduled events. See <<ml-event-resource>>.
==== Authorization
You must have `manage_ml`, or `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
==== Examples
You can add scheduled events to the `planned-outages` calendar as follows:
[source,js]
--------------------------------------------------
POST _xpack/ml/calendars/planned-outages/events
{
"events" : [
{"description": "event 1", "start_time": 1513641600000, "end_time": 1513728000000},
{"description": "event 2", "start_time": 1513814400000, "end_time": 1513900800000},
{"description": "event 3", "start_time": 1514160000000, "end_time": 1514246400000}
]
}
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_addjob]
The API returns the following results:
[source,js]
----
{
"events": [
{
"description": "event 1",
"start_time": 1513641600000,
"end_time": 1513728000000,
"calendar_id": "planned-outages"
},
{
"description": "event 2",
"start_time": 1513814400000,
"end_time": 1513900800000,
"calendar_id": "planned-outages"
},
{
"description": "event 3",
"start_time": 1514160000000,
"end_time": 1514246400000,
"calendar_id": "planned-outages"
}
]
}
----
//TESTRESPONSE
For more information about these properties, see
<<ml-event-resource,Scheduled Event Resources>>.

View File

@ -5,26 +5,21 @@
<titleabbrev>Add Jobs to Calendar</titleabbrev>
++++
This API enables you to add jobs to a calendar.
This API enables you to add a job to a calendar.
==== Request
`PUT _xpack/ml/calendars/<calendar_id>/jobs/<job_id>`
//===== Description
==== Path Parameters
`calendar_id` (required)::
(string) Identifier for the calendar.
//==== Request Body
`job_id` (required)::
(string) Identifier for the job.
`description`::
(string) A description of the calendar.
==== Authorization
@ -33,4 +28,27 @@ For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//==== Examples
==== Examples
The following example associates the `planned-outages` calendar with the
`total-requests` job:
[source,js]
--------------------------------------------------
PUT _xpack/ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------
// CONSOLE
// TEST[setup:calendar_outages_openjob]
The API returns the following results:
[source,js]
----
{
"calendar_id": "planned-outages",
"job_ids": [
"total-requests"
]
}
----
//TESTRESPONSE

View File

@ -11,7 +11,10 @@ This API enables you to instantiate a calendar.
`PUT _xpack/ml/calendars/<calendar_id>`
//===== Description
===== Description
For more information, see
{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events].
==== Path Parameters
@ -24,6 +27,7 @@ This API enables you to instantiate a calendar.
`description`::
(string) A description of the calendar.
==== Authorization
You must have `manage_ml`, or `manage` cluster privileges to use this API.
@ -31,5 +35,22 @@ For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//==== Examples
//See plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml
==== Examples
The following example creates the `planned-outages` calendar:
[source,js]
--------------------------------------------------
PUT _xpack/ml/calendars/planned-outages
--------------------------------------------------
// CONSOLE
When the calendar is created, you receive the following results:
[source,js]
----
{
"calendar_id": "planned-outages",
"job_ids": []
}
----
//TESTRESPONSE

View File

@ -2,10 +2,15 @@
=== PKI User Authentication
You can configure {security} to use Public Key Infrastructure (PKI) certificates
to authenticate users. This requires clients to present X.509 certificates. To
use PKI, you configure a PKI realm, enable client authentication on the desired
network layers (transport or http), and map the Distinguished Names (DNs) from
the user certificates to {security} roles in the <<mapping-roles, role mapping file>>.
to authenticate users in {es}. This requires clients to present X.509
certificates.
NOTE: You cannot use PKI certificates to authenticate users in {kib}.
To use PKI in {es}, you configure a PKI realm, enable client authentication on
the desired network layers (transport or http), and map the Distinguished Names
(DNs) from the user certificates to {security} roles in the
<<mapping-roles, role mapping file>>.
You can also use a combination of PKI and username/password authentication. For
example, you can enable SSL/TLS on the transport layer and define a PKI realm to
@ -22,7 +27,7 @@ IMPORTANT: You must enable SSL/TLS and enabled client authentication to use PKI
Like other realms, you configure options for a `pki` realm under the
`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
To configure `pki` realm:
To configure a `pki` realm:
. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
@ -119,35 +124,10 @@ The `certificate_authorities` option may be used as an alternative to the
[[pki-settings]]
===== PKI Realm Settings
[cols="4,^3,10"]
|=======================
| Setting | Required | Description
| `type` | yes | Indicates the realm type. Must be set to `pki`.
| `order` | no | Indicates the priority of this realm within the realm
chain. Realms with a lower order are consulted first.
Although not required, we recommend explicitly
setting this value when you configure multiple realms.
Defaults to `Integer.MAX_VALUE`.
| `enabled` | no | Indicates whether this realm is enabled or disabled.
Enables you to disable a realm without removing its
configuration. Defaults to `true`.
| `username_pattern` | no | Specifies the regular expression pattern used to extract
the username from the certificate DN. The first match
group is used as the username. Defaults to `CN=(.*?)(?:,\|$)`.
| `certificate_authorities` | no | List of paths to the PEM encoded certificate files
that should be trusted.
This setting may not be used with `truststore.path`.
| `truststore.path` | no | The path to the truststore. Defaults to the path
defined by {ref}/security-settings.html#ssl-tls-settings[SSL/TLS settings].
This setting may not be used with `certificate_authorities`.
| `truststore.password` | no/yes | Specifies the password for the truststore. Must be
provided if `truststore.path` is set.
| `truststore.algorithm` | no | Specifies the algorithm used for the truststore.
Defaults to `SunX509`.
| `files.role_mapping` | no | Specifies the <<security-files-location,location>>
for the <<pki-role-mapping, YAML role mapping configuration file>>.
Defaults to `CONFIG_DIR/x-pack/role_mapping.yml`.
|=======================
See
{ref}/security-settings.html#_settings_valid_for_all_realms[Security Settings for All Realms]
and
{ref}/security-settings.html#ref-pki-settings[PKI Realm Settings].
[[assigning-roles-pki]]
==== Mapping Roles for PKI Users

View File

@ -11,37 +11,23 @@ read-only privileged accounts. Users with document and field level
security enabled for an index should not perform write operations.
A role can define both field and document level permissions on a per-index basis.
A role that doesnt specify field level permissions grants access to ALL fields.
Similarly, a role that doesn't specify document level permissions grants access
to ALL documents in the index.
A role that doesn't specify field level permissions grants access to ALL fields.
Similarly, a role that doesn't specify document level permissions grants access
to ALL documents in the index.
[IMPORTANT]
=====================================================================
When assigning users multiple roles, be careful that you don't inadvertently
grant wider access than intended. Each user has a single set of field level and
document level permissions per index. When you assign a user multiple roles,
the permissions are ORed together. This means if you assign one role that
restricts access to particular fields in an index, and another that doesn't
specify any field level access restrictions for that index, the user will have
access to all fields. The same is true for document level permissions.
For example, let's say `role_a` only grants access to the `address`
field of the documents in `index1`, but doesn't specify any document
restrictions. Conversely, `role_b` limits access to a subset of the documents
in `index1`, but doesn't specify any field restrictions. If you assign a user
both roles, `role_a` gives the user access to all documents and `role_b` gives
the user access to all fields.
If you need to restrict access to both documents and fields, consider splitting
documents by index instead.
grant wider access than intended. Each user has a single set of field level and
document level permissions per index. See <<multiple-roles-dls-fls>>.
=====================================================================
[[field-level-security]]
==== Field Level Security
To enable field level security, you specify the fields that each role can access
as part of the indices permissions in a role definition. This binds field level
security to a well defined set of indices (and potentially a set of
To enable field level security, specify the fields that each role can access
as part of the indices permissions in a role definition. Field level security is
thus bound to a well-defined set of indices (and potentially a set of
<<document-level-security, documents>>).
The following role definition grants read access only to the `category`,
@ -69,7 +55,7 @@ you specify an empty list of fields, only these meta fields are accessible.
NOTE: Omitting the fields entry entirely disables field-level security.
You can also specify field expressions. For example, the following
example grants read access to all fields starting with `event_` prefix:
example grants read access to all fields that start with an `event_` prefix:
[source,js]
--------------------------------------------------
@ -100,7 +86,8 @@ example, assuming the following document:
}
--------------------------------------------------
The following role definition only allows access to the customer `handle` field:
The following role definition enables only read access to the customer `handle`
field:
[source,js]
--------------------------------------------------
@ -117,8 +104,8 @@ The following role definition only allows access to the customer `handle` field:
}
--------------------------------------------------
This is where wildcard support shines. For example, use `customer.*` to only
enable read access to the `customer` data:
This is where wildcard support shines. For example, use `customer.*` to enable
only read access to the `customer` data:
[source,js]
--------------------------------------------------
@ -135,8 +122,7 @@ enable read access to the `customer` data:
}
--------------------------------------------------
Similar to granting field permissions the permission to access fields can be denied with the following syntax:
You can deny permission to access fields with the following syntax:
[source,js]
--------------------------------------------------
@ -157,11 +143,14 @@ Similar to granting field permissions the permission to access fields can be den
The following rules apply:
Absence of "field_security" in a role is equivalent to * access.
Denied fields may only be provided if permission has been granted explicitly to other fields. The exceptions given must be a subset of the
fields that permissions have been granted to.
Denied and granted fields defined implies access to all granted fields except those which match the pattern in denied fields. Example:
* The absence of `field_security` in a role is equivalent to * access.
* If permission has been granted explicitly to some fields, you can specify
denied fields. The denied fields must be a subset of the fields to which
permissions were granted.
* Defining denied and granted fields implies access to all granted fields except
those which match the pattern in the denied fields.
For example:
[source,js]
--------------------------------------------------
@ -179,15 +168,17 @@ Denied and granted fields defined implies access to all granted fields except th
}
--------------------------------------------------
In the above example all fields with the prefix "customer." are allowed except for "customer.handle".
In the above example, users can read all fields with the prefix "customer."
except for "customer.handle".
An empty array for grant (eg. "grant" : []) means that no fields are granted access to.
An empty array for `grant` (for example, `"grant" : []`) means that access has
not been granted to any fields.
===== Field Level Security and Roles
When a user has several roles that specify field level permissions then the resulting field level permissions per index are the union
of the individual role permissions.
For example if these two roles are merged:
When a user has several roles that specify field level permissions, the
resulting field level permissions per index are the union of the individual role
permissions. For example, if these two roles are merged:
[source,js]
--------------------------------------------------
@ -222,7 +213,7 @@ For example if these two roles are merged:
}
--------------------------------------------------
Then the resulting permission would be equal to:
The resulting permission is equal to:
[source,js]
--------------------------------------------------
@ -247,17 +238,19 @@ Then the resulting permission would be equal to:
==== Document Level Security
Document level security restricts the documents that users have read access to.
To enable document level security, you specify a query that matches all the
To enable document level security, specify a query that matches all the
accessible documents as part of the indices permissions within a role definition.
This binds document level security to a well defined set of indices.
Document level security is thus bound to a well defined set of indices.
Enabling document level security restricts which documents can be accessed from any document based read API.
To enable document level security, you use a query to specify the documents that each role can access in the `roles.yml` file.
You specify the document query with the `query` option. The document query is associated with a particular index or index pattern and
operates in conjunction with the privileges specified for the indices.
Enabling document level security restricts which documents can be accessed from
any document-based read API. To enable document level security, you use a query
to specify the documents that each role can access in the `roles.yml` file.
You specify the document query with the `query` option. The document query is
associated with a particular index or index pattern and operates in conjunction
with the privileges specified for the indices.
The following role definition grants read access only to documents that
belong to the `click` category within all the `events-*` indices.
belong to the `click` category within all the `events-*` indices:
[source,js]
--------------------------------------------------
@ -276,10 +269,10 @@ NOTE: Omitting the `query` entry entirely disables document level security for
the respective indices permission entry.
The specified `query` expects the same format as if it was defined in the
search request and supports ELasticsearch's full {ref}/query-dsl.html[Query DSL].
search request and supports the full {es} {ref}/query-dsl.html[Query DSL].
For example, the following role grants read access to all indices, but restricts
access to documents whose `department_id` equals `12`.
For example, the following role grants read access only to the documents whose
`department_id` equals `12`:
[source,js]
--------------------------------------------------
@ -296,16 +289,16 @@ access to documents whose `department_id` equals `12`.
}
--------------------------------------------------
NOTE: `query` also accepts queries written as string values
NOTE: `query` also accepts queries written as string values.
[[templating-role-query]]
===== Templating a Role Query
You can use Mustache templates in a role query to insert the username of the
current authenticated user into the role. Like other places in Elasticsearch
that support templating or scripting, you can specify inline, stored,
or file based templates and define custom parameters. You access the current
authenticated user's details through the `_user` parameter.
current authenticated user into the role. Like other places in {es} that support
templating or scripting, you can specify inline, stored, or file-based templates
and define custom parameters. You access the details for the current
authenticated user through the `_user` parameter.
For example, the following role query uses a template to insert the username
of the current authenticated user:
@ -367,21 +360,24 @@ based on the `group.id` field in your documents:
[[set-security-user-processor]]
===== Set Security User Ingest Processor
If an index is being shared by many small users it makes sense put all these users into the same index as having a
dedicated index or shard per user is too wasteful. In order to guarantee that a user only read its own documents it
makes sense to set up document level security. In order to use document level security for this each document must have
the username or role name associated with it, so that it can be queried by the document level security's role query.
This is where the `set_security_user` ingest processor can help.
If an index is shared by many small users it makes sense to put all these users
into the same index. Having a dedicated index or shard per user is wasteful.
To guarantee that a user reads only their own documents, it makes sense to set up
document level security. In this scenario, each document must have the username
or role name associated with it, so that this information can be used by the
role query for document level security. This is a situation where the
`set_security_user` ingest processor can help.
NOTE: You need to make sure to use unique ids for each user that uses the same index, because document level security
doesn't apply on write APIs and you can overwrite other users' documents. This ingest processor just adds
properties of the current authenticated user to the documents being indexed.
NOTE: Document level security doesn't apply to write APIs. You must use unique
ids for each user that uses the same index, otherwise they might overwrite other
users' documents. The ingest processor just adds properties for the current
authenticated user to the documents that are being indexed.
The `set_security_user` processor attaches user related details (`username`, `roles`, `email`, `full_name` and `metadata` )
from the current authenticated user to the current document by pre-processed by ingest.
So when indexing data with an ingest pipeline then user details get automatically attached with the document:
The `set_security_user` processor attaches user-related details (such as
`username`, `roles`, `email`, `full_name` and `metadata`) from the current
authenticated user to the current document by pre-processing the ingest. When
you index data with an ingest pipeline, user details are automatically attached
to the document. For example:
[source,js]
--------------------------------------------------
@ -403,8 +399,8 @@ about setting up a pipeline and other processors.
| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`.
|======
Example config that adds all user details of the current authenticated user to the `user` field to all documents being
processed by this pipeline:
The following example adds all user details for the current authenticated user
to the `user` field for all documents that are processed by this pipeline:
[source,js]
--------------------------------------------------
@ -423,20 +419,30 @@ processed by this pipeline:
==== Multiple Roles with Document and Field Level Security
A user can have many roles and each role can define different permissions on the
same index. It is important to understand the behavior of Document and Field Level
security in this scenario.
same index. It is important to understand the behavior of document and field
level security in this scenario.
Document level security will take into account each role held by the user, and
combine each document level security query for a given index with an "OR". This
Document level security takes into account each role held by the user and
combines each document level security query for a given index with an "OR". This
means that only one of the role queries must match for a document to be returned.
For example, if a role grants access to an index without document level security
and another grants access with document level security, document level security
will not be applied; the user with both roles will have access to all of the
documents in the index.
is not applied; the user with both roles has access to all of the documents in
the index.
Field level security will take into account each role the user has and combine
Field level security takes into account each role the user has and combines
all of the fields listed into a single set for each index. For example, if a
role grants access to an index without field level security and another grants
access with field level security, field level security will not be applied for
that index; the user with both roles will have access to all of the fields in
in the index.
access with field level security, field level security is not applied for
that index; the user with both roles has access to all of the fields in the
index.
For example, let's say `role_a` grants access to only the `address` field of the
documents in `index1`; it doesn't specify any document restrictions. Conversely,
`role_b` limits access to a subset of the documents in `index1`; it doesn't
specify any field restrictions. If you assign a user both roles, `role_a` gives
the user access to all documents and `role_b` gives the user access to all
fields.
If you need to restrict access to both documents and fields, consider splitting
documents by index instead.

View File

@ -131,10 +131,13 @@ information, see {xpack-ref}/setting-up-authentication.html[Setting Up Authentic
The type of the realm: `native`, `ldap`, `active_directory`, `pki`, or `file`. Required.
`order`::
The priority of the realm within the realm chain. Defaults to `Integer.MAX_VALUE`.
The priority of the realm within the realm chain. Realms with a lower order are
consulted first. Although not required, use of this setting is strongly
recommended when you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
`enabled`::
Enable/disable the realm. Defaults to `true`.
Indicates whether a realm is enabled. You can use this setting to disable a
realm without removing its configuration information. Defaults to `true`.
[[ref-users-settings]]
@ -589,13 +592,14 @@ the in-memory cached user credentials (see {xpack-ref}/controlling-user-cache.ht
`username_pattern`::
The regular expression pattern used to extract the username from the
certificate DN. The first match group is used as the username.
Defaults to `CN=(.*?)(?:,\|$)`
Defaults to `CN=(.*?)(?:,\|$)`.
`certificate_authorities`::
List of PEM certificate files that should be used to authenticate a
user's certificate as trusted. Defaults to the trusted certificates configured for SSL.
See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm documentation for more information.
This setting may not be used with `truststore.path`.
List of paths to the PEM certificate files that should be used to authenticate a
user's certificate as trusted. Defaults to the trusted certificates configured
for SSL. See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings]
section of the PKI realm documentation for more information.
This setting cannot be used with `truststore.path`.
`truststore.algorithm`::
Algorithm for the truststore. Defaults to `SunX509`.
@ -607,9 +611,11 @@ The password for the truststore. Must be provided if `truststore.path` is set.
The password for the truststore.
`truststore.path`::
The path of a truststore to use. Defaults to the trusted certificates configured for SSL.
See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm documentation for more information.
This setting may not be used with `certificate_authorities`.
The path of a truststore to use. Defaults to the trusted certificates configured
for SSL. See the
{xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm
documentation for more information. This setting cannot be used with
`certificate_authorities`.
`files.role_mapping`::
Specifies the {xpack-ref}/security-files.html[location] of the

View File

@ -43,7 +43,6 @@ public abstract class XPackExtension implements SecurityExtension {
}
/**
<<<<<<< HEAD:plugin/core/src/main/java/org/elasticsearch/xpack/core/extensions/XPackExtension.java
* Returns authentication realm implementations added by this extension.
*
* The key of the returned {@link Map} is the type name of the realm, and the value
@ -77,8 +76,6 @@ public abstract class XPackExtension implements SecurityExtension {
}
/**
=======
>>>>>>> master:plugin/core/src/main/java/org/elasticsearch/xpack/extensions/XPackExtension.java
* Returns a list of settings that should be filtered from API calls. In most cases,
* these settings are sensitive such as passwords.
*

View File

@ -17,9 +17,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
public class UpdateCalendarJobAction extends Action<UpdateCalendarJobAction.Request, PutCalendarAction.Response,
UpdateCalendarJobAction.RequestBuilder> {
@ -43,28 +41,28 @@ public class UpdateCalendarJobAction extends Action<UpdateCalendarJobAction.Requ
public static class Request extends ActionRequest {
private String calendarId;
private Set<String> jobIdsToAdd;
private Set<String> jobIdsToRemove;
private String jobIdToAdd;
private String jobIdToRemove;
public Request() {
}
public Request(String calendarId, Set<String> jobIdsToAdd, Set<String> jobIdsToRemove) {
public Request(String calendarId, String jobIdToAdd, String jobIdToRemove) {
this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName());
this.jobIdsToAdd = ExceptionsHelper.requireNonNull(jobIdsToAdd, "job_ids_to_add");
this.jobIdsToRemove = ExceptionsHelper.requireNonNull(jobIdsToRemove, "job_ids_to_remove");
this.jobIdToAdd = jobIdToAdd;
this.jobIdToRemove = jobIdToRemove;
}
public String getCalendarId() {
return calendarId;
}
public Set<String> getJobIdsToAdd() {
return jobIdsToAdd;
public String getJobIdToAdd() {
return jobIdToAdd;
}
public Set<String> getJobIdsToRemove() {
return jobIdsToRemove;
public String getJobIdToRemove() {
return jobIdToRemove;
}
@Override
@ -76,21 +74,21 @@ public class UpdateCalendarJobAction extends Action<UpdateCalendarJobAction.Requ
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
calendarId = in.readString();
jobIdsToAdd = new HashSet<>(in.readList(StreamInput::readString));
jobIdsToRemove = new HashSet<>(in.readList(StreamInput::readString));
jobIdToAdd = in.readOptionalString();
jobIdToRemove = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(calendarId);
out.writeStringList(new ArrayList<>(jobIdsToAdd));
out.writeStringList(new ArrayList<>(jobIdsToRemove));
out.writeOptionalString(jobIdToAdd);
out.writeOptionalString(jobIdToRemove);
}
@Override
public int hashCode() {
return Objects.hash(calendarId, jobIdsToAdd, jobIdsToRemove);
return Objects.hash(calendarId, jobIdToAdd, jobIdToRemove);
}
@Override
@ -102,8 +100,8 @@ public class UpdateCalendarJobAction extends Action<UpdateCalendarJobAction.Requ
return false;
}
Request other = (Request) obj;
return Objects.equals(calendarId, other.calendarId) && Objects.equals(jobIdsToAdd, other.jobIdsToAdd)
&& Objects.equals(jobIdsToRemove, other.jobIdsToRemove);
return Objects.equals(calendarId, other.calendarId) && Objects.equals(jobIdToAdd, other.jobIdToAdd)
&& Objects.equals(jobIdToRemove, other.jobIdToRemove);
}
}

View File

@ -1,18 +1,4 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
@ -20,20 +6,10 @@ grant {
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
permission java.util.PropertyPermission "*", "read,write";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.xmlsec-2.0.8.jar}" {
// needed during initialization of OpenSAML library where xml security algorithms are registered
// see https://github.com/apache/santuario-java/blob/e79f1fe4192de73a975bc7246aee58ed0703343d/src/main/java/org/apache/xml/security/utils/JavaUtils.java#L205-L220
// and https://git.shibboleth.net/view/?p=java-opensaml.git;a=blob;f=opensaml-xmlsec-impl/src/main/java/org/opensaml/xmlsec/signature/impl/SignatureMarshaller.java;hb=db0eaa64210f0e32d359cd6c57bedd57902bf811#l52
// which uses it in the opensaml-xmlsec-impl
permission java.security.SecurityPermission "org.apache.xml.security.register";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -1,31 +1,6 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -1,31 +1,6 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -1,31 +1,6 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -38,12 +38,6 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL
this.clusterService = clusterService;
this.client = client;
clusterService.addListener(this);
clusterService.addLifecycleListener(new LifecycleListener() {
@Override
public void beforeStop() {
super.beforeStop();
}
});
}
@Override

View File

@ -19,6 +19,9 @@ import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.core.ml.job.persistence.JobProvider;
import java.util.HashSet;
import java.util.Set;
public class TransportUpdateCalendarJobAction extends HandledTransportAction<UpdateCalendarJobAction.Request, PutCalendarAction.Response> {
private final ClusterService clusterService;
@ -39,7 +42,17 @@ public class TransportUpdateCalendarJobAction extends HandledTransportAction<Upd
@Override
protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener<PutCalendarAction.Response> listener) {
jobProvider.updateCalendar(request.getCalendarId(), request.getJobIdsToAdd(), request.getJobIdsToRemove(), clusterService.state(),
Set<String> jobIdsToAdd = new HashSet<>();
if (request.getJobIdToAdd() != null && request.getJobIdToAdd().isEmpty() == false) {
jobIdsToAdd.add(request.getJobIdToAdd());
}
Set<String> jobIdsToRemove = new HashSet<>();
if (request.getJobIdToRemove() != null && request.getJobIdToRemove().isEmpty() == false) {
jobIdsToRemove.add(request.getJobIdToRemove());
}
jobProvider.updateCalendar(request.getCalendarId(), jobIdsToAdd, jobIdsToRemove, clusterService.state(),
c -> {
jobManager.updateProcessOnCalendarChanged(c.getJobIds());
listener.onResponse(new PutCalendarAction.Response(c));

View File

@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.calendars.Calendar;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import java.io.IOException;
import java.util.Collections;
public class RestDeleteCalendarJobAction extends BaseRestHandler {
@ -38,7 +37,7 @@ public class RestDeleteCalendarJobAction extends BaseRestHandler {
String calendarId = restRequest.param(Calendar.ID.getPreferredName());
String jobId = restRequest.param(Job.ID.getPreferredName());
UpdateCalendarJobAction.Request request =
new UpdateCalendarJobAction.Request(calendarId, Collections.emptySet(), Collections.singleton(jobId));
new UpdateCalendarJobAction.Request(calendarId, null, jobId);
return channel -> client.execute(UpdateCalendarJobAction.INSTANCE, request, new RestToXContentListener<>(channel));
}
}

View File

@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.calendars.Calendar;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import java.io.IOException;
import java.util.Collections;
public class RestPutCalendarJobAction extends BaseRestHandler {
@ -38,7 +37,7 @@ public class RestPutCalendarJobAction extends BaseRestHandler {
String calendarId = restRequest.param(Calendar.ID.getPreferredName());
String jobId = restRequest.param(Job.ID.getPreferredName());
UpdateCalendarJobAction.Request putCalendarRequest =
new UpdateCalendarJobAction.Request(calendarId, Collections.singleton(jobId), Collections.emptySet());
new UpdateCalendarJobAction.Request(calendarId, jobId, null);
return channel -> client.execute(UpdateCalendarJobAction.INSTANCE, putCalendarRequest, new RestToXContentListener<>(channel));
}
}

View File

@ -1,26 +1,4 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";

View File

@ -8,26 +8,14 @@ package org.elasticsearch.xpack.ml.action;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction;
import java.util.HashSet;
import java.util.Set;
public class UpdateCalendarJobActionResquestTests extends AbstractStreamableTestCase<UpdateCalendarJobAction.Request> {
@Override
protected UpdateCalendarJobAction.Request createTestInstance() {
int addSize = randomIntBetween(0, 2);
Set<String> toAdd = new HashSet<>();
for (int i=0; i<addSize; i++) {
toAdd.add(randomAlphaOfLength(10));
}
int removeSize = randomIntBetween(0, 2);
Set<String> toRemove = new HashSet<>();
for (int i=0; i<removeSize; i++) {
toRemove.add(randomAlphaOfLength(10));
}
return new UpdateCalendarJobAction.Request(randomAlphaOfLength(10), toAdd, toRemove);
return new UpdateCalendarJobAction.Request(randomAlphaOfLength(10),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : randomAlphaOfLength(10));
}
@Override

View File

@ -171,24 +171,24 @@ public class Monitoring extends Plugin implements ActionPlugin {
@Override
public List<Setting<?>> getSettings() {
return Collections.unmodifiableList(
Arrays.asList(MonitoringField.HISTORY_DURATION,
CLEAN_WATCHER_HISTORY,
MonitoringService.INTERVAL,
Exporters.EXPORTERS_SETTINGS,
Collector.INDICES,
ClusterStatsCollector.CLUSTER_STATS_TIMEOUT,
IndexRecoveryCollector.INDEX_RECOVERY_TIMEOUT,
IndexRecoveryCollector.INDEX_RECOVERY_ACTIVE_ONLY,
IndexStatsCollector.INDEX_STATS_TIMEOUT,
JobStatsCollector.JOB_STATS_TIMEOUT,
NodeStatsCollector.NODE_STATS_TIMEOUT)
);
List<Setting<?>> settings = new ArrayList<>();
settings.add(MonitoringField.HISTORY_DURATION);
settings.add(CLEAN_WATCHER_HISTORY);
settings.add(MonitoringService.INTERVAL);
settings.add(Collector.INDICES);
settings.add(ClusterStatsCollector.CLUSTER_STATS_TIMEOUT);
settings.add(IndexRecoveryCollector.INDEX_RECOVERY_TIMEOUT);
settings.add(IndexRecoveryCollector.INDEX_RECOVERY_ACTIVE_ONLY);
settings.add(IndexStatsCollector.INDEX_STATS_TIMEOUT);
settings.add(JobStatsCollector.JOB_STATS_TIMEOUT);
settings.add(NodeStatsCollector.NODE_STATS_TIMEOUT);
settings.addAll(Exporters.getSettings());
return Collections.unmodifiableList(settings);
}
@Override
public List<String> getSettingsFilter() {
final String exportersKey = Exporters.EXPORTERS_SETTINGS.getKey();
final String exportersKey = "xpack.monitoring.exporters.";
return Collections.unmodifiableList(Arrays.asList(exportersKey + "*.auth.*", exportersKey + "*.ssl.*"));
}
}

View File

@ -16,7 +16,6 @@ import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Set;
@ -142,7 +141,8 @@ public class ClusterAlertsUtil {
* @throws SettingsException if an unknown cluster alert ID exists in the blacklist.
*/
public static List<String> getClusterAlertsBlacklist(final Exporter.Config config) {
final List<String> blacklist = config.settings().getAsList(CLUSTER_ALERTS_BLACKLIST_SETTING, Collections.emptyList());
final List<String> blacklist =
CLUSTER_ALERTS_BLACKLIST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// validate the blacklist only contains recognized IDs
if (blacklist.isEmpty() == false) {
@ -151,9 +151,8 @@ public class ClusterAlertsUtil {
if (unknownIds.isEmpty() == false) {
throw new SettingsException(
"[" + Exporter.settingFQN(config, CLUSTER_ALERTS_BLACKLIST_SETTING) + "] contains unrecognized Cluster Alert IDs [" +
String.join(", ", unknownIds) + "]"
);
"[" + CLUSTER_ALERTS_BLACKLIST_SETTING.getConcreteSettingForNamespace(config.name()).getKey() +
"] contains unrecognized Cluster Alert IDs [" + String.join(", ", unknownIds) + "]");
}
}

View File

@ -6,36 +6,70 @@
package org.elasticsearch.xpack.monitoring.exporter;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.license.XPackLicenseState;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
public abstract class Exporter implements AutoCloseable {
private static final Setting.AffixSetting<Boolean> ENABLED_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","enabled",
(key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> TYPE_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","type",
(key) -> Setting.simpleString(key, (v, s) -> {
switch (v) {
case "":
case "http":
case "local":
break;
default:
throw new IllegalArgumentException("only exporter types [http] and [local] are allowed [" + v +
"] is invalid");
}
}, Property.Dynamic, Property.NodeScope));
/**
* Every {@code Exporter} adds the ingest pipeline to bulk requests, but they should, at the exporter level, allow that to be disabled.
* <p>
* Note: disabling it obviously loses any benefit of using it, but it does allow clusters that don't run with ingest to not use it.
*/
public static final String USE_INGEST_PIPELINE_SETTING = "use_ingest";
public static final Setting.AffixSetting<Boolean> USE_INGEST_PIPELINE_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","use_ingest",
(key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope));
/**
* Every {@code Exporter} allows users to explicitly disable cluster alerts.
*/
public static final String CLUSTER_ALERTS_MANAGEMENT_SETTING = "cluster_alerts.management.enabled";
public static final Setting.AffixSetting<Boolean> CLUSTER_ALERTS_MANAGEMENT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.", "cluster_alerts.management.enabled",
(key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope));
/**
* Every {@code Exporter} allows users to explicitly disable specific cluster alerts.
* <p>
* When cluster alerts management is enabled, this should delete anything blacklisted here in addition to not creating it.
*/
public static final String CLUSTER_ALERTS_BLACKLIST_SETTING = "cluster_alerts.management.blacklist";
public static final Setting.AffixSetting<List<String>> CLUSTER_ALERTS_BLACKLIST_SETTING = Setting
.affixKeySetting("xpack.monitoring.exporters.", "cluster_alerts.management.blacklist",
(key) -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope));
/**
* Every {@code Exporter} allows users to use a different index time format.
*/
public static final String INDEX_NAME_TIME_FORMAT_SETTING = "index.name.time_format";
private static final Setting.AffixSetting<String> INDEX_NAME_TIME_FORMAT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","index.name.time_format",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
private static final String INDEX_FORMAT = "YYYY.MM.dd";
protected final Config config;
@ -77,43 +111,39 @@ public abstract class Exporter implements AutoCloseable {
protected abstract void doClose();
protected static String settingFQN(final Config config) {
return Exporters.EXPORTERS_SETTINGS.getKey() + config.name;
}
public static String settingFQN(final Config config, final String setting) {
return Exporters.EXPORTERS_SETTINGS.getKey() + config.name + "." + setting;
}
protected static DateTimeFormatter dateTimeFormatter(final Config config) {
String format = config.settings().get(INDEX_NAME_TIME_FORMAT_SETTING, "YYYY.MM.dd");
Setting<String> setting = INDEX_NAME_TIME_FORMAT_SETTING.getConcreteSettingForNamespace(config.name);
String format = setting.exists(config.settings()) ? setting.get(config.settings()) : INDEX_FORMAT;
try {
return DateTimeFormat.forPattern(format).withZoneUTC();
} catch (IllegalArgumentException e) {
throw new SettingsException("[" + settingFQN(config, INDEX_NAME_TIME_FORMAT_SETTING)
+ "] invalid index name time format: [" + format + "]", e);
throw new SettingsException("[" + INDEX_NAME_TIME_FORMAT_SETTING.getKey() + "] invalid index name time format: ["
+ format + "]", e);
}
}
public static List<Setting.AffixSetting<?>> getSettings() {
return Arrays.asList(USE_INGEST_PIPELINE_SETTING, CLUSTER_ALERTS_MANAGEMENT_SETTING, TYPE_SETTING, ENABLED_SETTING,
INDEX_NAME_TIME_FORMAT_SETTING, CLUSTER_ALERTS_BLACKLIST_SETTING);
}
public static class Config {
private final String name;
private final String type;
private final boolean enabled;
private final Settings globalSettings;
private final Settings settings;
private final ClusterService clusterService;
private final XPackLicenseState licenseState;
public Config(String name, String type, Settings globalSettings, Settings settings,
public Config(String name, String type, Settings settings,
ClusterService clusterService, XPackLicenseState licenseState) {
this.name = name;
this.type = type;
this.globalSettings = globalSettings;
this.settings = settings;
this.clusterService = clusterService;
this.licenseState = licenseState;
this.enabled = settings.getAsBoolean("enabled", true);
this.enabled = ENABLED_SETTING.getConcreteSettingForNamespace(name).get(settings);
}
public String name() {
@ -128,10 +158,6 @@ public abstract class Exporter implements AutoCloseable {
return enabled;
}
public Settings globalSettings() {
return globalSettings;
}
public Settings settings() {
return settings;
}

View File

@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.monitoring.exporter.http.HttpExporter;
import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.local.LocalExporter;
@ -33,16 +34,9 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.settings.Setting.groupSetting;
public class Exporters extends AbstractLifecycleComponent implements Iterable<Exporter> {
/**
* Settings/Options per configured exporter
*/
public static final Setting<Settings> EXPORTERS_SETTINGS =
groupSetting("xpack.monitoring.exporters.", Setting.Property.Dynamic, Setting.Property.NodeScope);
private final Map<String, Exporter.Factory> factories;
private final AtomicReference<Map<String, Exporter>> exporters;
private final ClusterService clusterService;
@ -60,14 +54,15 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
this.clusterService = Objects.requireNonNull(clusterService);
this.licenseState = Objects.requireNonNull(licenseState);
clusterService.getClusterSettings().addSettingsUpdateConsumer(EXPORTERS_SETTINGS, this::setExportersSetting);
clusterService.getClusterSettings().addSettingsUpdateConsumer(this::setExportersSetting, getSettings());
// this ensures, that logging is happening by adding an empty consumer per affix setting
for (Setting.AffixSetting<?> affixSetting : getSettings()) {
clusterService.getClusterSettings().addAffixUpdateConsumer(affixSetting, (s, o) -> {}, (s, o) -> {});
}
}
private void setExportersSetting(Settings exportersSetting) {
if (this.lifecycleState() == Lifecycle.State.STARTED) {
if (exportersSetting.names().isEmpty()) {
return;
}
if (this.lifecycle.started()) {
Map<String, Exporter> updated = initExporters(exportersSetting);
closeExporters(logger, this.exporters.getAndSet(updated));
}
@ -75,7 +70,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
@Override
protected void doStart() {
exporters.set(initExporters(EXPORTERS_SETTINGS.get(settings)));
exporters.set(initExporters(settings));
}
@Override
@ -129,10 +124,11 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
return bulks.isEmpty() ? null : new ExportBulk.Compound(bulks, threadContext);
}
Map<String, Exporter> initExporters(Settings exportersSettings) {
Map<String, Exporter> initExporters(Settings settings) {
Set<String> singletons = new HashSet<>();
Map<String, Exporter> exporters = new HashMap<>();
boolean hasDisabled = false;
Settings exportersSettings = settings.getByPrefix("xpack.monitoring.exporters.");
for (String name : exportersSettings.names()) {
Settings exporterSettings = exportersSettings.getAsSettings(name);
String type = exporterSettings.get("type");
@ -143,7 +139,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
if (factory == null) {
throw new SettingsException("unknown exporter type [" + type + "] set for exporter [" + name + "]");
}
Exporter.Config config = new Exporter.Config(name, type, settings, exporterSettings, clusterService, licenseState);
Exporter.Config config = new Exporter.Config(name, type, settings, clusterService, licenseState);
if (!config.enabled()) {
hasDisabled = true;
if (logger.isDebugEnabled()) {
@ -171,8 +167,7 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
//
if (exporters.isEmpty() && !hasDisabled) {
Exporter.Config config =
new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE, settings, Settings.EMPTY,
clusterService, licenseState);
new Exporter.Config("default_" + LocalExporter.TYPE, LocalExporter.TYPE, settings, clusterService, licenseState);
exporters.put(config.name(), factories.get(LocalExporter.TYPE).create(config));
}
@ -210,4 +205,14 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
listener.onResponse(null);
}
}
/**
* Return all the settings of all the exporters, no matter if HTTP or Local
*/
public static List<Setting.AffixSetting<?>> getSettings() {
List<Setting.AffixSetting<?>> settings = new ArrayList<>();
settings.addAll(Exporter.getSettings());
settings.addAll(HttpExporter.getSettings());
return settings;
}
}

View File

@ -22,9 +22,12 @@ import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer.Scheme;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.TimeValue;
@ -41,12 +44,14 @@ import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
/**
@ -71,45 +76,66 @@ public class HttpExporter extends Exporter {
/**
* A string array representing the Elasticsearch node(s) to communicate with over HTTP(S).
*/
public static final String HOST_SETTING = "host";
public static final Setting.AffixSetting<List<String>> HOST_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","host",
(key) -> Setting.listSetting(key, Collections.emptyList(), Function.identity(),
Property.Dynamic, Property.NodeScope));
/**
* Master timeout associated with bulk requests.
*/
public static final String BULK_TIMEOUT_SETTING = "bulk.timeout";
public static final Setting.AffixSetting<TimeValue> BULK_TIMEOUT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","bulk.timeout",
(key) -> Setting.timeSetting(key, TimeValue.timeValueSeconds(10), Property.Dynamic, Property.NodeScope));
/**
* Timeout used for initiating a connection.
*/
public static final String CONNECTION_TIMEOUT_SETTING = "connection.timeout";
public static final Setting.AffixSetting<TimeValue> CONNECTION_TIMEOUT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","connection.timeout",
(key) -> Setting.timeSetting(key, TimeValue.timeValueSeconds(6), Property.Dynamic, Property.NodeScope));
/**
* Timeout used for reading from the connection.
*/
public static final String CONNECTION_READ_TIMEOUT_SETTING = "connection.read_timeout";
public static final Setting.AffixSetting<TimeValue> CONNECTION_READ_TIMEOUT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","connection.read_timeout",
(key) -> Setting.timeSetting(key, TimeValue.timeValueSeconds(60), Property.Dynamic, Property.NodeScope));
/**
* Username for basic auth.
*/
public static final String AUTH_USERNAME_SETTING = "auth.username";
public static final Setting.AffixSetting<String> AUTH_USERNAME_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","auth.username",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
/**
* Password for basic auth.
*/
public static final String AUTH_PASSWORD_SETTING = "auth.password";
public static final Setting.AffixSetting<String> AUTH_PASSWORD_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","auth.password",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
/**
* The SSL settings.
*
* @see SSLService
*/
public static final String SSL_SETTING = "ssl";
public static final Setting.AffixSetting<Settings> SSL_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","ssl",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope, Property.Filtered));
/**
* Proxy setting to allow users to send requests to a remote cluster that requires a proxy base path.
*/
public static final String PROXY_BASE_PATH_SETTING = "proxy.base_path";
public static final Setting.AffixSetting<String> PROXY_BASE_PATH_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","proxy.base_path",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
/**
* A boolean setting to enable or disable sniffing for extra connections.
*/
public static final String SNIFF_ENABLED_SETTING = "sniff.enabled";
public static final Setting.AffixSetting<Boolean> SNIFF_ENABLED_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","sniff.enabled",
(key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));
/**
* A parent setting to header key/value pairs, whose names are user defined.
*/
public static final String HEADERS_SETTING = "headers";
public static final Setting.AffixSetting<Settings> HEADERS_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","headers",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));
/**
* Blacklist of headers that the user is not allowed to set.
* <p>
@ -119,15 +145,21 @@ public class HttpExporter extends Exporter {
/**
* ES level timeout used when checking and writing templates (used to speed up tests)
*/
public static final String TEMPLATE_CHECK_TIMEOUT_SETTING = "index.template.master_timeout";
public static final Setting.AffixSetting<TimeValue> TEMPLATE_CHECK_TIMEOUT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","index.template.master_timeout",
(key) -> Setting.timeSetting(key, TimeValue.timeValueSeconds(10), Property.Dynamic, Property.NodeScope));
/**
* A boolean setting to enable or disable whether to create placeholders for the old templates.
*/
public static final String TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING = "index.template.create_legacy_templates";
public static final Setting.AffixSetting<Boolean> TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","index.template.create_legacy_templates",
(key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope));
/**
* ES level timeout used when checking and writing pipelines (used to speed up tests)
*/
public static final String PIPELINE_CHECK_TIMEOUT_SETTING = "index.pipeline.master_timeout";
public static final Setting.AffixSetting<TimeValue> PIPELINE_CHECK_TIMEOUT_SETTING =
Setting.affixKeySetting("xpack.monitoring.exporters.","index.pipeline.master_timeout",
(key) -> Setting.timeSetting(key, TimeValue.timeValueSeconds(10), Property.Dynamic, Property.NodeScope));
/**
* Minimum supported version of the remote monitoring cluster (same major).
@ -234,14 +266,15 @@ public class HttpExporter extends Exporter {
*/
static RestClient createRestClient(final Config config, final SSLService sslService, final NodeFailureListener listener) {
final RestClientBuilder builder = RestClient.builder(createHosts(config)).setFailureListener(listener);
final String proxyBasePath = config.settings().get(PROXY_BASE_PATH_SETTING);
Setting<String> concreteSetting = PROXY_BASE_PATH_SETTING.getConcreteSettingForNamespace(config.name());
final String proxyBasePath = concreteSetting.get(config.settings());
// allow the user to configure proxies
if (proxyBasePath != null) {
if (Strings.isNullOrEmpty(proxyBasePath) == false) {
try {
builder.setPathPrefix(proxyBasePath);
} catch (final IllegalArgumentException e) {
throw new SettingsException("[" + settingFQN(config, "proxy.base_path") + "] is malformed [" + proxyBasePath + "]", e);
throw new SettingsException("[" + concreteSetting.getKey() + "] is malformed [" + proxyBasePath + "]", e);
}
}
@ -265,12 +298,12 @@ public class HttpExporter extends Exporter {
* @throws IndexOutOfBoundsException if no {@linkplain #HOST_SETTING hosts} are set
*/
static Sniffer createSniffer(final Config config, final RestClient client, final NodeFailureListener listener) {
final Settings settings = config.settings();
Sniffer sniffer = null;
// the sniffer is allowed to be ENABLED; it's disabled by default until we think it's ready for use
if (settings.getAsBoolean(SNIFF_ENABLED_SETTING, false)) {
final List<String> hosts = config.settings().getAsList(HOST_SETTING);
boolean sniffingEnabled = SNIFF_ENABLED_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
if (sniffingEnabled) {
final List<String> hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// createHosts(config) ensures that all schemes are the same for all hosts!
final Scheme scheme = hosts.get(0).startsWith("https") ? Scheme.HTTPS : Scheme.HTTP;
final ElasticsearchHostsSniffer hostsSniffer =
@ -281,7 +314,7 @@ public class HttpExporter extends Exporter {
// inform the sniffer whenever there's a node failure
listener.setSniffer(sniffer);
logger.debug("[" + settingFQN(config) + "] using host sniffing");
logger.debug("exporter [{}] using host sniffing", config.name());
}
return sniffer;
@ -294,7 +327,7 @@ public class HttpExporter extends Exporter {
* @return Never {@code null}.
*/
static MultiHttpResource createResources(final Config config) {
final String resourceOwnerName = settingFQN(config);
final String resourceOwnerName = "xpack.monitoring.exporters." + config.name();
// order controls the order that each is checked; more direct checks should always happen first (e.g., version checks)
final List<HttpResource> resources = new ArrayList<>();
@ -319,10 +352,11 @@ public class HttpExporter extends Exporter {
* @throws SettingsException if any setting is malformed or if no host is set
*/
private static HttpHost[] createHosts(final Config config) {
final List<String> hosts = config.settings().getAsList(HOST_SETTING);
final List<String> hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());;
String configKey = HOST_SETTING.getConcreteSettingForNamespace(config.name()).getKey();
if (hosts.isEmpty()) {
throw new SettingsException("missing required setting [" + settingFQN(config, HOST_SETTING) + "]");
throw new SettingsException("missing required setting [" + configKey + "]");
}
final List<HttpHost> httpHosts = new ArrayList<>(hosts.size());
@ -336,7 +370,7 @@ public class HttpExporter extends Exporter {
try {
httpHost = HttpHostBuilder.builder(host).build();
} catch (IllegalArgumentException e) {
throw new SettingsException("[" + settingFQN(config, HOST_SETTING) + "] invalid host: [" + host + "]", e);
throw new SettingsException("[" + configKey + "] invalid host: [" + host + "]", e);
}
if ("http".equals(httpHost.getSchemeName())) {
@ -347,16 +381,13 @@ public class HttpExporter extends Exporter {
// fail if we find them configuring the scheme/protocol in different ways
if (httpHostFound && httpsHostFound) {
throw new SettingsException(
"[" + settingFQN(config, HOST_SETTING) + "] must use a consistent scheme: http or https");
throw new SettingsException("[" + configKey + "] must use a consistent scheme: http or https");
}
httpHosts.add(httpHost);
}
if (logger.isDebugEnabled()) {
logger.debug("[{}] using hosts {}", settingFQN(config), hosts);
}
logger.debug("exporter [{}] using hosts {}", config.name(), hosts);
return httpHosts.toArray(new HttpHost[httpHosts.size()]);
}
@ -369,7 +400,8 @@ public class HttpExporter extends Exporter {
* @throws SettingsException if any header is {@linkplain #BLACKLISTED_HEADERS blacklisted}
*/
private static void configureHeaders(final RestClientBuilder builder, final Config config) {
final Settings headerSettings = config.settings().getAsSettings(HEADERS_SETTING);
Setting<Settings> concreteSetting = HEADERS_SETTING.getConcreteSettingForNamespace(config.name());
final Settings headerSettings = concreteSetting.get(config.settings());
final Set<String> names = headerSettings.names();
// Most users won't define headers
@ -382,14 +414,13 @@ public class HttpExporter extends Exporter {
// record and validate each header as best we can
for (final String name : names) {
if (BLACKLISTED_HEADERS.contains(name)) {
throw new SettingsException("[" + name + "] cannot be overwritten via [" + settingFQN(config, "headers") + "]");
throw new SettingsException("header cannot be overwritten via [" + concreteSetting.getKey() + name + "]");
}
final List<String> values = headerSettings.getAsList(name);
if (values.isEmpty()) {
final String settingName = settingFQN(config, "headers." + name);
throw new SettingsException("headers must have values, missing for setting [" + settingName + "]");
throw new SettingsException("headers must have values, missing for setting [" + concreteSetting.getKey() + name + "]");
}
// add each value as a separate header; they literally appear like:
@ -414,16 +445,19 @@ public class HttpExporter extends Exporter {
* @throws SettingsException if any setting causes issues
*/
private static void configureSecurity(final RestClientBuilder builder, final Config config, final SSLService sslService) {
final Settings sslSettings = config.settings().getAsSettings(SSL_SETTING);
final Settings sslSettings = SSL_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
final SSLIOSessionStrategy sslStrategy = sslService.sslIOSessionStrategy(sslSettings);
final CredentialsProvider credentialsProvider = createCredentialsProvider(config);
List<String> hostList = config.settings().getAsList(HOST_SETTING);
List<String> hostList = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());;
// sending credentials in plaintext!
if (credentialsProvider != null && hostList.stream().findFirst().orElse("").startsWith("https") == false) {
logger.warn("[" + settingFQN(config) + "] is not using https, but using user authentication with plaintext username/password!");
logger.warn("exporter [{}] is not using https, but using user authentication with plaintext " +
"username/password!", config.name());
}
builder.setHttpClientConfigCallback(new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider));
if (sslStrategy != null) {
builder.setHttpClientConfigCallback(new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider));
}
}
/**
@ -433,10 +467,10 @@ public class HttpExporter extends Exporter {
* @param config The exporter's configuration
*/
private static void configureTimeouts(final RestClientBuilder builder, final Config config) {
final Settings settings = config.settings();
final TimeValue connectTimeout = settings.getAsTime(CONNECTION_TIMEOUT_SETTING, TimeValue.timeValueMillis(6000));
final TimeValue socketTimeout = settings.getAsTime(CONNECTION_READ_TIMEOUT_SETTING,
TimeValue.timeValueMillis(connectTimeout.millis() * 10));
final TimeValue connectTimeout =
CONNECTION_TIMEOUT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
final TimeValue socketTimeout =
CONNECTION_READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// if the values could ever be null, then we should only set it if they're not null
builder.setRequestConfigCallback(new TimeoutRequestConfigCallback(connectTimeout, socketTimeout));
@ -452,15 +486,15 @@ public class HttpExporter extends Exporter {
*/
@Nullable
private static CredentialsProvider createCredentialsProvider(final Config config) {
final Settings settings = config.settings();
final String username = settings.get(AUTH_USERNAME_SETTING);
final String password = settings.get(AUTH_PASSWORD_SETTING);
final String username = AUTH_USERNAME_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
final String password = AUTH_PASSWORD_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// username is required for any auth
if (username == null) {
if (password != null) {
if (Strings.isNullOrEmpty(username)) {
if (Strings.isNullOrEmpty(password) == false) {
throw new SettingsException(
"[" + settingFQN(config, AUTH_PASSWORD_SETTING) + "] without [" + settingFQN(config, AUTH_USERNAME_SETTING) + "]");
"[" + AUTH_PASSWORD_SETTING.getConcreteSettingForNamespace(config.name()).getKey() + "] without [" +
AUTH_USERNAME_SETTING.getConcreteSettingForNamespace(config.name()).getKey() + "]");
}
// nothing to configure; default situation for most users
return null;
@ -479,8 +513,7 @@ public class HttpExporter extends Exporter {
* @return Never {@code null}. Can be empty.
*/
static Map<String, String> createDefaultParams(final Config config) {
final Settings settings = config.settings();
final TimeValue bulkTimeout = settings.getAsTime(BULK_TIMEOUT_SETTING, null);
final TimeValue bulkTimeout = BULK_TIMEOUT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
final MapBuilder<String, String> params = new MapBuilder<>();
@ -489,7 +522,7 @@ public class HttpExporter extends Exporter {
}
// allow the use of ingest pipelines to be completely optional
if (settings.getAsBoolean(USE_INGEST_PIPELINE_SETTING, true)) {
if (USE_INGEST_PIPELINE_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings())) {
params.put("pipeline", MonitoringTemplateUtils.pipelineName(MonitoringTemplateUtils.TEMPLATE_VERSION));
}
@ -509,8 +542,8 @@ public class HttpExporter extends Exporter {
private static void configureTemplateResources(final Config config,
final String resourceOwnerName,
final List<HttpResource> resources) {
final Settings settings = config.settings();
final TimeValue templateTimeout = settings.getAsTime(TEMPLATE_CHECK_TIMEOUT_SETTING, null);
final TimeValue templateTimeout =
TEMPLATE_CHECK_TIMEOUT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// add templates not managed by resolvers
for (final String templateId : MonitoringTemplateUtils.TEMPLATE_IDS) {
@ -521,7 +554,9 @@ public class HttpExporter extends Exporter {
}
// add old templates, like ".monitoring-data-2" and ".monitoring-es-2" so that other versions can continue to work
if (settings.getAsBoolean(TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING, true)) {
boolean createLegacyTemplates =
TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
if (createLegacyTemplates) {
for (final String templateId : MonitoringTemplateUtils.OLD_TEMPLATE_IDS) {
final String templateName = MonitoringTemplateUtils.oldTemplateName(templateId);
final Supplier<String> templateLoader = () -> MonitoringTemplateUtils.createEmptyTemplate(templateId);
@ -540,11 +575,10 @@ public class HttpExporter extends Exporter {
*/
private static void configurePipelineResources(final Config config, final String resourceOwnerName,
final List<HttpResource> resources) {
final Settings settings = config.settings();
// don't require pipelines if we're not using them
if (settings.getAsBoolean(USE_INGEST_PIPELINE_SETTING, true)) {
final TimeValue pipelineTimeout = settings.getAsTime(PIPELINE_CHECK_TIMEOUT_SETTING, null);
if (USE_INGEST_PIPELINE_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings())) {
final TimeValue pipelineTimeout =
PIPELINE_CHECK_TIMEOUT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
// add all pipelines
for (final String pipelineId : MonitoringTemplateUtils.PIPELINE_IDS) {
@ -567,10 +601,8 @@ public class HttpExporter extends Exporter {
*/
private static void configureClusterAlertsResources(final Config config, final String resourceOwnerName,
final List<HttpResource> resources) {
final Settings settings = config.settings();
// don't create watches if we're not using them
if (settings.getAsBoolean(CLUSTER_ALERTS_MANAGEMENT_SETTING, true)) {
if (CLUSTER_ALERTS_MANAGEMENT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings())) {
final ClusterService clusterService = config.clusterService();
final List<HttpResource> watchResources = new ArrayList<>();
final List<String> blacklist = ClusterAlertsUtil.getClusterAlertsBlacklist(config);
@ -612,7 +644,8 @@ public class HttpExporter extends Exporter {
public HttpExportBulk openBulk() {
// block until all resources are verified to exist
if (isExporterReady()) {
return new HttpExportBulk(settingFQN(config), client, defaultParams, dateTimeFormatter, threadContext);
String name = "xpack.monitoring.exporters." + config.name();
return new HttpExportBulk(name, client, defaultParams, dateTimeFormatter, threadContext);
}
return null;
@ -634,4 +667,10 @@ public class HttpExporter extends Exporter {
}
}
}
public static List<Setting.AffixSetting<?>> getSettings() {
return Arrays.asList(HOST_SETTING, TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING, AUTH_PASSWORD_SETTING, AUTH_USERNAME_SETTING,
BULK_TIMEOUT_SETTING, CONNECTION_READ_TIMEOUT_SETTING, CONNECTION_TIMEOUT_SETTING, PIPELINE_CHECK_TIMEOUT_SETTING,
PROXY_BASE_PATH_SETTING, SNIFF_ENABLED_SETTING, TEMPLATE_CHECK_TIMEOUT_SETTING, SSL_SETTING);
}
}

View File

@ -105,7 +105,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
this.client = client;
this.clusterService = config.clusterService();
this.licenseState = config.licenseState();
this.useIngest = config.settings().getAsBoolean(USE_INGEST_PIPELINE_SETTING, true);
this.useIngest = USE_INGEST_PIPELINE_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
this.clusterAlertBlacklist = ClusterAlertsUtil.getClusterAlertsBlacklist(config);
this.cleanerService = cleanerService;
this.dateTimeFormatter = dateTimeFormatter(config);
@ -478,8 +478,8 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
* @return {@code true} to use Cluster Alerts.
*/
private boolean canUseWatcher() {
return XPackSettings.WATCHER_ENABLED.get(config.globalSettings()) &&
config.settings().getAsBoolean(CLUSTER_ALERTS_MANAGEMENT_SETTING, true);
return XPackSettings.WATCHER_ENABLED.get(config.settings()) &&
CLUSTER_ALERTS_MANAGEMENT_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
}
@Override

View File

@ -13,19 +13,8 @@ grant {
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -116,12 +116,12 @@ public class ClusterAlertsUtilTests extends ESTestCase {
private Exporter.Config createConfigWithBlacklist(final String name, final List<String> blacklist) {
final Settings settings = Settings.builder()
.putList(Exporter.CLUSTER_ALERTS_BLACKLIST_SETTING, blacklist)
.putList("xpack.monitoring.exporters." + name + ".cluster_alerts.management.blacklist", blacklist)
.build();
final ClusterService clusterService = mock(ClusterService.class);
final XPackLicenseState licenseState = mock(XPackLicenseState.class);
return new Exporter.Config(name, "fake", Settings.EMPTY, settings, clusterService, licenseState);
return new Exporter.Config(name, "local", settings, clusterService, licenseState);
}
}

View File

@ -10,6 +10,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@ -22,6 +23,7 @@ import org.elasticsearch.xpack.core.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.MonitoringService;
import org.elasticsearch.xpack.monitoring.cleaner.CleanerService;
import org.elasticsearch.xpack.monitoring.exporter.http.HttpExporter;
import org.elasticsearch.xpack.monitoring.exporter.local.LocalExporter;
import org.junit.Before;
@ -33,6 +35,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
@ -72,8 +75,9 @@ public class ExportersTests extends ESTestCase {
clusterService = mock(ClusterService.class);
// default state.version() will be 0, which is "valid"
state = mock(ClusterState.class);
clusterSettings = new ClusterSettings(Settings.EMPTY,
new HashSet<>(Arrays.asList(MonitoringService.INTERVAL, Exporters.EXPORTERS_SETTINGS)));
Set<Setting<?>> settingsSet = new HashSet<>(Exporters.getSettings());
settingsSet.add(MonitoringService.INTERVAL);
clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet);
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
when(clusterService.state()).thenReturn(state);
@ -94,23 +98,23 @@ public class ExportersTests extends ESTestCase {
}
public void testInitExportersSingle() throws Exception {
factories.put("_type", TestExporter::new);
factories.put("local", TestExporter::new);
Map<String, Exporter> internalExporters = exporters.initExporters(Settings.builder()
.put("_name.type", "_type")
.put("xpack.monitoring.exporters._name.type", "local")
.build());
assertThat(internalExporters, notNullValue());
assertThat(internalExporters.size(), is(1));
assertThat(internalExporters, hasKey("_name"));
assertThat(internalExporters.get("_name"), instanceOf(TestExporter.class));
assertThat(internalExporters.get("_name").config().type(), is("_type"));
assertThat(internalExporters.get("_name").config().type(), is("local"));
}
public void testInitExportersSingleDisabled() throws Exception {
factories.put("_type", TestExporter::new);
factories.put("local", TestExporter::new);
Map<String, Exporter> internalExporters = exporters.initExporters(Settings.builder()
.put("_name.type", "_type")
.put("_name.enabled", false)
.put("xpack.monitoring.exporters._name.type", "local")
.put("xpack.monitoring.exporters._name.enabled", false)
.build());
assertThat(internalExporters, notNullValue());
@ -120,32 +124,23 @@ public class ExportersTests extends ESTestCase {
}
public void testInitExportersSingleUnknownType() throws Exception {
try {
exporters.initExporters(Settings.builder()
.put("_name.type", "unknown_type")
.build());
fail("Expected SettingsException");
} catch (SettingsException e) {
assertThat(e.getMessage(), containsString("unknown exporter type [unknown_type]"));
}
SettingsException e = expectThrows(SettingsException.class, () -> exporters.initExporters(Settings.builder()
.put("xpack.monitoring.exporters._name.type", "unknown_type")
.build()));
assertThat(e.getMessage(), containsString("unknown exporter type [unknown_type]"));
}
public void testInitExportersSingleMissingExporterType() throws Exception {
try {
exporters.initExporters(Settings.builder()
.put("_name.foo", "bar")
.build());
fail("Expected SettingsException");
} catch (SettingsException e) {
assertThat(e.getMessage(), containsString("missing exporter type for [_name]"));
}
SettingsException e = expectThrows(SettingsException.class, () -> exporters.initExporters(
Settings.builder().put("xpack.monitoring.exporters._name.foo", "bar").build()));
assertThat(e.getMessage(), containsString("missing exporter type for [_name]"));
}
public void testInitExportersMultipleSameType() throws Exception {
factories.put("_type", TestExporter::new);
Map<String, Exporter> internalExporters = exporters.initExporters(Settings.builder()
.put("_name0.type", "_type")
.put("_name1.type", "_type")
.put("xpack.monitoring.exporters._name0.type", "_type")
.put("xpack.monitoring.exporters._name1.type", "_type")
.build());
assertThat(internalExporters, notNullValue());
@ -159,26 +154,27 @@ public class ExportersTests extends ESTestCase {
}
public void testInitExportersMultipleSameTypeSingletons() throws Exception {
factories.put("_type", TestSingletonExporter::new);
factories.put("local", TestSingletonExporter::new);
SettingsException e = expectThrows(SettingsException.class, () ->
exporters.initExporters(Settings.builder()
.put("_name0.type", "_type")
.put("_name1.type", "_type")
.put("xpack.monitoring.exporters._name0.type", "local")
.put("xpack.monitoring.exporters._name1.type", "local")
.build())
);
assertThat(e.getMessage(), containsString("multiple [_type] exporters are configured. there can only be one"));
assertThat(e.getMessage(), containsString("multiple [local] exporters are configured. there can only be one"));
}
public void testSettingsUpdate() throws Exception {
factories.put("_type", TestExporter::new);
factories.put("http", TestExporter::new);
factories.put("local", TestExporter::new);
final AtomicReference<Settings> settingsHolder = new AtomicReference<>();
Settings nodeSettings = Settings.builder()
.put("xpack.monitoring.exporters._name0.type", "_type")
.put("xpack.monitoring.exporters._name1.type", "_type")
.put("xpack.monitoring.exporters._name0.type", "local")
.put("xpack.monitoring.exporters._name1.type", "http")
.build();
clusterSettings = new ClusterSettings(nodeSettings, singleton(Exporters.EXPORTERS_SETTINGS));
clusterSettings = new ClusterSettings(nodeSettings, new HashSet<>(Exporters.getSettings()));
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
exporters = new Exporters(nodeSettings, factories, clusterService, licenseState, threadContext) {
@ -193,21 +189,22 @@ public class ExportersTests extends ESTestCase {
assertThat(settingsHolder.get(), notNullValue());
Settings settings = settingsHolder.get();
assertThat(settings.size(), is(2));
assertEquals(settings.get("_name0.type"), "_type");
assertEquals(settings.get("_name1.type"), "_type");
assertEquals(settings.get("xpack.monitoring.exporters._name0.type"), "local");
assertEquals(settings.get("xpack.monitoring.exporters._name1.type"), "http");
Settings update = Settings.builder()
.put("xpack.monitoring.exporters._name0.foo", "bar")
.put("xpack.monitoring.exporters._name1.foo", "bar")
.put("xpack.monitoring.exporters._name0.use_ingest", true)
.put("xpack.monitoring.exporters._name1.use_ingest", false)
.build();
clusterSettings.applySettings(update);
assertThat(settingsHolder.get(), notNullValue());
settings = settingsHolder.get();
logger.info(settings);
assertThat(settings.size(), is(4));
assertEquals(settings.get("_name0.type"), "_type");
assertEquals(settings.get("_name0.foo"), "bar");
assertEquals(settings.get("_name1.type"), "_type");
assertEquals(settings.get("_name1.foo"), "bar");
assertEquals(settings.get("xpack.monitoring.exporters._name0.type"), "local");
assertEquals(settings.get("xpack.monitoring.exporters._name0.use_ingest"), "true");
assertEquals(settings.get("xpack.monitoring.exporters._name1.type"), "http");
assertEquals(settings.get("xpack.monitoring.exporters._name1.use_ingest"), "false");
}
public void testExporterBlocksOnClusterState() {

View File

@ -115,6 +115,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase {
.put(super.nodeSettings(nodeOrdinal))
.put(MonitoringService.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters._http.type", "http")
.put("xpack.monitoring.exporters._http.ssl.truststore.password", "foobar")
.put("xpack.monitoring.exporters._http.enabled", false)
.build();
}
@ -554,9 +555,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase {
private HttpExporter createHttpExporter(final Settings settings) throws Exception {
final Exporter.Config config =
new Exporter.Config("_http", "http",
settings, settings.getAsSettings("xpack.monitoring.exporters._http"),
clusterService(), new XPackLicenseState());
new Exporter.Config("_http", "http", settings, clusterService(), new XPackLicenseState());
return new HttpExporter(config, new SSLService(settings, environment), new ThreadContext(settings));
}
@ -627,7 +626,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase {
}
private String resourceVersionQueryString() {
return "filter_path=" + FILTER_PATH_RESOURCE_VERSION;
return "master_timeout=10s&filter_path=" + FILTER_PATH_RESOURCE_VERSION;
}
private String watcherCheckQueryString() {
@ -637,7 +636,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase {
private String bulkQueryString() {
final String pipelineName = MonitoringTemplateUtils.pipelineName(TEMPLATE_VERSION);
return "pipeline=" + pipelineName + "&filter_path=" + "errors,items.*.error";
return "master_timeout=10s&pipeline=" + pipelineName + "&filter_path=" + "errors,items.*.error";
}
private void enqueueGetClusterVersionResponse(Version v) throws IOException {

View File

@ -34,7 +34,6 @@ import java.util.stream.Collectors;
import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.OLD_TEMPLATE_IDS;
import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.PIPELINE_IDS;
import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_IDS;
import static org.elasticsearch.xpack.monitoring.exporter.http.HttpExporter.TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING;
import static org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse.DOES_NOT_EXIST;
import static org.elasticsearch.xpack.monitoring.exporter.http.PublishableHttpResource.CheckResponse.EXISTS;
import static org.hamcrest.Matchers.hasSize;
@ -73,11 +72,13 @@ public class HttpExporterResourceTests extends AbstractPublishableHttpResourceTe
private final List<String> pipelineNames = new ArrayList<>(EXPECTED_PIPELINES);
private final List<String> watchNames = new ArrayList<>(EXPECTED_WATCHES);
private final Settings exporterSettings = Settings.builder().put(TEMPLATE_CREATE_LEGACY_VERSIONS_SETTING, createOldTemplates).build();
private final Settings exporterSettings = Settings.builder()
.put("xpack.monitoring.exporters._http.index.template.create_legacy_templates", createOldTemplates)
.build();
private final MultiHttpResource resources =
HttpExporter.createResources(
new Exporter.Config("_http", "http", Settings.EMPTY, exporterSettings, clusterService, licenseState));
new Exporter.Config("_http", "http", exporterSettings, clusterService, licenseState));
@Before
public void setupResources() {
@ -557,7 +558,7 @@ public class HttpExporterResourceTests extends AbstractPublishableHttpResourceTe
final MultiHttpResource resources =
HttpExporter.createResources(
new Exporter.Config("_http", "http", Settings.EMPTY, exporterSettings, clusterService, licenseState));
new Exporter.Config("_http", "http", exporterSettings, clusterService, licenseState));
final int successfulGetTemplates = randomIntBetween(0, EXPECTED_TEMPLATES);
final int unsuccessfulGetTemplates = EXPECTED_TEMPLATES - successfulGetTemplates;

View File

@ -81,7 +81,7 @@ public class HttpExporterTests extends ESTestCase {
public void testExporterWithBlacklistedHeaders() {
final String blacklistedHeader = randomFrom(HttpExporter.BLACKLISTED_HEADERS);
final String expected = "[" + blacklistedHeader + "] cannot be overwritten via [xpack.monitoring.exporters._http.headers]";
final String expected = "header cannot be overwritten via [xpack.monitoring.exporters._http.headers." + blacklistedHeader + "]";
final Settings.Builder builder = Settings.builder()
.put("xpack.monitoring.exporters._http.type", HttpExporter.TYPE)
.put("xpack.monitoring.exporters._http.host", "http://localhost:9200")
@ -417,6 +417,8 @@ public class HttpExporterTests extends ESTestCase {
if (bulkTimeout != null) {
assertThat(parameters.remove("master_timeout"), equalTo(bulkTimeout.toString()));
} else {
assertThat(parameters.remove("master_timeout"), equalTo("10s"));
}
if (useIngest) {
@ -504,7 +506,7 @@ public class HttpExporterTests extends ESTestCase {
* @return Never {@code null}.
*/
private Config createConfig(final Settings settings) {
return new Config("_http", HttpExporter.TYPE, settings, settings.getAsSettings(exporterName()), clusterService, licenseState);
return new Config("_http", HttpExporter.TYPE, settings, clusterService, licenseState);
}
private static String exporterName() {

View File

@ -18,8 +18,6 @@ import org.elasticsearch.xpack.monitoring.test.MonitoringIntegTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import static org.elasticsearch.xpack.monitoring.exporter.Exporter.CLUSTER_ALERTS_MANAGEMENT_SETTING;
/**
* {@code LocalExporterIntegTestCase} offers a basis for integration tests for the {@link LocalExporter}.
*/
@ -50,7 +48,7 @@ public abstract class LocalExporterIntegTestCase extends MonitoringIntegTestCase
.put(MonitoringService.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.exporters." + exporterName + ".type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters." + exporterName + ".enabled", false)
.put("xpack.monitoring.exporters." + exporterName + "." + CLUSTER_ALERTS_MANAGEMENT_SETTING, false)
.put("xpack.monitoring.exporters." + exporterName + ".cluster_alerts.management.enabled", false)
.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false)
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.build();
@ -78,10 +76,7 @@ public abstract class LocalExporterIntegTestCase extends MonitoringIntegTestCase
protected LocalExporter createLocalExporter() {
final Settings settings = localExporterSettings();
final XPackLicenseState licenseState = new XPackLicenseState();
final Exporter.Config config =
new Exporter.Config(exporterName, "local",
settings, settings.getAsSettings("xpack.monitoring.exporters." + exporterName),
clusterService(), licenseState);
final Exporter.Config config = new Exporter.Config(exporterName, "local", settings, clusterService(), licenseState);
final CleanerService cleanerService =
new CleanerService(settings, clusterService().getClusterSettings(), THREADPOOL, licenseState);

View File

@ -71,6 +71,7 @@ public class LocalExporterIntegTests extends LocalExporterIntegTestCase {
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
Settings.builder().putNull(MonitoringService.INTERVAL.getKey())
.putNull("xpack.monitoring.exporters._local.enabled")
.putNull("xpack.monitoring.exporters._local.cluster_alerts.management.enabled")
.putNull("xpack.monitoring.exporters._local.index.name.time_format")));
}
@ -87,7 +88,8 @@ public class LocalExporterIntegTests extends LocalExporterIntegTestCase {
}
Settings.Builder exporterSettings = Settings.builder()
.put("xpack.monitoring.exporters._local.enabled", true);
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", false);
if (indexTimeFormat != null) {
exporterSettings.put("xpack.monitoring.exporters._local.index.name.time_format", indexTimeFormat);

View File

@ -11,12 +11,15 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.PipelineConfiguration;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xpack.core.monitoring.MonitoredSystem;
import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST;

View File

@ -44,6 +44,7 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportMessage;
import org.elasticsearch.xpack.core.XPackClientPlugin;
import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField;
@ -961,7 +962,7 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail {
// method for testing to allow different plugins such as mock transport...
List<Class<? extends Plugin>> remoteTransportClientPlugins() {
return Arrays.asList(Security.class, XPackPlugin.class);
return Arrays.asList(XPackClientPlugin.class);
}
public static void registerSettings(List<Setting<?>> settings) {

View File

@ -9,6 +9,7 @@ import net.shibboleth.utilities.java.support.component.ComponentInitializationEx
import net.shibboleth.utilities.java.support.resolver.CriteriaSet;
import net.shibboleth.utilities.java.support.resolver.ResolverException;
import net.shibboleth.utilities.java.support.xml.BasicParserPool;
import org.apache.http.client.HttpClient;
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
@ -457,7 +458,7 @@ public final class SamlRealm extends Realm implements Releasable {
SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslService.sslSocketFactory(sslSettings), verifier);
builder.setSSLSocketFactory(factory);
HTTPMetadataResolver resolver = new HTTPMetadataResolver(builder.build(), metadataUrl);
HTTPMetadataResolver resolver = new PrivilegedHTTPMetadataResolver(builder.build(), metadataUrl);
TimeValue refresh = IDP_METADATA_HTTP_REFRESH.get(config.settings());
resolver.setMinRefreshDelay(refresh.millis());
resolver.setMaxRefreshDelay(refresh.millis());
@ -476,6 +477,24 @@ public final class SamlRealm extends Realm implements Releasable {
});
}
private static final class PrivilegedHTTPMetadataResolver extends HTTPMetadataResolver {
PrivilegedHTTPMetadataResolver(final HttpClient client, final String metadataURL) throws ResolverException {
super(client, metadataURL);
}
@Override
protected byte[] fetchMetadata() throws ResolverException {
try {
return AccessController.doPrivileged(
(PrivilegedExceptionAction<byte[]>) () -> PrivilegedHTTPMetadataResolver.super.fetchMetadata());
} catch (final PrivilegedActionException e) {
throw (ResolverException) e.getCause();
}
}
}
@SuppressForbidden(reason = "uses toFile")
private static Tuple<AbstractReloadingMetadataResolver, Supplier<EntityDescriptor>> parseFileSystemMetadata(
Logger logger, String metadataPath, RealmConfig config, ResourceWatcherService watcherService)

View File

@ -1,31 +1,14 @@
grant {
permission java.lang.RuntimePermission "setFactory";
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
// needed because of SAML (cf. o.e.x.s.s.RestorableContextClassLoader)
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.xmlsec-2.0.8.jar}" {

View File

@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.security.LocalStateSecurity;
import org.elasticsearch.xpack.security.Security;
import org.hamcrest.Matcher;
@ -38,7 +39,7 @@ public class SettingsFilterTests extends ESTestCase {
private Map<String, Matcher> settingsMatcherMap = new HashMap<>();
private MockSecureSettings mockSecureSettings = new MockSecureSettings();
public void testFiltering() throws OperatorCreationException, GeneralSecurityException, DestroyFailedException, IOException {
public void testFiltering() throws Exception {
configureUnfilteredSetting("xpack.security.authc.realms.file.type", "file");
// ldap realm filtering
@ -103,17 +104,15 @@ public class SettingsFilterTests extends ESTestCase {
.setSecureSettings(mockSecureSettings)
.build();
XPackPlugin xPackPlugin = new XPackPlugin(settings, null);
Security securityPlugin = new Security(settings, null);
LocalStateSecurity securityPlugin = new LocalStateSecurity(settings, null);
List<Setting<?>> settingList = new ArrayList<>();
settingList.add(Setting.simpleString("foo.bar", Setting.Property.NodeScope));
settingList.add(Setting.simpleString("foo.baz", Setting.Property.NodeScope));
settingList.add(Setting.simpleString("bar.baz", Setting.Property.NodeScope));
settingList.add(Setting.simpleString("baz.foo", Setting.Property.NodeScope));
settingList.addAll(xPackPlugin.getSettings());
settingList.addAll(securityPlugin.getSettings());
List<String> settingsFilterList = new ArrayList<>();
settingsFilterList.addAll(xPackPlugin.getSettingsFilter());
settingsFilterList.addAll(securityPlugin.getSettingsFilter());
// custom settings, potentially added by a plugin
SettingsModule settingsModule = new SettingsModule(settings, settingList, settingsFilterList);

View File

@ -1,31 +1,6 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
// needed when sending emails for javax.activation
// otherwise a classnotfound exception is thrown due to trying
// to load the class with the application class loader
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -131,7 +131,6 @@ import org.elasticsearch.xpack.watcher.notification.email.attachment.ReportingAt
import org.elasticsearch.xpack.watcher.notification.email.support.BodyPartSource;
import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService;
import org.elasticsearch.xpack.watcher.notification.jira.JiraService;
import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyAccount;
import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService;
import org.elasticsearch.xpack.watcher.notification.slack.SlackService;
import org.elasticsearch.xpack.watcher.rest.action.RestAckWatchAction;
@ -194,12 +193,6 @@ import static java.util.Collections.emptyList;
public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin {
static {
// some classes need to have their own clinit blocks
BodyPartSource.init();
Account.init();
}
public static final Setting<String> INDEX_WATCHER_TEMPLATE_VERSION_SETTING =
new Setting<>("index.xpack.watcher.template.version", "", Function.identity(), Setting.Property.IndexScope);
public static final Setting<Boolean> ENCRYPT_SENSITIVE_DATA_SETTING =
@ -252,6 +245,10 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin {
return Collections.emptyList();
}
// only initialize these classes if Watcher is enabled, and only after the plugin security policy for Watcher is in place
BodyPartSource.init();
Account.init();
final CryptoService cryptoService;
try {
cryptoService = ENCRYPT_SENSITIVE_DATA_SETTING.get(settings) ? new CryptoService(settings) : null;
@ -421,11 +418,11 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin {
settings.add(Setting.simpleString("xpack.watcher.start_immediately", Setting.Property.NodeScope));
// notification services
settings.add(SlackService.SLACK_ACCOUNT_SETTING);
settings.add(EmailService.EMAIL_ACCOUNT_SETTING);
settings.add(HipChatService.HIPCHAT_ACCOUNT_SETTING);
settings.add(JiraService.JIRA_ACCOUNT_SETTING);
settings.add(PagerDutyService.PAGERDUTY_ACCOUNT_SETTING);
settings.addAll(SlackService.getSettings());
settings.addAll(EmailService.getSettings());
settings.addAll(HipChatService.getSettings());
settings.addAll(JiraService.getSettings());
settings.addAll(PagerDutyService.getSettings());
settings.add(ReportingAttachmentParser.RETRIES_SETTING);
settings.add(ReportingAttachmentParser.INTERVAL_SETTING);
@ -595,19 +592,6 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin {
return Collections.singletonList(new EncryptSensitiveDataBootstrapCheck(env));
}
@Override
public List<String> getSettingsFilter() {
List<String> filters = new ArrayList<>();
filters.add("xpack.notification.email.account.*.smtp.password");
filters.add("xpack.notification.jira.account.*.password");
filters.add("xpack.notification.slack.account.*.url");
filters.add("xpack.notification.pagerduty.account.*.url");
filters.add("xpack.notification.pagerduty." + PagerDutyAccount.SERVICE_KEY_SETTING);
filters.add("xpack.notification.pagerduty.account.*." + PagerDutyAccount.SERVICE_KEY_SETTING);
filters.add("xpack.notification.hipchat.account.*.auth_token");
return filters;
}
@Override
public List<ScriptContext> getContexts() {
return Arrays.asList(Watcher.SCRIPT_SEARCH_CONTEXT, Watcher.SCRIPT_EXECUTABLE_CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT);

View File

@ -44,6 +44,8 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
private final ExecutorService executor;
private AtomicReference<List<String>> previousAllocationIds = new AtomicReference<>(Collections.emptyList());
private volatile WatcherMetaData watcherMetaData;
private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this.
WatcherLifeCycleService(Settings settings, ThreadPool threadPool, ClusterService clusterService,
WatcherService watcherService) {
@ -56,17 +58,25 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
clusterService.addLifecycleListener(new LifecycleListener() {
@Override
public void beforeStop() {
stop("shutdown initiated");
shutDown();
}
});
watcherMetaData = new WatcherMetaData(!settings.getAsBoolean("xpack.watcher.start_immediately", true));
}
public void stop(String reason) {
public synchronized void stop(String reason) {
watcherService.stop(reason);
}
synchronized void shutDown() {
shutDown = true;
stop("shutdown initiated");
}
private synchronized void start(ClusterState state, boolean manual) {
if (shutDown) {
return;
}
WatcherState watcherState = watcherService.state();
if (watcherState != WatcherState.STOPPED) {
logger.debug("not starting watcher. watcher can only start if its current state is [{}], but its current state now is [{}]",
@ -109,7 +119,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste
*/
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) || shutDown) {
// wait until the gateway has recovered from disk, otherwise we think may not have .watches and
// a .triggered_watches index, but they may not have been restored from the cluster state on disk
return;

View File

@ -23,7 +23,7 @@ public abstract class NotificationService<Account> extends AbstractComponent {
private final String type;
// both are guarded by this
private Map<String, Account> accounts;
protected Account defaultAccount;
private Account defaultAccount;
public NotificationService(Settings settings, String type) {
super(settings);
@ -59,7 +59,7 @@ public abstract class NotificationService<Account> extends AbstractComponent {
}
private <A> Tuple<Map<String, A>, A> buildAccounts(Settings settings, BiFunction<String, Settings, A> accountFactory) {
Settings accountsSettings = settings.getAsSettings("account");
Settings accountsSettings = settings.getByPrefix("xpack.notification." + type + ".").getAsSettings("account");
Map<String, A> accounts = new HashMap<>();
for (String name : accountsSettings.names()) {
Settings accountSettings = accountsSettings.getAsSettings(name);
@ -67,7 +67,7 @@ public abstract class NotificationService<Account> extends AbstractComponent {
accounts.put(name, account);
}
final String defaultAccountName = settings.get("default_account");
final String defaultAccountName = settings.get("xpack.notification." + type + ".default_account");
A defaultAccount;
if (defaultAccountName == null) {
if (accounts.isEmpty()) {

View File

@ -8,26 +8,73 @@ package org.elasticsearch.xpack.watcher.notification.email;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.watcher.crypto.CryptoService;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import javax.mail.MessagingException;
import java.util.Arrays;
import java.util.List;
/**
* A component to store email credentials and handle sending email notifications.
*/
public class EmailService extends NotificationService<Account> {
private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
Setting.simpleString("xpack.notification.email.default_account", Property.Dynamic, Property.NodeScope);
private static final Setting.AffixSetting<String> SETTING_PROFILE =
Setting.affixKeySetting("xpack.notification.email.account.", "profile",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<Settings> SETTING_EMAIL_DEFAULTS =
Setting.affixKeySetting("xpack.notification.email.account.", "email_defaults",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<Boolean> SETTING_SMTP_AUTH =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.auth",
(key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<Boolean> SETTING_SMTP_STARTTLS_ENABLE =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.starttls.enable",
(key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_SMTP_HOST =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.host",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<Integer> SETTING_SMTP_PORT =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.port",
(key) -> Setting.intSetting(key, 587, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_SMTP_USER =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.user",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_SMTP_PASSWORD =
Setting.affixKeySetting("xpack.notification.email.account.", "smtp.password",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
private final CryptoService cryptoService;
public static final Setting<Settings> EMAIL_ACCOUNT_SETTING =
Setting.groupSetting("xpack.notification.email.", Setting.Property.Dynamic, Setting.Property.NodeScope);
public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) {
super(settings, "email");
this.cryptoService = cryptoService;
clusterSettings.addSettingsUpdateConsumer(EMAIL_ACCOUNT_SETTING, this::setAccountSetting);
setAccountSetting(EMAIL_ACCOUNT_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings());
// ensure logging of setting changes
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_EMAIL_DEFAULTS, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_AUTH, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_STARTTLS_ENABLE, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_HOST, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PORT, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_USER, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PASSWORD, (s, o) -> {}, (s, o) -> {});
// do an initial load
setAccountSetting(settings);
}
@Override
@ -75,4 +122,9 @@ public class EmailService extends NotificationService<Account> {
}
}
public static List<Setting<?>> getSettings() {
return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_PROFILE, SETTING_EMAIL_DEFAULTS, SETTING_SMTP_AUTH, SETTING_SMTP_HOST,
SETTING_SMTP_PASSWORD, SETTING_SMTP_PORT, SETTING_SMTP_STARTTLS_ENABLE, SETTING_SMTP_USER);
}
}

View File

@ -12,26 +12,72 @@ import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import java.util.Arrays;
import java.util.List;
/**
* A component to store hipchat credentials.
*/
public class HipChatService extends NotificationService<HipChatAccount> {
private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
Setting.simpleString("xpack.notification.hipchat.default_account", Setting.Property.Dynamic, Setting.Property.NodeScope);
static final Setting<String> SETTING_DEFAULT_HOST =
Setting.simpleString("xpack.notification.hipchat.host", Setting.Property.Dynamic, Setting.Property.NodeScope);
static final Setting<Integer> SETTING_DEFAULT_PORT =
Setting.intSetting("xpack.notification.hipchat.port", 443, Setting.Property.Dynamic, Setting.Property.NodeScope);
private static final Setting.AffixSetting<String> SETTING_AUTH_TOKEN =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "auth_token",
(key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Filtered));
private static final Setting.AffixSetting<String> SETTING_PROFILE =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "profile",
(key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_ROOM =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "room",
(key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_HOST =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "host",
(key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope));
private static final Setting.AffixSetting<Integer> SETTING_PORT =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "port",
(key) -> Setting.intSetting(key, 443, Setting.Property.Dynamic, Setting.Property.NodeScope));
private static final Setting.AffixSetting<Settings> SETTING_MESSAGE_DEFAULTS =
Setting.affixKeySetting("xpack.notification.hipchat.account.", "message",
(key) -> Setting.groupSetting(key + ".", Setting.Property.Dynamic, Setting.Property.NodeScope));
private final HttpClient httpClient;
public static final Setting<Settings> HIPCHAT_ACCOUNT_SETTING =
Setting.groupSetting("xpack.notification.hipchat.", Setting.Property.Dynamic, Setting.Property.NodeScope);
private HipChatServer defaultServer;
public HipChatService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) {
super(settings, "hipchat");
this.httpClient = httpClient;
clusterSettings.addSettingsUpdateConsumer(HIPCHAT_ACCOUNT_SETTING, this::setAccountSetting);
setAccountSetting(HIPCHAT_ACCOUNT_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings());
// ensure logging of setting changes
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_HOST, (s) -> {});
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_PORT, (s) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_AUTH_TOKEN, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_ROOM, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_HOST, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_PORT, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_MESSAGE_DEFAULTS, (s, o) -> {}, (s, o) -> {});
setAccountSetting(settings);
}
@Override
protected synchronized void setAccountSetting(Settings settings) {
defaultServer = new HipChatServer(settings);
defaultServer = new HipChatServer(settings.getByPrefix("xpack.notification.hipchat."));
super.setAccountSetting(settings);
}
@ -43,4 +89,9 @@ public class HipChatService extends NotificationService<HipChatAccount> {
}
return profile.createAccount(name, accountSettings, defaultServer, httpClient, logger);
}
public static List<Setting<?>> getSettings() {
return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_AUTH_TOKEN, SETTING_PROFILE, SETTING_ROOM, SETTING_MESSAGE_DEFAULTS,
SETTING_DEFAULT_HOST, SETTING_DEFAULT_PORT, SETTING_HOST, SETTING_PORT);
}
}

View File

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.notification.jira;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

View File

@ -7,10 +7,14 @@ package org.elasticsearch.xpack.watcher.notification.jira;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import java.util.Arrays;
import java.util.List;
/**
* A component to store Atlassian's JIRA credentials.
*
@ -18,20 +22,52 @@ import org.elasticsearch.xpack.watcher.notification.NotificationService;
*/
public class JiraService extends NotificationService<JiraAccount> {
public static final Setting<Settings> JIRA_ACCOUNT_SETTING =
Setting.groupSetting("xpack.notification.jira.", Setting.Property.Dynamic, Setting.Property.NodeScope);
private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
Setting.simpleString("xpack.notification.jira.default_account", Property.Dynamic, Property.NodeScope);
private static final Setting.AffixSetting<Boolean> SETTING_ALLOW_HTTP =
Setting.affixKeySetting("xpack.notification.jira.account.", "allow_http",
(key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_URL =
Setting.affixKeySetting("xpack.notification.jira.account.", "url",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
private static final Setting.AffixSetting<String> SETTING_USER =
Setting.affixKeySetting("xpack.notification.jira.account.", "user",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
private static final Setting.AffixSetting<String> SETTING_PASSWORD =
Setting.affixKeySetting("xpack.notification.jira.account.", "password",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
private static final Setting.AffixSetting<Settings> SETTING_DEFAULTS =
Setting.affixKeySetting("xpack.notification.jira.account.", "issue_defaults",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));
private final HttpClient httpClient;
public JiraService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) {
super(settings, "jira");
this.httpClient = httpClient;
clusterSettings.addSettingsUpdateConsumer(JIRA_ACCOUNT_SETTING, this::setAccountSetting);
setAccountSetting(JIRA_ACCOUNT_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings());
// ensure logging of setting changes
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_ALLOW_HTTP, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_USER, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_PASSWORD, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {});
// do an initial load
setAccountSetting(settings);
}
@Override
protected JiraAccount createAccount(String name, Settings accountSettings) {
return new JiraAccount(name, accountSettings, httpClient);
protected JiraAccount createAccount(String name, Settings settings) {
return new JiraAccount(name, settings, httpClient);
}
public static List<Setting<?>> getSettings() {
return Arrays.asList(SETTING_ALLOW_HTTP, SETTING_URL, SETTING_USER, SETTING_PASSWORD, SETTING_DEFAULTS, SETTING_DEFAULT_ACCOUNT);
}
}

View File

@ -7,29 +7,47 @@ package org.elasticsearch.xpack.watcher.notification.pagerduty;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import java.util.Arrays;
import java.util.List;
/**
* A component to store pagerduty credentials.
*/
public class PagerDutyService extends NotificationService<PagerDutyAccount> {
public static final Setting<Settings> PAGERDUTY_ACCOUNT_SETTING =
Setting.groupSetting("xpack.notification.pagerduty.", Setting.Property.Dynamic, Setting.Property.NodeScope);
private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
Setting.simpleString("xpack.notification.pagerduty.default_account", Property.Dynamic, Property.NodeScope);
private static final Setting.AffixSetting<String> SETTING_SERVICE_API_KEY =
Setting.affixKeySetting("xpack.notification.pagerduty.account.", "service_api_key",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
private static final Setting.AffixSetting<Settings> SETTING_DEFAULTS =
Setting.affixKeySetting("xpack.notification.pagerduty.account.", "event_defaults",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));
private final HttpClient httpClient;
public PagerDutyService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) {
super(settings, "pagerduty");
this.httpClient = httpClient;
clusterSettings.addSettingsUpdateConsumer(PAGERDUTY_ACCOUNT_SETTING, this::setAccountSetting);
setAccountSetting(PAGERDUTY_ACCOUNT_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_SERVICE_API_KEY, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {});
setAccountSetting(settings);
}
@Override
protected PagerDutyAccount createAccount(String name, Settings accountSettings) {
return new PagerDutyAccount(name, accountSettings, accountSettings, httpClient, logger);
}
public static List<Setting<?>> getSettings() {
return Arrays.asList(SETTING_SERVICE_API_KEY, SETTING_DEFAULTS, SETTING_DEFAULT_ACCOUNT);
}
}

View File

@ -7,29 +7,48 @@ package org.elasticsearch.xpack.watcher.notification.slack;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.elasticsearch.xpack.watcher.notification.NotificationService;
import java.util.Arrays;
import java.util.List;
/**
* A component to store slack credentials.
*/
public class SlackService extends NotificationService<SlackAccount> {
public static final Setting<Settings> SLACK_ACCOUNT_SETTING =
Setting.groupSetting("xpack.notification.slack.", Setting.Property.Dynamic, Setting.Property.NodeScope);
private static final Setting<String> SETTING_DEFAULT_ACCOUNT =
Setting.simpleString("xpack.notification.slack.default_account", Property.Dynamic, Property.NodeScope);
private static final Setting.AffixSetting<String> SETTING_URL =
Setting.affixKeySetting("xpack.notification.slack.account.", "url",
(key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered));
private static final Setting.AffixSetting<Settings> SETTING_DEFAULTS =
Setting.affixKeySetting("xpack.notification.slack.account.", "message_defaults",
(key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope));
private final HttpClient httpClient;
public SlackService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) {
super(settings, "slack");
this.httpClient = httpClient;
clusterSettings.addSettingsUpdateConsumer(SLACK_ACCOUNT_SETTING, this::setAccountSetting);
setAccountSetting(SLACK_ACCOUNT_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings());
clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {});
clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {});
setAccountSetting(settings);
}
@Override
protected SlackAccount createAccount(String name, Settings accountSettings) {
return new SlackAccount(name, accountSettings, accountSettings, httpClient, logger);
}
public static List<Setting<?>> getSettings() {
return Arrays.asList(SETTING_URL, SETTING_DEFAULT_ACCOUNT, SETTING_DEFAULTS);
}
}

View File

@ -1,7 +1,4 @@
grant {
// needed because of problems in unbound LDAP library
permission java.util.PropertyPermission "*", "read,write";
// required to configure the custom mailcap for watcher
permission java.lang.RuntimePermission "setFactory";
@ -13,19 +10,8 @@ grant {
// TODO: remove use of this jar as soon as possible!!!!
permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
// bouncy castle
permission java.security.SecurityPermission "putProviderProperty.BC";
// needed for x-pack security extension
permission java.security.SecurityPermission "createPolicy.JavaPolicy";
permission java.security.SecurityPermission "getPolicy";
permission java.security.SecurityPermission "setPolicy";
// needed for multiple server implementations used in tests
permission java.net.SocketPermission "*", "accept,connect";
// needed for Windows named pipes in machine learning
permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
};
grant codeBase "${codebase.netty-common}" {

View File

@ -15,9 +15,9 @@ import static org.hamcrest.Matchers.is;
public class NotificationServiceTests extends ESTestCase {
public void testSingleAccount() throws Exception {
public void testSingleAccount() {
String accountName = randomAlphaOfLength(10);
Settings settings = Settings.builder().put("account." + accountName, "bar").build();
Settings settings = Settings.builder().put("xpack.notification.test.account." + accountName, "bar").build();
TestNotificationService service = new TestNotificationService(settings);
assertThat(service.getAccount(accountName), is(accountName));
@ -25,12 +25,12 @@ public class NotificationServiceTests extends ESTestCase {
assertThat(service.getAccount("non-existing"), is(accountName));
}
public void testMultipleAccountsWithExistingDefault() throws Exception {
public void testMultipleAccountsWithExistingDefault() {
String accountName = randomAlphaOfLength(10);
Settings settings = Settings.builder()
.put("account." + accountName, "bar")
.put("account.second", "bar")
.put("default_account", accountName)
.put("xpack.notification.test.account." + accountName, "bar")
.put("xpack.notification.test.account.second", "bar")
.put("xpack.notification.test.default_account", accountName)
.build();
TestNotificationService service = new TestNotificationService(settings);
@ -39,33 +39,33 @@ public class NotificationServiceTests extends ESTestCase {
assertThat(service.getAccount("non-existing"), is(accountName));
}
public void testMultipleAccountsWithNoDefault() throws Exception {
public void testMultipleAccountsWithNoDefault() {
String accountName = randomAlphaOfLength(10);
Settings settings = Settings.builder()
.put("account." + accountName, "bar")
.put("account.second", "bar")
.put("account.third", "bar")
.put("xpack.notification.test.account." + accountName, "bar")
.put("xpack.notification.test.account.second", "bar")
.put("xpack.notification.test.account.third", "bar")
.build();
TestNotificationService service = new TestNotificationService(settings);
assertThat(service.getAccount(null), anyOf(is(accountName), is("second"), is("third")));
}
public void testMultipleAccountsUnknownDefault() throws Exception {
public void testMultipleAccountsUnknownDefault() {
String accountName = randomAlphaOfLength(10);
Settings settings = Settings.builder()
.put("account." + accountName, "bar")
.put("account.second", "bar")
.put("default_account", "non-existing")
.put("xpack.notification.test.account." + accountName, "bar")
.put("xpack.notification.test.account.second", "bar")
.put("xpack.notification.test.default_account", "non-existing")
.build();
SettingsException e = expectThrows(SettingsException.class, () -> new TestNotificationService(settings));
assertThat(e.getMessage(), is("could not find default account [non-existing]"));
}
public void testNoSpecifiedDefaultAccount() throws Exception {
public void testNoSpecifiedDefaultAccount() {
String accountName = randomAlphaOfLength(10);
Settings settings = Settings.builder().put("account." + accountName, "bar").build();
Settings settings = Settings.builder().put("xpack.notification.test.account." + accountName, "bar").build();
TestNotificationService service = new TestNotificationService(settings);
assertThat(service.getAccount(null), is(accountName));

View File

@ -123,6 +123,36 @@ public class WatcherLifeCycleServiceTests extends ESTestCase {
verify(watcherService, never()).start(any(ClusterState.class));
}
public void testShutdown() throws Exception {
IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build();
ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster"))
.nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")))
.routingTable(RoutingTable.builder().add(watchRoutingTable).build())
.metaData(MetaData.builder()
.put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns()))
.put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns()))
.put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns()))
.build())
.build();
when(watcherService.validate(clusterState)).thenReturn(true);
when(watcherService.state()).thenReturn(WatcherState.STOPPED);
lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState));
verify(watcherService, times(1)).start(any(ClusterState.class));
verify(watcherService, never()).stop(anyString());
when(watcherService.state()).thenReturn(WatcherState.STARTED);
lifeCycleService.shutDown();
verify(watcherService, times(1)).start(any(ClusterState.class));
verify(watcherService, times(1)).stop(eq("shutdown initiated"));
when(watcherService.state()).thenReturn(WatcherState.STOPPED);
lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState));
verify(watcherService, times(1)).start(any(ClusterState.class));
verify(watcherService, times(1)).stop(eq("shutdown initiated"));
}
public void testManualStartStop() throws Exception {
IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build();
ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster"))

View File

@ -16,6 +16,7 @@ import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService;
import org.junit.Before;
import java.util.Collections;
import java.util.HashSet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.hipchatAction;
@ -52,7 +53,7 @@ public class HipChatActionFactoryTests extends ESTestCase {
public void testParseActionUnknownAccount() throws Exception {
hipchatService = new HipChatService(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY,
Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new HashSet<>(HipChatService.getSettings())));
factory = new HipChatActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), hipchatService);
HipChatAction action = hipchatAction("_unknown", "_body").build();
XContentBuilder jsonBuilder = jsonBuilder().value(action);

View File

@ -16,6 +16,7 @@ import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService;
import org.junit.Before;
import java.util.Collections;
import java.util.HashSet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.triggerPagerDutyAction;
@ -50,7 +51,7 @@ public class PagerDutyActionFactoryTests extends ESTestCase {
public void testParseActionUnknownAccount() throws Exception {
factory = new PagerDutyActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), new PagerDutyService(Settings.EMPTY, null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(PagerDutyService.PAGERDUTY_ACCOUNT_SETTING))));
new ClusterSettings(Settings.EMPTY, new HashSet<>(PagerDutyService.getSettings()))));
PagerDutyAction action = triggerPagerDutyAction("_unknown", "_body").build();
XContentBuilder jsonBuilder = jsonBuilder().value(action);
XContentParser parser = createParser(jsonBuilder);

View File

@ -16,6 +16,7 @@ import org.elasticsearch.xpack.watcher.notification.slack.SlackService;
import org.junit.Before;
import java.util.Collections;
import java.util.HashSet;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageTests.createRandomTemplate;
@ -49,7 +50,7 @@ public class SlackActionFactoryTests extends ESTestCase {
public void testParseActionUnknownAccount() throws Exception {
SlackService service = new SlackService(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY,
Collections.singleton(SlackService.SLACK_ACCOUNT_SETTING)));
new HashSet<>(SlackService.getSettings())));
factory = new SlackActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), service);
SlackAction action = slackAction("_unknown", createRandomTemplate()).build();
XContentBuilder jsonBuilder = jsonBuilder().value(action);

View File

@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.test.ESTestCase;
import java.util.Collections;
import java.util.HashSet;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@ -23,7 +24,7 @@ public class AccountsTests extends ESTestCase {
.put("default_account", "account1");
addAccountSettings("account1", builder);
EmailService service = new EmailService(builder.build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
Account account = service.getAccount("account1");
assertThat(account, notNullValue());
assertThat(account.name(), equalTo("account1"));
@ -36,7 +37,7 @@ public class AccountsTests extends ESTestCase {
Settings.Builder builder = Settings.builder();
addAccountSettings("account1", builder);
EmailService service = new EmailService(builder.build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
Account account = service.getAccount("account1");
assertThat(account, notNullValue());
assertThat(account.name(), equalTo("account1"));
@ -52,7 +53,7 @@ public class AccountsTests extends ESTestCase {
addAccountSettings("account2", builder);
EmailService service = new EmailService(builder.build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
Account account = service.getAccount("account1");
assertThat(account, notNullValue());
assertThat(account.name(), equalTo("account1"));
@ -71,7 +72,7 @@ public class AccountsTests extends ESTestCase {
addAccountSettings("account2", builder);
EmailService service = new EmailService(builder.build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
Account account = service.getAccount("account1");
assertThat(account, notNullValue());
assertThat(account.name(), equalTo("account1"));
@ -87,7 +88,7 @@ public class AccountsTests extends ESTestCase {
Settings.Builder builder = Settings.builder().put("xpack.notification.email.default_account", "unknown");
addAccountSettings("account1", builder);
addAccountSettings("account2", builder);
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING));
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()));
SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(builder.build(), null, clusterSettings));
assertThat(e.getMessage(), is("could not find default account [unknown]"));
}
@ -95,13 +96,13 @@ public class AccountsTests extends ESTestCase {
public void testNoAccount() throws Exception {
Settings.Builder builder = Settings.builder();
EmailService service = new EmailService(builder.build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
expectThrows(IllegalArgumentException.class, () -> service.getAccount(null));
}
public void testNoAccountWithDefaultAccount() throws Exception {
Settings settings = Settings.builder().put("xpack.notification.email.default_account", "unknown").build();
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING));
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()));
SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(settings, null, clusterSettings));
assertThat(e.getMessage(), is("could not find default account [unknown]"));
}

View File

@ -12,6 +12,7 @@ import org.elasticsearch.xpack.core.watcher.common.secret.Secret;
import org.junit.Before;
import java.util.Collections;
import java.util.HashSet;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@ -28,7 +29,7 @@ public class EmailServiceTests extends ESTestCase {
public void init() throws Exception {
account = mock(Account.class);
service = new EmailService(Settings.builder().put("xpack.notification.email.account.account1.foo", "bar").build(), null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING))) {
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))) {
@Override
protected Account createAccount(String name, Settings accountSettings) {
return account;

View File

@ -16,6 +16,7 @@ import javax.mail.internet.MimeMessage;
import javax.mail.internet.MimeMultipart;
import java.util.Collections;
import java.util.HashSet;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
@ -41,7 +42,7 @@ public class ProfileTests extends ESTestCase {
.build();
EmailService service = new EmailService(settings, null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
Session session = service.getAccount("foo").getConfig().createSession();
MimeMessage mimeMessage = Profile.STANDARD.toMimeMessage(email, session);

View File

@ -19,6 +19,7 @@ import org.mockito.ArgumentCaptor;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
@ -37,7 +38,7 @@ public class HipChatAccountsTests extends ESTestCase {
.put("xpack.notification.hipchat.default_account", "account1");
addAccountSettings("account1", builder);
HipChatService service = new HipChatService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY,
Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new HashSet<>(HipChatService.getSettings())));
HipChatAccount account = service.getAccount("account1");
HipChatMessage.Template template = new HipChatMessage.Template.Builder(new TextTemplate("foo"))

View File

@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.watcher.notification.hipchat;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
@ -13,6 +14,7 @@ import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.junit.Before;
import java.util.Collections;
import java.util.HashSet;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.containsString;
@ -51,7 +53,7 @@ public class HipChatServiceTests extends ESTestCase {
}
buildMessageDefaults(accountName, settingsBuilder, defaultRoom, null, defaultFrom, defaultColor, defaultFormat, defaultNotify);
HipChatService service = new HipChatService(settingsBuilder.build(), httpClient,
new ClusterSettings(settingsBuilder.build(), Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings())));
HipChatAccount account = service.getAccount(accountName);
assertThat(account, notNullValue());
@ -99,7 +101,7 @@ public class HipChatServiceTests extends ESTestCase {
}
buildMessageDefaults(accountName, settingsBuilder, null, null, defaultFrom, defaultColor, defaultFormat, defaultNotify);
HipChatService service = new HipChatService(settingsBuilder.build(), httpClient,
new ClusterSettings(settingsBuilder.build(), Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings())));
HipChatAccount account = service.getAccount(accountName);
assertThat(account, notNullValue());
@ -128,7 +130,7 @@ public class HipChatServiceTests extends ESTestCase {
.put("xpack.notification.hipchat.account." + accountName + ".auth_token", "_token");
SettingsException e = expectThrows(SettingsException.class, () ->
new HipChatService(settingsBuilder.build(), httpClient,
new ClusterSettings(settingsBuilder.build(), Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING))));
new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))));
assertThat(e.getMessage(), containsString("missing required [room] setting for [integration] account profile"));
}
@ -152,7 +154,7 @@ public class HipChatServiceTests extends ESTestCase {
}
buildMessageDefaults(accountName, settingsBuilder, defaultRoom, defaultUser, null, defaultColor, defaultFormat, defaultNotify);
HipChatService service = new HipChatService(settingsBuilder.build(), httpClient,
new ClusterSettings(settingsBuilder.build(), Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings())));
HipChatAccount account = service.getAccount(accountName);
assertThat(account, notNullValue());
@ -213,7 +215,7 @@ public class HipChatServiceTests extends ESTestCase {
}
HipChatService service = new HipChatService(settingsBuilder.build(), httpClient,
new ClusterSettings(settingsBuilder.build(), Collections.singleton(HipChatService.HIPCHAT_ACCOUNT_SETTING)));
new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings())));
for (int i = 0; i < 5; i++) {
String name = "_a" + i;

View File

@ -36,12 +36,12 @@ public class IntegrationAccountTests extends ESTestCase {
String host = HipChatServer.DEFAULT.host();
if (randomBoolean()) {
host = randomAlphaOfLength(10);
sb.put(HipChatServer.HOST_SETTING, host);
sb.put("host", host);
}
int port = HipChatServer.DEFAULT.port();
if (randomBoolean()) {
port = randomIntBetween(300, 400);
sb.put(HipChatServer.PORT_SETTING, port);
sb.put("port", port);
}
String room = randomAlphaOfLength(10);

View File

@ -47,12 +47,12 @@ public class UserAccountTests extends ESTestCase {
String host = HipChatServer.DEFAULT.host();
if (randomBoolean()) {
host = randomAlphaOfLength(10);
sb.put(HipChatServer.HOST_SETTING, host);
sb.put("host", host);
}
int port = HipChatServer.DEFAULT.port();
if (randomBoolean()) {
port = randomIntBetween(300, 400);
sb.put(HipChatServer.PORT_SETTING, port);
sb.put("port", port);
}
String[] defaultRooms = null;

View File

@ -36,12 +36,12 @@ public class V1AccountTests extends ESTestCase {
String host = HipChatServer.DEFAULT.host();
if (randomBoolean()) {
host = randomAlphaOfLength(10);
sb.put(HipChatServer.HOST_SETTING, host);
sb.put("host", host);
}
int port = HipChatServer.DEFAULT.port();
if (randomBoolean()) {
port = randomIntBetween(300, 400);
sb.put(HipChatServer.PORT_SETTING, port);
sb.put("port", port);
}
String[] defaultRooms = null;

View File

@ -23,6 +23,7 @@ import org.mockito.ArgumentCaptor;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import static java.util.Collections.emptyMap;
@ -47,7 +48,7 @@ public class JiraAccountTests extends ESTestCase {
@Before
public void init() throws Exception {
httpClient = mock(HttpClient.class);
clusterSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(JiraService.JIRA_ACCOUNT_SETTING));
clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(JiraService.getSettings()));
}
public void testJiraAccountSettings() {

View File

@ -20,7 +20,7 @@ import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageDe
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import java.util.Collections;
import java.util.HashSet;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@ -40,7 +40,7 @@ public class PagerDutyAccountsTests extends ESTestCase {
Settings.Builder builder = Settings.builder().put("xpack.notification.pagerduty.default_account", "account1");
addAccountSettings("account1", builder);
PagerDutyService service = new PagerDutyService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY,
Collections.singleton(PagerDutyService.PAGERDUTY_ACCOUNT_SETTING)));
new HashSet<>(PagerDutyService.getSettings())));
PagerDutyAccount account = service.getAccount("account1");
ArgumentCaptor<HttpRequest> argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class);
@ -60,7 +60,7 @@ public class PagerDutyAccountsTests extends ESTestCase {
Settings.Builder builder = Settings.builder().put("xpack.notification.pagerduty.default_account", "account1");
addAccountSettings("account1", builder);
PagerDutyService service = new PagerDutyService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY,
Collections.singleton(PagerDutyService.PAGERDUTY_ACCOUNT_SETTING)));
new HashSet<>(PagerDutyService.getSettings())));
PagerDutyAccount account = service.getAccount("account1");
ArgumentCaptor<HttpRequest> argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class);

View File

@ -520,8 +520,7 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase
public static class NoopEmailService extends EmailService {
public NoopEmailService() {
super(Settings.EMPTY, null,
new ClusterSettings(Settings.EMPTY, Collections.singleton(EmailService.EMAIL_ACCOUNT_SETTING)));
super(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())));
}
@Override

View File

@ -69,8 +69,6 @@ compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-tr
* configuration options based on their name.
*/
subprojects {
// TODO remove after backport
ext.bwc_tests_enabled = false
Matcher m = project.name =~ /with(out)?-system-key/
if (false == m.matches()) {
throw new InvalidUserDataException("Invalid project name [${project.name}]")

View File

@ -28,9 +28,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME;
import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
public class TokenBackwardsCompatibilityIT extends ESRestTestCase {
@ -140,7 +138,6 @@ public class TokenBackwardsCompatibilityIT extends ESRestTestCase {
public void testMixedCluster() throws Exception {
assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED);
assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion());
awaitIndexTemplateUpgrade();
Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2");
assertOK(getResponse);
Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source");
@ -188,7 +185,6 @@ public class TokenBackwardsCompatibilityIT extends ESRestTestCase {
public void testUpgradedCluster() throws Exception {
assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED);
awaitIndexTemplateUpgrade();
Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2");
assertOK(getResponse);
Map<String, Object> source = (Map<String, Object>) entityAsMap(getResponse).get("_source");
@ -265,28 +261,6 @@ public class TokenBackwardsCompatibilityIT extends ESRestTestCase {
return Version.CURRENT.equals(Version.fromString(objectPath.evaluate("nodes." + masterNodeId + ".version")));
}
private void awaitIndexTemplateUpgrade() throws Exception {
assertTrue(awaitBusy(() -> {
try {
Response response = client().performRequest("GET", "/_cluster/state/metadata");
assertOK(response);
ObjectPath objectPath = ObjectPath.createFromResponse(response);
final String mappingsPath = "metadata.templates." + SECURITY_TEMPLATE_NAME + "" +
".mappings";
Map<String, Object> mappings = objectPath.evaluate(mappingsPath);
assertNotNull(mappings);
assertThat(mappings.size(), greaterThanOrEqualTo(1));
String key = mappings.keySet().iterator().next();
String templateVersion = objectPath.evaluate(mappingsPath + "." + key + "" + "._meta.security-version");
final Version tVersion = Version.fromString(templateVersion);
return Version.CURRENT.equals(tVersion);
} catch (IOException e) {
logger.warn("caught exception checking template version", e);
return false;
}
}));
}
private RestClient getRestClientForCurrentVersionNodesOnly() throws IOException {
Response response = client().performRequest("GET", "_nodes");
assertOK(response);