Merge branch 'master' into index-lifecycle

Tal Levy 2018-06-13 14:15:28 -07:00
commit 04e0d0a5e7
69 changed files with 224 additions and 116 deletions

View File

@@ -131,7 +131,7 @@ task verifyVersions {
     new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
       xml = new XmlParser().parse(s)
     }
-    Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })
+    Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }.collect { Version.fromString(it) })
     // Limit the known versions to those that should be index compatible, and are not future versions
     knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
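
The widened pattern matters as soon as any version component reaches two digits (for example 5.6.10). A standalone Java sketch, not part of this change, showing the difference between the two patterns:

[source,java]
--------------------------------------------------
import java.util.regex.Pattern;

public class VersionRegexCheck {
    public static void main(String[] args) {
        // The old pattern only accepts single-digit components.
        Pattern old = Pattern.compile("\\d\\.\\d\\.\\d");
        // The widened pattern accepts any number of digits per component.
        Pattern widened = Pattern.compile("\\d+\\.\\d+\\.\\d+");

        System.out.println(old.matcher("5.6.9").matches());      // true
        System.out.println(old.matcher("5.6.10").matches());     // false - "10" has two digits
        System.out.println(widened.matcher("5.6.10").matches()); // true
    }
}
--------------------------------------------------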

View File

@@ -2,7 +2,7 @@ call "%~dp0elasticsearch-env.bat" || exit /b 1
 if defined ES_ADDITIONAL_SOURCES (
   for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do (
-    call %~dp0%%a
+    call "%~dp0%%a"
   )
 )

View File

@@ -120,6 +120,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_5_6_10_ID = 5061099;
     public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
+    public static final int V_5_6_11_ID = 5061199;
+    public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
     public static final int V_6_0_0_alpha1_ID = 6000001;
     public static final Version V_6_0_0_alpha1 =
         new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
@@ -232,6 +234,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
             return V_6_0_0_alpha2;
         case V_6_0_0_alpha1_ID:
             return V_6_0_0_alpha1;
+        case V_5_6_11_ID:
+            return V_5_6_11;
         case V_5_6_10_ID:
             return V_5_6_10;
         case V_5_6_9_ID:
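
For context, the numeric IDs above pack the version components into a single integer; the sketch below is my reading of the constants in this hunk (not code from this commit):

[source,java]
--------------------------------------------------
// Decode an Elasticsearch-style version id such as 5061199 into its parts,
// assuming the layout major * 1_000_000 + minor * 10_000 + revision * 100 + build.
public class VersionIdSketch {
    public static void main(String[] args) {
        int id = 5061199;                    // V_5_6_11_ID from the hunk above
        int major = id / 1_000_000;          // 5
        int minor = (id / 10_000) % 100;     // 6
        int revision = (id / 100) % 100;     // 11
        int build = id % 100;                // 99, the suffix used for release builds
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);
    }
}
--------------------------------------------------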

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Close Jobs</titleabbrev>
 ++++
-This API enables you to close one or more jobs.
+Closes one or more jobs.
 A job can be opened and closed multiple times throughout its lifecycle.
 A closed job cannot receive data or perform analysis

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete Events from Calendar</titleabbrev>
 ++++
-This API enables you to delete scheduled events from a calendar.
+Deletes scheduled events from a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete Jobs from Calendar</titleabbrev>
 ++++
-This API enables you to delete jobs from a calendar.
+Deletes jobs from a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete Calendar</titleabbrev>
 ++++
-This API enables you to delete a calendar.
+Deletes a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to delete an existing {dfeed}.
+Deletes an existing {dfeed}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete Jobs</titleabbrev>
 ++++
-This API enables you to delete an existing anomaly detection job.
+Deletes an existing anomaly detection job.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Delete Model Snapshots</titleabbrev>
 ++++
-This API enables you to delete an existing model snapshot.
+Deletes an existing model snapshot.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Flush Jobs</titleabbrev>
 ++++
-This API forces any buffered data to be processed by the job.
+Forces any buffered data to be processed by the job.
 ==== Request

View File

@@ -5,8 +5,7 @@
 <titleabbrev>Forecast Jobs</titleabbrev>
 ++++
-This API uses historical behavior to predict the future behavior of a time
-series.
+Predict the future behavior of a time series by using historical behavior.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Buckets</titleabbrev>
 ++++
-This API enables you to retrieve job results for one or more buckets.
+Retrieves job results for one or more buckets.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Scheduled Events</titleabbrev>
 ++++
-This API enables you to retrieve information about the scheduled events in
+Retrieves information about the scheduled events in
 calendars.

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Calendars</titleabbrev>
 ++++
-This API enables you to retrieve configuration information for calendars.
+Retrieves configuration information for calendars.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Categories</titleabbrev>
 ++++
-This API enables you to retrieve job results for one or more categories.
+Retrieves job results for one or more categories.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get {dfeed-cap} Statistics</titleabbrev>
 ++++
-This API enables you to retrieve usage information for {dfeeds}.
+Retrieves usage information for {dfeeds}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to retrieve configuration information for {dfeeds}.
+Retrieves configuration information for {dfeeds}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Influencers</titleabbrev>
 ++++
-This API enables you to retrieve job results for one or more influencers.
+Retrieves job results for one or more influencers.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Job Statistics</titleabbrev>
 ++++
-This API enables you to retrieve usage information for jobs.
+Retrieves usage information for jobs.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Jobs</titleabbrev>
 ++++
-This API enables you to retrieve configuration information for jobs.
+Retrieves configuration information for jobs.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Overall Buckets</titleabbrev>
 ++++
-This API enables you to retrieve overall bucket results that summarize the
+Retrieves overall bucket results that summarize the
 bucket results of multiple jobs.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Records</titleabbrev>
 ++++
-This API enables you to retrieve anomaly records for a job.
+Retrieves anomaly records for a job.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Get Model Snapshots</titleabbrev>
 ++++
-This API enables you to retrieve information about model snapshots.
+Retrieves information about model snapshots.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Open Jobs</titleabbrev>
 ++++
-This API enables you to open one or more jobs.
+Opens one or more jobs.
 A job must be opened in order for it to be ready to receive and analyze data.
 A job can be opened and closed multiple times throughout its lifecycle.

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Add Events to Calendar</titleabbrev>
 ++++
-This API enables you to post scheduled events in a calendar.
+Posts scheduled events in a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Post Data to Jobs</titleabbrev>
 ++++
-This API enables you to send data to an anomaly detection job for analysis.
+Sends data to an anomaly detection job for analysis.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Preview {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to preview a {dfeed}.
+Previews a {dfeed}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Add Jobs to Calendar</titleabbrev>
 ++++
-This API enables you to add a job to a calendar.
+Adds a job to a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Create Calendar</titleabbrev>
 ++++
-This API enables you to instantiate a calendar.
+Instantiates a calendar.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Create {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to instantiate a {dfeed}.
+Instantiates a {dfeed}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Create Jobs</titleabbrev>
 ++++
-This API enables you to instantiate a job.
+Instantiates a job.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Revert Model Snapshots</titleabbrev>
 ++++
-This API enables you to revert to a specific snapshot.
+Reverts to a specific snapshot.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Start {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to start one or more {dfeeds}.
+Starts one or more {dfeeds}.
 A {dfeed} must be started in order to retrieve data from {es}.
 A {dfeed} can be started and stopped multiple times throughout its lifecycle.

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Stop {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to stop one or more {dfeeds}.
+Stops one or more {dfeeds}.
 A {dfeed} that is stopped ceases to retrieve data from {es}.
 A {dfeed} can be started and stopped multiple times throughout its lifecycle.

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Update {dfeeds-cap}</titleabbrev>
 ++++
-This API enables you to update certain properties of a {dfeed}.
+Updates certain properties of a {dfeed}.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Update Jobs</titleabbrev>
 ++++
-This API enables you to update certain properties of a job.
+Updates certain properties of a job.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Update Model Snapshots</titleabbrev>
 ++++
-This API enables you to update certain properties of a snapshot.
+Updates certain properties of a snapshot.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Validate Detectors </titleabbrev>
 ++++
-This API validates detector configuration information.
+Validates detector configuration information.
 ==== Request

View File

@@ -5,7 +5,7 @@
 <titleabbrev>Validate Jobs</titleabbrev>
 ++++
-This API validates job configuration information.
+Validates job configuration information.
 ==== Request

View File

@@ -5,9 +5,37 @@
 <titleabbrev>Delete Job</titleabbrev>
 ++++
+experimental[]
 This API deletes an existing rollup job. The job can be started or stopped, in both cases it will be deleted. Attempting
 to delete a non-existing job will throw an exception
+.Deleting the job does not delete rolled up data
+**********************************
+When a job is deleted, that only removes the process that is actively monitoring and rolling up data.
+It does not delete any previously rolled up data. This is by design; a user may wish to roll up a static dataset. Because
+the dataset is static, once it has been fully rolled up there is no need to keep the indexing Rollup job around (as there
+will be no new data). So the job may be deleted, leaving behind the rolled up data for analysis.
+If you wish to also remove the rollup data, and the rollup index only contains the data for a single job, you can simply
+delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a Delete-By-Query that
+targets the Rollup job's ID in the rollup index:
+[source,js]
+--------------------------------------------------
+POST my_rollup_index/_delete_by_query
+{
+  "query": {
+    "term": {
+      "_rollup.id": "the_rollup_job_id"
+    }
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+**********************************
 ==== Request
 `DELETE _xpack/rollup/job/<job_id>`

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Get Job</titleabbrev>
 ++++
+experimental[]
 This API returns the configuration, stats and status of rollup jobs. The API can return the details for a single job,
 or for all jobs.

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Create Job</titleabbrev>
 ++++
+experimental[]
 This API enables you to create a rollup job. The job will be created in a `STOPPED` state, and must be
 started with the <<rollup-start-job,Start Job API>>.

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Get Rollup Caps</titleabbrev>
 ++++
+experimental[]
 This API returns the rollup capabilities that have been configured for an index or index pattern. This API is useful
 because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only
 certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on

View File

@@ -2,6 +2,8 @@
 [[rollup-job-config]]
 === Rollup Job Configuration
+experimental[]
 The Rollup Job Configuration contains all the details about how the rollup job should run, when it indexes documents,
 and what future queries will be able to execute against the rollup index.

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Rollup Search</titleabbrev>
 ++++
+experimental[]
 The Rollup Search endpoint allows searching rolled-up data using the standard query DSL. The Rollup Search endpoint
 is needed because, internally, rolled-up documents utilize a different document structure than the original data. The
 Rollup Search endpoint rewrites standard query DSL into a format that matches the rollup documents, then takes the response

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Start Job</titleabbrev>
 ++++
+experimental[]
 This API starts an existing, stopped rollup job. If the job does not exist an exception will be thrown.
 Starting an already started job has no action.

View File

@@ -5,6 +5,8 @@
 <titleabbrev>Stop Job</titleabbrev>
 ++++
+experimental[]
 This API stops an existing, started rollup job. If the job does not exist an exception will be thrown.
 Stopping an already stopped job has no action.

View File

@@ -1,6 +1,8 @@
 [[rollup-api-quickref]]
 == API Quick Reference
+experimental[]
 Most {rollup} endpoints have the following base:
 [source,js]

View File

@@ -18,7 +18,8 @@ for analysis, but at a fraction of the storage cost of raw data.
 * <<rollup-getting-started,Getting Started>>
 * <<rollup-api-quickref, API Quick Reference>>
 * <<rollup-understanding-groups,Understanding Rollup Grouping>>
-* <<rollup-search-limitations,Limitations of Rollup Search>>
+* <<rollup-agg-limitations,Rollup aggregation limitations>>
+* <<rollup-search-limitations,Rollup Search limitations>>
 --
@@ -27,4 +28,5 @@ include::overview.asciidoc[]
 include::api-quickref.asciidoc[]
 include::rollup-getting-started.asciidoc[]
 include::understanding-groups.asciidoc[]
+include::rollup-agg-limitations.asciidoc[]
 include::rollup-search-limitations.asciidoc[]

View File

@@ -1,6 +1,8 @@
 [[rollup-overview]]
 == Overview
+experimental[]
 Time-based data (documents that are predominantly identified by their timestamp) often have associated retention policies
 to manage data growth. For example, your system may be generating 500,000 documents every second. That will generate
 43 million documents per day, and nearly 16 billion documents a year.

View File

@ -0,0 +1,24 @@
[[rollup-agg-limitations]]
== Rollup Aggregation Limitations
experimental[]
There are some limitations to how fields can be rolled up / aggregated. This page highlights the major limitations so that
you are aware of them.
[float]
=== Limited aggregation components
The Rollup functionality allows fields to be grouped with the following aggregations:
- Date Histogram aggregation
- Histogram aggregation
- Terms aggregation
And the following metrics are allowed to be specified for numeric fields:
- Min aggregation
- Max aggregation
- Sum aggregation
- Average aggregation
- Value Count aggregation

View File

@@ -1,6 +1,8 @@
 [[rollup-getting-started]]
 == Getting Started
+experimental[]
 To use the Rollup feature, you need to create one or more "Rollup Jobs". These jobs run continuously in the background
 and rollup the index or indices that you specify, placing the rolled documents in a secondary index (also of your choosing).

View File

@@ -1,6 +1,8 @@
 [[rollup-search-limitations]]
 == Rollup Search Limitations
+experimental[]
 While we feel the Rollup function is extremely flexible, the nature of summarizing data means there will be some limitations. Once
 live data is thrown away, you will always lose some flexibility.
@@ -100,8 +102,8 @@ The Rollup functionality allows `query`'s in the search request, but with a limi
 - MatchAll Query
 - Any compound query (Boolean, Boosting, ConstantScore, etc)
-Furthermore, these queries can only use fields that were also saved in the rollup job. If you wish to filter on a keyword `hostname` field,
-that field must have been configured in the rollup job under a `terms` grouping.
+Furthermore, these queries can only use fields that were also saved in the rollup job as a `group`.
+If you wish to filter on a keyword `hostname` field, that field must have been configured in the rollup job under a `terms` grouping.
 If you attempt to use an unsupported query, or the query references a field that wasn't configured in the rollup job, an exception will be
 thrown. We expect the list of support queries to grow over time as more are implemented.

View File

@@ -1,6 +1,8 @@
 [[rollup-understanding-groups]]
 == Understanding Groups
+experimental[]
 To preserve flexibility, Rollup Jobs are defined based on how future queries may need to use the data. Traditionally, systems force
 the admin to make decisions about what metrics to rollup and on what interval. E.g. The average of `cpu_time` on an hourly basis. This
 is limiting; if, at a future date, the admin wishes to see the average of `cpu_time` on an hourly basis _and partitioned by `host_name`_,

View File

@@ -115,7 +115,6 @@ import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper;
-import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache;
 import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
@@ -174,6 +173,7 @@ import org.elasticsearch.xpack.security.authz.SecuritySearchOperationListener;
 import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache;
 import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
 import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
+import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor;
 import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
 import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
 import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction;

View File

@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.core.security.authz.accesscontrol;
+package org.elasticsearch.xpack.security.ingest;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.ingest.AbstractProcessor;

View File

@@ -3,11 +3,11 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.core.security.authz.accesscontrol;
+package org.elasticsearch.xpack.security.ingest;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor.Property;
+import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property;
 import java.util.Arrays;
 import java.util.EnumSet;

View File

@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.security.authz.accesscontrol;
+package org.elasticsearch.xpack.security.ingest;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -11,9 +11,8 @@ import org.elasticsearch.ingest.IngestDocument;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationField;
-import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor;
-import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor.Property;
 import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property;
 import java.util.Collections;
 import java.util.EnumSet;

View File

@@ -5,7 +5,7 @@ esplugin {
   name 'x-pack-sql'
   description 'The Elasticsearch plugin that powers SQL for Elasticsearch'
   classname 'org.elasticsearch.xpack.sql.plugin.SqlPlugin'
-  extendedPlugins = ['x-pack-core']
+  extendedPlugins = ['x-pack-core', 'lang-painless']
 }
 configurations {
@@ -20,6 +20,7 @@ integTest.enabled = false
 dependencies {
   compileOnly "org.elasticsearch.plugin:x-pack-core:${version}"
+  compileOnly project(':modules:lang-painless')
   compile project('sql-proto')
   compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
   compile "org.antlr:antlr4-runtime:4.5.3"

View File

@ -1 +0,0 @@
2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0

View File

@ -1,26 +0,0 @@
[The "BSD license"]
Copyright (c) 2015 Terence Parr, Sam Harwell
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -51,8 +51,18 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
     protected final NodeInfo<DateTimeFunction> info() {
         return NodeInfo.create(this, ctorForInfo(), field(), timeZone());
     }
     protected abstract NodeInfo.NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo();
+    @Override
+    protected TypeResolution resolveType() {
+        if (field().dataType() == DataType.DATE) {
+            return TypeResolution.TYPE_RESOLVED;
+        }
+        return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression (["
+            + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])");
+    }
     public TimeZone timeZone() {
         return timeZone;
     }
@@ -69,18 +79,12 @@
             return null;
         }
-        ZonedDateTime time = ZonedDateTime.ofInstant(
-            Instant.ofEpochMilli(folded.getMillis()), ZoneId.of(timeZone.getID()));
-        return time.get(chronoField());
+        return dateTimeChrono(folded.getMillis(), timeZone.getID(), chronoField().name());
     }
-    @Override
-    protected TypeResolution resolveType() {
-        if (field().dataType() == DataType.DATE) {
-            return TypeResolution.TYPE_RESOLVED;
-        }
-        return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression (["
-            + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])");
+    public static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
+        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId));
+        return Integer.valueOf(time.get(ChronoField.valueOf(chronoName)));
     }
     @Override
@@ -88,28 +92,11 @@
         ParamsBuilder params = paramsBuilder();
         String template = null;
-        if (TimeZone.getTimeZone("UTC").equals(timeZone)) {
-            // TODO: it would be nice to be able to externalize the extract function and reuse the script across all extractors
-            template = formatTemplate("doc[{}].value.get" + extractFunction() + "()");
-            params.variable(field.name());
-        } else {
-            // TODO ewwww
-            /*
-             * This uses the Java 8 time API because Painless doesn't whitelist creation of new
-             * Joda classes.
-             *
-             * The actual script is
-             * ZonedDateTime.ofInstant(Instant.ofEpochMilli(<insert doc field>.value.millis),
-             * ZoneId.of(<insert user tz>)).get(ChronoField.get(MONTH_OF_YEAR))
-             */
-            template = formatTemplate("ZonedDateTime.ofInstant(Instant.ofEpochMilli(doc[{}].value.millis), "
-                + "ZoneId.of({})).get(ChronoField.valueOf({}))");
-            params.variable(field.name())
-                .variable(timeZone.getID())
-                .variable(chronoField().name());
-        }
+        template = formatTemplate("{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})");
+        params.variable(field.name())
+            .variable(timeZone.getID())
+            .variable(chronoField().name());
         return new ScriptTemplate(template, params.build(), dataType());
     }
@@ -119,10 +106,6 @@
         throw new UnsupportedOperationException();
     }
-    protected String extractFunction() {
-        return getClass().getSimpleName();
-    }
     /**
      * Used for generating the painless script version of this function when the time zone is not UTC
      */
@@ -164,4 +147,4 @@
     public int hashCode() {
         return Objects.hash(field(), timeZone);
     }
 }
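
As a sanity check of what the new static helper computes, the following standalone sketch reproduces its body with plain java.time calls (the expected values in the comments assume the UTC time zone):

[source,java]
--------------------------------------------------
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoField;

public class DateTimeChronoSketch {
    // Same logic as the DateTimeFunction.dateTimeChrono(millis, tzId, chronoName) shown above.
    static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId));
        return Integer.valueOf(time.get(ChronoField.valueOf(chronoName)));
    }

    public static void main(String[] args) {
        System.out.println(dateTimeChrono(0L, "UTC", "YEAR"));          // 1970
        System.out.println(dateTimeChrono(0L, "UTC", "MONTH_OF_YEAR")); // 1
        System.out.println(dateTimeChrono(0L, "UTC", "DAY_OF_MONTH"));  // 1
    }
}
--------------------------------------------------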

View File

@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.script;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
+import org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils;
 import org.elasticsearch.xpack.sql.type.DataType;
 import org.elasticsearch.xpack.sql.util.StringUtils;
@@ -92,6 +93,6 @@
 }
 public static String formatTemplate(String template) {
-    return template.replace("{}", "params.%s");
+    return template.replace("{sql}", InternalSqlScriptUtils.class.getSimpleName()).replace("{}", "params.%s");
 }
 }
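
To make the substitution concrete, here is a small self-contained sketch that replays the two replace calls from formatTemplate on the date/time template introduced above (the class name is hard-coded here purely for illustration):

[source,java]
--------------------------------------------------
public class FormatTemplateSketch {
    public static void main(String[] args) {
        String template = "{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})";
        // Mirrors formatTemplate: "{sql}" becomes the whitelisted class's simple name
        // and every "{}" becomes a named-parameter placeholder.
        String formatted = template
            .replace("{sql}", "InternalSqlScriptUtils")
            .replace("{}", "params.%s");
        System.out.println(formatted);
        // InternalSqlScriptUtils.dateTimeChrono(doc[params.%s].value.millis, params.%s, params.%s)
    }
}
--------------------------------------------------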

View File

@ -0,0 +1,22 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction;
/**
* Whitelisted class for SQL scripts.
* Acts as a registry of the various static methods used <b>internally</b> by the scalar functions
* (to simplify the whitelist definition).
*/
public final class InternalSqlScriptUtils {

    private InternalSqlScriptUtils() {}

    public static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
        return DateTimeFunction.dateTimeChrono(millis, tzId, chronoName);
    }
}

View File

@ -0,0 +1,35 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.plugin;
import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.FilterScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.singletonList;
public class SqlPainlessExtension implements PainlessExtension {

    private static final Whitelist WHITELIST = WhitelistLoader.loadFromResourceFiles(SqlPainlessExtension.class, "sql_whitelist.txt");

    @Override
    public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
        Map<ScriptContext<?>, List<Whitelist>> whitelist = new HashMap<>();
        List<Whitelist> list = singletonList(WHITELIST);
        whitelist.put(FilterScript.CONTEXT, list);
        whitelist.put(SearchScript.AGGS_CONTEXT, list);
        whitelist.put(SearchScript.CONTEXT, list);
        whitelist.put(SearchScript.SCRIPT_SORT_CONTEXT, list);
        return whitelist;
    }
}
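
A minimal sketch of how this wiring could be exercised in a test, assuming the painless-spi and script classes referenced above are on the classpath (this snippet is not part of the change):

[source,java]
--------------------------------------------------
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.xpack.sql.plugin.SqlPainlessExtension;

import java.util.List;
import java.util.Map;

public class SqlPainlessExtensionCheck {
    public static void main(String[] args) {
        Map<ScriptContext<?>, List<Whitelist>> whitelists = new SqlPainlessExtension().getContextWhitelists();
        // The SQL whitelist should be registered for the search context, among others.
        System.out.println(whitelists.containsKey(SearchScript.CONTEXT)); // true
        System.out.println(whitelists.size());                            // 4 contexts registered
    }
}
--------------------------------------------------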

View File

@ -0,0 +1 @@
org.elasticsearch.xpack.sql.plugin.SqlPainlessExtension

View File

@ -0,0 +1,12 @@
#
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
# This file contains a whitelist for SQL specific utilities available inside SQL scripting
class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils {
Integer dateTimeChrono(long, String, String)
}