Merge branch 'master' into index-lifecycle
commit 04e0d0a5e7
@@ -131,7 +131,7 @@ task verifyVersions {
  new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
    xml = new XmlParser().parse(s)
  }
  Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })
  Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }.collect { Version.fromString(it) })

  // Limit the known versions to those that should be index compatible, and are not future versions
  knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
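The only functional change in this hunk is the version regex: `\d\.\d\.\d` matches only single-digit components, so double-digit releases such as `5.6.10` were silently dropped from `knownVersions`. A minimal Java sketch of the difference (the sample version strings are invented for illustration):

```java
import java.util.List;

public class VersionRegexDemo {
    public static void main(String[] args) {
        List<String> published = List.of("5.6.9", "5.6.10", "6.10.2");
        // Old pattern: exactly one digit per component -> only "5.6.9" survives
        published.stream()
                 .filter(v -> v.matches("\\d\\.\\d\\.\\d"))
                 .forEach(System.out::println);
        // New pattern: one or more digits per component -> all three survive
        published.stream()
                 .filter(v -> v.matches("\\d+\\.\\d+\\.\\d+"))
                 .forEach(System.out::println);
    }
}
```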
@@ -2,7 +2,7 @@ call "%~dp0elasticsearch-env.bat" || exit /b 1

if defined ES_ADDITIONAL_SOURCES (
  for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do (
    call %~dp0%%a
    call "%~dp0%%a"
  )
)
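The quoting fix above (`call %~dp0%%a` becoming `call "%~dp0%%a"`) matters when the Elasticsearch home directory contains spaces: unquoted, the expanded path is split on whitespace and `call` receives a broken script path.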
@@ -120,6 +120,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
    public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
    public static final int V_5_6_10_ID = 5061099;
    public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
    public static final int V_5_6_11_ID = 5061199;
    public static final Version V_5_6_11 = new Version(V_5_6_11_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
    public static final int V_6_0_0_alpha1_ID = 6000001;
    public static final Version V_6_0_0_alpha1 =
        new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);

@@ -232,6 +234,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
                return V_6_0_0_alpha2;
            case V_6_0_0_alpha1_ID:
                return V_6_0_0_alpha1;
            case V_5_6_11_ID:
                return V_5_6_11;
            case V_5_6_10_ID:
                return V_5_6_10;
            case V_5_6_9_ID:
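The new `V_5_6_10_ID` and `V_5_6_11_ID` constants follow the numeric scheme implied by the surrounding IDs. A hypothetical helper (not part of the diff) makes the encoding explicit:

```java
public class VersionIdDemo {
    // Inferred from the constants above: major * 1_000_000 + minor * 10_000
    // + revision * 100 + build, where 99 appears to mark a release build and
    // low values (01, 02, ...) mark pre-releases such as alpha1.
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(versionId(5, 6, 10, 99)); // 5061099 == V_5_6_10_ID
        System.out.println(versionId(6, 0, 0, 1));   // 6000001 == V_6_0_0_alpha1_ID
    }
}
```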
@@ -5,7 +5,7 @@
<titleabbrev>Close Jobs</titleabbrev>
++++

This API enables you to close one or more jobs.
Closes one or more jobs.
A job can be opened and closed multiple times throughout its lifecycle.

A closed job cannot receive data or perform analysis

@@ -5,7 +5,7 @@
<titleabbrev>Delete Events from Calendar</titleabbrev>
++++

This API enables you to delete scheduled events from a calendar.
Deletes scheduled events from a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Delete Jobs from Calendar</titleabbrev>
++++

This API enables you to delete jobs from a calendar.
Deletes jobs from a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Delete Calendar</titleabbrev>
++++

This API enables you to delete a calendar.
Deletes a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Delete {dfeeds-cap}</titleabbrev>
++++

This API enables you to delete an existing {dfeed}.
Deletes an existing {dfeed}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Delete Jobs</titleabbrev>
++++

This API enables you to delete an existing anomaly detection job.
Deletes an existing anomaly detection job.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Delete Model Snapshots</titleabbrev>
++++

This API enables you to delete an existing model snapshot.
Deletes an existing model snapshot.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Flush Jobs</titleabbrev>
++++

This API forces any buffered data to be processed by the job.
Forces any buffered data to be processed by the job.

==== Request

@@ -5,8 +5,7 @@
<titleabbrev>Forecast Jobs</titleabbrev>
++++

This API uses historical behavior to predict the future behavior of a time
series.
Predict the future behavior of a time series by using historical behavior.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Buckets</titleabbrev>
++++

This API enables you to retrieve job results for one or more buckets.
Retrieves job results for one or more buckets.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Scheduled Events</titleabbrev>
++++

This API enables you to retrieve information about the scheduled events in
Retrieves information about the scheduled events in
calendars.

@@ -5,7 +5,7 @@
<titleabbrev>Get Calendars</titleabbrev>
++++

This API enables you to retrieve configuration information for calendars.
Retrieves configuration information for calendars.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Categories</titleabbrev>
++++

This API enables you to retrieve job results for one or more categories.
Retrieves job results for one or more categories.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get {dfeed-cap} Statistics</titleabbrev>
++++

This API enables you to retrieve usage information for {dfeeds}.
Retrieves usage information for {dfeeds}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get {dfeeds-cap}</titleabbrev>
++++

This API enables you to retrieve configuration information for {dfeeds}.
Retrieves configuration information for {dfeeds}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Influencers</titleabbrev>
++++

This API enables you to retrieve job results for one or more influencers.
Retrieves job results for one or more influencers.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Job Statistics</titleabbrev>
++++

This API enables you to retrieve usage information for jobs.
Retrieves usage information for jobs.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Jobs</titleabbrev>
++++

This API enables you to retrieve configuration information for jobs.
Retrieves configuration information for jobs.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Overall Buckets</titleabbrev>
++++

This API enables you to retrieve overall bucket results that summarize the
Retrieves overall bucket results that summarize the
bucket results of multiple jobs.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Records</titleabbrev>
++++

This API enables you to retrieve anomaly records for a job.
Retrieves anomaly records for a job.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Get Model Snapshots</titleabbrev>
++++

This API enables you to retrieve information about model snapshots.
Retrieves information about model snapshots.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Open Jobs</titleabbrev>
++++

This API enables you to open one or more jobs.
Opens one or more jobs.
A job must be opened in order for it to be ready to receive and analyze data.
A job can be opened and closed multiple times throughout its lifecycle.

@@ -5,7 +5,7 @@
<titleabbrev>Add Events to Calendar</titleabbrev>
++++

This API enables you to post scheduled events in a calendar.
Posts scheduled events in a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Post Data to Jobs</titleabbrev>
++++

This API enables you to send data to an anomaly detection job for analysis.
Sends data to an anomaly detection job for analysis.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Preview {dfeeds-cap}</titleabbrev>
++++

This API enables you to preview a {dfeed}.
Previews a {dfeed}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Add Jobs to Calendar</titleabbrev>
++++

This API enables you to add a job to a calendar.
Adds a job to a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Create Calendar</titleabbrev>
++++

This API enables you to instantiate a calendar.
Instantiates a calendar.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Create {dfeeds-cap}</titleabbrev>
++++

This API enables you to instantiate a {dfeed}.
Instantiates a {dfeed}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Create Jobs</titleabbrev>
++++

This API enables you to instantiate a job.
Instantiates a job.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Revert Model Snapshots</titleabbrev>
++++

This API enables you to revert to a specific snapshot.
Reverts to a specific snapshot.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Start {dfeeds-cap}</titleabbrev>
++++

This API enables you to start one or more {dfeeds}.
Starts one or more {dfeeds}.
A {dfeed} must be started in order to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

@@ -5,7 +5,7 @@
<titleabbrev>Stop {dfeeds-cap}</titleabbrev>
++++

This API enables you to stop one or more {dfeeds}.
Stops one or more {dfeeds}.

A {dfeed} that is stopped ceases to retrieve data from {es}.
A {dfeed} can be started and stopped multiple times throughout its lifecycle.

@@ -5,7 +5,7 @@
<titleabbrev>Update {dfeeds-cap}</titleabbrev>
++++

This API enables you to update certain properties of a {dfeed}.
Updates certain properties of a {dfeed}.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Update Jobs</titleabbrev>
++++

This API enables you to update certain properties of a job.
Updates certain properties of a job.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Update Model Snapshots</titleabbrev>
++++

This API enables you to update certain properties of a snapshot.
Updates certain properties of a snapshot.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Validate Detectors</titleabbrev>
++++

This API validates detector configuration information.
Validates detector configuration information.

==== Request

@@ -5,7 +5,7 @@
<titleabbrev>Validate Jobs</titleabbrev>
++++

This API validates job configuration information.
Validates job configuration information.

==== Request
@@ -5,9 +5,37 @@
<titleabbrev>Delete Job</titleabbrev>
++++

experimental[]

This API deletes an existing rollup job. The job can be started or stopped; in both cases it will be deleted. Attempting
to delete a non-existing job will throw an exception.

.Deleting the job does not delete rolled up data
**********************************
When a job is deleted, that only removes the process that is actively monitoring and rolling up data.
It does not delete any previously rolled up data. This is by design; a user may wish to roll up a static dataset. Because
the dataset is static, once it has been fully rolled up there is no need to keep the indexing Rollup job around (as there
will be no new data). So the job may be deleted, leaving behind the rolled up data for analysis.

If you wish to also remove the rollup data, and the rollup index only contains the data for a single job, you can simply
delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a Delete-By-Query that
targets the Rollup job's ID in the rollup index:

[source,js]
--------------------------------------------------
POST my_rollup_index/_delete_by_query
{
  "query": {
    "term": {
      "_rollup.id": "the_rollup_job_id"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

**********************************
==== Request

`DELETE _xpack/rollup/job/<job_id>`
@@ -5,6 +5,8 @@
<titleabbrev>Get Job</titleabbrev>
++++

experimental[]

This API returns the configuration, stats and status of rollup jobs. The API can return the details for a single job,
or for all jobs.

@@ -5,6 +5,8 @@
<titleabbrev>Create Job</titleabbrev>
++++

experimental[]

This API enables you to create a rollup job. The job will be created in a `STOPPED` state, and must be
started with the <<rollup-start-job,Start Job API>>.

@@ -5,6 +5,8 @@
<titleabbrev>Get Rollup Caps</titleabbrev>
++++

experimental[]

This API returns the rollup capabilities that have been configured for an index or index pattern. This API is useful
because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only
certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on

@@ -2,6 +2,8 @@
[[rollup-job-config]]
=== Rollup Job Configuration

experimental[]

The Rollup Job Configuration contains all the details about how the rollup job should run, when it indexes documents,
and what future queries will be able to execute against the rollup index.

@@ -5,6 +5,8 @@
<titleabbrev>Rollup Search</titleabbrev>
++++

experimental[]

The Rollup Search endpoint allows searching rolled-up data using the standard query DSL. The Rollup Search endpoint
is needed because, internally, rolled-up documents utilize a different document structure than the original data. The
Rollup Search endpoint rewrites standard query DSL into a format that matches the rollup documents, then takes the response
@@ -5,6 +5,8 @@
<titleabbrev>Start Job</titleabbrev>
++++

experimental[]

This API starts an existing, stopped rollup job. If the job does not exist, an exception will be thrown.
Starting an already started job has no effect.

@@ -5,6 +5,8 @@
<titleabbrev>Stop Job</titleabbrev>
++++

experimental[]

This API stops an existing, started rollup job. If the job does not exist, an exception will be thrown.
Stopping an already stopped job has no effect.
@@ -1,6 +1,8 @@
[[rollup-api-quickref]]
== API Quick Reference

experimental[]

Most {rollup} endpoints have the following base:

[source,js]

@@ -18,7 +18,8 @@ for analysis, but at a fraction of the storage cost of raw data.
* <<rollup-getting-started,Getting Started>>
* <<rollup-api-quickref, API Quick Reference>>
* <<rollup-understanding-groups,Understanding Rollup Grouping>>
* <<rollup-search-limitations,Limitations of Rollup Search>>
* <<rollup-agg-limitations,Rollup aggregation limitations>>
* <<rollup-search-limitations,Rollup Search limitations>>

--

@@ -27,4 +28,5 @@ include::overview.asciidoc[]
include::api-quickref.asciidoc[]
include::rollup-getting-started.asciidoc[]
include::understanding-groups.asciidoc[]
include::rollup-agg-limitations.asciidoc[]
include::rollup-search-limitations.asciidoc[]
@@ -1,6 +1,8 @@
[[rollup-overview]]
== Overview

experimental[]

Time-based data (documents that are predominantly identified by their timestamp) often have associated retention policies
to manage data growth. For example, your system may be generating 500 documents every second. That will generate
43 million documents per day, and nearly 16 billion documents a year.
@@ -0,0 +1,24 @@
[[rollup-agg-limitations]]
== Rollup Aggregation Limitations

experimental[]

There are some limitations to how fields can be rolled up / aggregated. This page highlights the major limitations so that
you are aware of them.

[float]
=== Limited aggregation components

The Rollup functionality allows fields to be grouped with the following aggregations:

- Date Histogram aggregation
- Histogram aggregation
- Terms aggregation

And the following metrics are allowed to be specified for numeric fields:

- Min aggregation
- Max aggregation
- Sum aggregation
- Average aggregation
- Value Count aggregation
@@ -1,6 +1,8 @@
[[rollup-getting-started]]
== Getting Started

experimental[]

To use the Rollup feature, you need to create one or more "Rollup Jobs". These jobs run continuously in the background
and rollup the index or indices that you specify, placing the rolled documents in a secondary index (also of your choosing).
@@ -1,6 +1,8 @@
[[rollup-search-limitations]]
== Rollup Search Limitations

experimental[]

While we feel the Rollup function is extremely flexible, the nature of summarizing data means there will be some limitations. Once
live data is thrown away, you will always lose some flexibility.

@@ -100,8 +102,8 @@ The Rollup functionality allows `query`'s in the search request, but with a limi
- MatchAll Query
- Any compound query (Boolean, Boosting, ConstantScore, etc)

Furthermore, these queries can only use fields that were also saved in the rollup job. If you wish to filter on a keyword `hostname` field,
that field must have been configured in the rollup job under a `terms` grouping.
Furthermore, these queries can only use fields that were also saved in the rollup job as a `group`.
If you wish to filter on a keyword `hostname` field, that field must have been configured in the rollup job under a `terms` grouping.

If you attempt to use an unsupported query, or the query references a field that wasn't configured in the rollup job, an exception will be
thrown. We expect the list of supported queries to grow over time as more are implemented.
@@ -1,6 +1,8 @@
[[rollup-understanding-groups]]
== Understanding Groups

experimental[]

To preserve flexibility, Rollup Jobs are defined based on how future queries may need to use the data. Traditionally, systems force
the admin to make decisions about what metrics to rollup and on what interval. E.g. The average of `cpu_time` on an hourly basis. This
is limiting; if, at a future date, the admin wishes to see the average of `cpu_time` on an hourly basis _and partitioned by `host_name`_,
@@ -115,7 +115,6 @@ import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;

@@ -174,6 +173,7 @@ import org.elasticsearch.xpack.security.authz.SecuritySearchOperationListener;
import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache;
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor;
import org.elasticsearch.xpack.security.rest.SecurityRestFilter;
import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction;
import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction;
@@ -3,7 +3,7 @@
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.security.authz.accesscontrol;
package org.elasticsearch.xpack.security.ingest;

import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.ingest.AbstractProcessor;

@@ -3,11 +3,11 @@
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.security.authz.accesscontrol;
package org.elasticsearch.xpack.security.ingest;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor.Property;
import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property;

import java.util.Arrays;
import java.util.EnumSet;

@@ -3,7 +3,7 @@
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.accesscontrol;
package org.elasticsearch.xpack.security.ingest;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

@@ -11,9 +11,8 @@ import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.security.authc.Authentication;
import org.elasticsearch.xpack.core.security.authc.AuthenticationField;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor.Property;
import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.security.ingest.SetSecurityUserProcessor.Property;

import java.util.Collections;
import java.util.EnumSet;
@@ -5,7 +5,7 @@ esplugin {
  name 'x-pack-sql'
  description 'The Elasticsearch plugin that powers SQL for Elasticsearch'
  classname 'org.elasticsearch.xpack.sql.plugin.SqlPlugin'
  extendedPlugins = ['x-pack-core']
  extendedPlugins = ['x-pack-core', 'lang-painless']
}

configurations {

@@ -20,6 +20,7 @@ integTest.enabled = false

dependencies {
  compileOnly "org.elasticsearch.plugin:x-pack-core:${version}"
  compileOnly project(':modules:lang-painless')
  compile project('sql-proto')
  compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
  compile "org.antlr:antlr4-runtime:4.5.3"
@@ -1 +0,0 @@
2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0
@@ -1,26 +0,0 @@
[The "BSD license"]
Copyright (c) 2015 Terence Parr, Sam Harwell
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
   derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -51,8 +51,18 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
    protected final NodeInfo<DateTimeFunction> info() {
        return NodeInfo.create(this, ctorForInfo(), field(), timeZone());
    }

    protected abstract NodeInfo.NodeCtor2<Expression, TimeZone, DateTimeFunction> ctorForInfo();

    @Override
    protected TypeResolution resolveType() {
        if (field().dataType() == DataType.DATE) {
            return TypeResolution.TYPE_RESOLVED;
        }
        return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression (["
            + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])");
    }

    public TimeZone timeZone() {
        return timeZone;
    }

@@ -69,18 +79,12 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
            return null;
        }

        ZonedDateTime time = ZonedDateTime.ofInstant(
            Instant.ofEpochMilli(folded.getMillis()), ZoneId.of(timeZone.getID()));
        return time.get(chronoField());
        return dateTimeChrono(folded.getMillis(), timeZone.getID(), chronoField().name());
    }

    @Override
    protected TypeResolution resolveType() {
        if (field().dataType() == DataType.DATE) {
            return TypeResolution.TYPE_RESOLVED;
        }
        return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression (["
            + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])");
    public static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(tzId));
        return Integer.valueOf(time.get(ChronoField.valueOf(chronoName)));
    }

    @Override

@@ -88,28 +92,11 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
        ParamsBuilder params = paramsBuilder();

        String template = null;
        if (TimeZone.getTimeZone("UTC").equals(timeZone)) {
            // TODO: it would be nice to be able to externalize the extract function and reuse the script across all extractors
            template = formatTemplate("doc[{}].value.get" + extractFunction() + "()");
            params.variable(field.name());
        } else {
            // TODO ewwww
            /*
             * This uses the Java 8 time API because Painless doesn't whitelist creation of new
             * Joda classes.
             *
             * The actual script is
             * ZonedDateTime.ofInstant(Instant.ofEpochMilli(<insert doc field>.value.millis),
             * ZoneId.of(<insert user tz>)).get(ChronoField.get(MONTH_OF_YEAR))
             */
            template = formatTemplate("ZonedDateTime.ofInstant(Instant.ofEpochMilli(doc[{}].value.millis), "
                + "ZoneId.of({})).get(ChronoField.valueOf({}))");
            params.variable(field.name())
                .variable(timeZone.getID())
                .variable(chronoField().name());
        }

        template = formatTemplate("{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})");
        params.variable(field.name())
            .variable(timeZone.getID())
            .variable(chronoField().name());

        return new ScriptTemplate(template, params.build(), dataType());
    }

@@ -119,10 +106,6 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
        throw new UnsupportedOperationException();
    }

    protected String extractFunction() {
        return getClass().getSimpleName();
    }

    /**
     * Used for generating the painless script version of this function when the time zone is not UTC
     */

@@ -164,4 +147,4 @@ public abstract class DateTimeFunction extends UnaryScalarFunction {
    public int hashCode() {
        return Objects.hash(field(), timeZone);
    }
}
}
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.script;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils;
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.util.StringUtils;

@@ -92,6 +93,6 @@ public class ScriptTemplate {
    }

    public static String formatTemplate(String template) {
        return template.replace("{}", "params.%s");
        return template.replace("{sql}", InternalSqlScriptUtils.class.getSimpleName()).replace("{}", "params.%s");
    }
}
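The net effect of the two `ScriptTemplate` hunks: templates may now carry a `{sql}` placeholder that expands to the simple name of the whitelisted utility class. A self-contained sketch of the expansion (the literal strings mirror the diff; the class is a stand-in for the real `ScriptTemplate`):

```java
public class FormatTemplateDemo {
    // Mirrors the new formatTemplate(): "{sql}" -> utility class simple name,
    // "{}" -> positional parameter placeholder.
    static String formatTemplate(String template) {
        return template.replace("{sql}", "InternalSqlScriptUtils").replace("{}", "params.%s");
    }

    public static void main(String[] args) {
        // Prints: InternalSqlScriptUtils.dateTimeChrono(doc[params.%s].value.millis, params.%s, params.%s)
        System.out.println(formatTemplate("{sql}.dateTimeChrono(doc[{}].value.millis, {}, {})"));
    }
}
```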
@@ -0,0 +1,22 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.expression.function.scalar.whitelist;

import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction;

/**
 * Whitelisted class for SQL scripts.
 * Acts as a registry of the various static methods used <b>internally</b> by the scalar functions
 * (to simplify the whitelist definition).
 */
public final class InternalSqlScriptUtils {

    private InternalSqlScriptUtils() {}

    public static Integer dateTimeChrono(long millis, String tzId, String chronoName) {
        return DateTimeFunction.dateTimeChrono(millis, tzId, chronoName);
    }
}
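As a sanity check of what the delegated method computes, here is the same java.time logic in isolation (the epoch value and chrono field are chosen arbitrarily for illustration):

```java
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoField;

public class DateTimeChronoDemo {
    public static void main(String[] args) {
        long millis = 1_530_000_000_000L; // 2018-06-26T08:00:00Z
        ZonedDateTime time = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of("UTC"));
        // Equivalent to InternalSqlScriptUtils.dateTimeChrono(millis, "UTC", "MONTH_OF_YEAR")
        System.out.println(time.get(ChronoField.valueOf("MONTH_OF_YEAR"))); // 6
    }
}
```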
@@ -0,0 +1,35 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.sql.plugin;

import org.elasticsearch.painless.spi.PainlessExtension;
import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistLoader;
import org.elasticsearch.script.FilterScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.singletonList;

public class SqlPainlessExtension implements PainlessExtension {

    private static final Whitelist WHITELIST = WhitelistLoader.loadFromResourceFiles(SqlPainlessExtension.class, "sql_whitelist.txt");

    @Override
    public Map<ScriptContext<?>, List<Whitelist>> getContextWhitelists() {
        Map<ScriptContext<?>, List<Whitelist>> whitelist = new HashMap<>();
        List<Whitelist> list = singletonList(WHITELIST);
        whitelist.put(FilterScript.CONTEXT, list);
        whitelist.put(SearchScript.AGGS_CONTEXT, list);
        whitelist.put(SearchScript.CONTEXT, list);
        whitelist.put(SearchScript.SCRIPT_SORT_CONTEXT, list);
        return whitelist;
    }
}
@@ -0,0 +1 @@
org.elasticsearch.xpack.sql.plugin.SqlPainlessExtension
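The one-line file above is a Java service-provider registration; by convention it would live under `META-INF/services/` in a file named after the `org.elasticsearch.painless.spi.PainlessExtension` interface (the actual path is not shown in this diff), which is how Painless discovers `SqlPainlessExtension` at runtime.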
@@ -0,0 +1,12 @@
#
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#

# This file contains a whitelist for SQL specific utilities available inside SQL scripting

class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalSqlScriptUtils {

  Integer dateTimeChrono(long, String, String)
}