Merge remote-tracking branch 'es/7.x' into enrich-7.x

Martijn van Groningen 2019-08-01 13:38:03 +07:00
commit aae2f0cff2
493 changed files with 8490 additions and 3831 deletions

View File

@@ -68,7 +68,7 @@ public class ElasticsearchDistribution implements Buildable {
     }
     // package private to tests can use
-    static final Platform CURRENT_PLATFORM = OS.<Platform>conditional()
+    public static final Platform CURRENT_PLATFORM = OS.<Platform>conditional()
         .onLinux(() -> Platform.LINUX)
         .onWindows(() -> Platform.WINDOWS)
         .onMac(() -> Platform.DARWIN)
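
For illustration only (not part of this diff): widening the constant to `public` lets build code outside the package read it directly; a minimal sketch with a hypothetical consumer:

[source,java]
----
// Hypothetical consumer in another build-tools package; before this change,
// only tests in the same package could read the constant.
Platform current = ElasticsearchDistribution.CURRENT_PLATFORM;
----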

View File

@@ -34,12 +34,12 @@ import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRespons
 import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest;
 import org.elasticsearch.client.indexlifecycle.StartILMRequest;
 import org.elasticsearch.client.indexlifecycle.StopILMRequest;
-import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyResponse;
-import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyResponse;
-import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse;
+import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse;
+import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
 import java.io.IOException;
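
For context only (not part of this commit), a minimal sketch of caller code after the `snapshotlifecycle` to `slm` package rename; the `indexLifecycle()` entry point and the policy ID are assumptions:

[source,java]
----
// Imports now resolve from the renamed org.elasticsearch.client.slm package.
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse;

// `client` is an existing RestHighLevelClient; "nightly-snapshots" is a
// hypothetical policy ID.
GetSnapshotLifecyclePolicyRequest request =
    new GetSnapshotLifecyclePolicyRequest("nightly-snapshots");
GetSnapshotLifecyclePolicyResponse response =
    client.indexLifecycle().getSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT);
----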

View File

@@ -32,10 +32,10 @@ import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest
 import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest;
 import org.elasticsearch.client.indexlifecycle.StartILMRequest;
 import org.elasticsearch.client.indexlifecycle.StopILMRequest;
-import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
 import org.elasticsearch.common.Strings;
 import java.io.IOException;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.client.TimedRequest;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.client.TimedRequest;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.client.TimedRequest;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.client.TimedRequest;
 import org.elasticsearch.common.xcontent.ToXContentObject;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.client.snapshotlifecycle;
+package org.elasticsearch.client.slm;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;

View File

@@ -54,15 +54,15 @@ import org.elasticsearch.client.indexlifecycle.ShrinkAction;
 import org.elasticsearch.client.indexlifecycle.StartILMRequest;
 import org.elasticsearch.client.indexlifecycle.StopILMRequest;
 import org.elasticsearch.client.indices.CreateIndexRequest;
-import org.elasticsearch.client.snapshotlifecycle.DeleteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.ExecuteSnapshotLifecyclePolicyResponse;
-import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.GetSnapshotLifecyclePolicyResponse;
-import org.elasticsearch.client.snapshotlifecycle.PutSnapshotLifecyclePolicyRequest;
-import org.elasticsearch.client.snapshotlifecycle.SnapshotInvocationRecord;
-import org.elasticsearch.client.snapshotlifecycle.SnapshotLifecyclePolicy;
-import org.elasticsearch.client.snapshotlifecycle.SnapshotLifecyclePolicyMetadata;
+import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse;
+import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse;
+import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
+import org.elasticsearch.client.slm.SnapshotInvocationRecord;
+import org.elasticsearch.client.slm.SnapshotLifecyclePolicy;
+import org.elasticsearch.client.slm.SnapshotLifecyclePolicyMetadata;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;

View File

@@ -4,14 +4,14 @@
 :response: GetBucketsResponse
 --
 [id="{upid}-{api}"]
-=== Get Buckets API
-The Get Buckets API retrieves one or more bucket results.
+=== Get buckets API
+Retrieves one or more bucket results.
 It accepts a +{request}+ object and responds
 with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Buckets Request
+==== Get buckets request
 A +{request}+ object gets created with an existing non-null `jobId`.
@@ -19,9 +19,9 @@ A +{request}+ object gets created with an existing non-null `jobId`.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing an existing `jobId`
-==== Optional Arguments
+<1> Constructing a new request referencing an existing `jobId`.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
@@ -82,7 +82,7 @@ include-tagged::{doc-tests-file}[{api}-start]
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Buckets Response
+==== Get buckets response
 The returned +{response}+ contains the requested buckets:
@@ -90,5 +90,5 @@ The returned +{response}+ contains the requested buckets:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of buckets that were matched
-<2> The buckets retrieved
+<1> The count of buckets that were matched.
+<2> The buckets retrieved.
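
For illustration only (not in this diff), a minimal sketch of the call path these docs describe, assuming an existing `RestHighLevelClient` named `client` and a hypothetical job ID:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;

// "my-job" is a hypothetical anomaly job ID.
GetBucketsRequest request = new GetBucketsRequest("my-job");
GetBucketsResponse response =
    client.machineLearning().getBuckets(request, RequestOptions.DEFAULT);
long count = response.count();   // matched bucket count, per callout <1> above
// response.buckets() holds the retrieved buckets (callout <2> above)
----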

View File

@@ -4,13 +4,13 @@
 :response: GetCalendarEventsResponse
 --
 [id="{upid}-{api}"]
-=== Get Calendar Events API
-Retrieves a calendars events.
+=== Get calendar events API
+Retrieves a calendar's events.
 It accepts a +{request}+ and responds
 with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Calendars Request
+==== Get calendars request
 A +{request}+ requires a non-null calendar ID.
 Using the literal `_all` returns the events for all calendars.
@@ -19,9 +19,9 @@ Using the literal `_all` returns the events for all calendars.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request for the specified calendarId
-==== Optional Arguments
+<1> Constructing a new request for the specified calendarId.
+==== Optional arguments
 The following arguments are optional:
@@ -48,12 +48,12 @@ include-tagged::{doc-tests-file}[{api}-end]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-jobid]
 --------------------------------------------------
-<1> Get events for the job. When this option is used calendar_id must be `_all`
+<1> Get events for the job. When this option is used calendar_id must be `_all`.
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get calendars Response
+==== Get calendars response
 The returned +{response}+ contains the requested events:
@@ -61,5 +61,5 @@ The returned +{response}+ contains the requested events:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of events that were matched
-<2> The events retrieved
+<1> The count of events that were matched.
+<2> The events retrieved.
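
A hedged sketch of this API from Java (not in this diff); `client` is an assumed `RestHighLevelClient` and the calendar ID is hypothetical:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetCalendarEventsRequest;
import org.elasticsearch.client.ml.GetCalendarEventsResponse;

// "holidays" is a hypothetical calendar ID; the literal "_all" would return
// events for all calendars.
GetCalendarEventsRequest request = new GetCalendarEventsRequest("holidays");
GetCalendarEventsResponse response =
    client.machineLearning().getCalendarEvents(request, RequestOptions.DEFAULT);
// response.count() and response.events() expose the matched events
----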

View File

@@ -4,44 +4,45 @@
 :response: GetCalendarsResponse
 --
 [id="{upid}-{api}"]
-=== Get Calendars API
+=== Get calendars API
 Retrieves one or more calendar objects.
 It accepts a +{request}+ and responds
 with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Calendars Request
-By default a +{request}+ with no calendar Id set will return all
+==== Get calendars request
+By default, a +{request}+ with no calendar ID set will return all
 calendars. Using the literal `_all` also returns all calendars.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request for all calendars
-==== Optional Arguments
+<1> Constructing a new request for all calendars.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-id]
 --------------------------------------------------
-<1> Construct a request for the single calendar `holidays`
+<1> Construct a request for the single calendar `holidays`.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-page]
 --------------------------------------------------
-<1> The page parameters `from` and `size`. `from` specifies the number of calendars to skip.
-`size` specifies the maximum number of calendars to get. Defaults to `0` and `100` respectively.
+<1> The page parameters `from` and `size`. `from` specifies the number of
+calendars to skip. `size` specifies the maximum number of calendars to get.
+Defaults to `0` and `100` respectively.
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get calendars Response
+==== Get calendars response
 The returned +{response}+ contains the requested calendars:
@@ -49,5 +50,5 @@ The returned +{response}+ contains the requested calendars:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of calendars that were matched
-<2> The calendars retrieved
+<1> The count of calendars that were matched.
+<2> The calendars retrieved.
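
A minimal sketch (not part of this diff) of the default "all calendars" request plus paging, with `client` assumed to be an existing `RestHighLevelClient`:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.core.PageParams;
import org.elasticsearch.client.ml.GetCalendarsRequest;
import org.elasticsearch.client.ml.GetCalendarsResponse;

// No calendar ID set, so all calendars are returned (same as "_all").
GetCalendarsRequest request = new GetCalendarsRequest();
request.setPageParams(new PageParams(0, 100)); // from=0, size=100 (the defaults)
GetCalendarsResponse response =
    client.machineLearning().getCalendars(request, RequestOptions.DEFAULT);
----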

View File

@@ -4,14 +4,13 @@
 :response: GetCategoriesResponse
 --
 [id="{upid}-{api}"]
-=== Get Categories API
-The Get Categories API retrieves one or more category results.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get categories API
+Retrieves one or more category results.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Categories Request
+==== Get categories request
 A +{request}+ object gets created with an existing non-null `jobId`.
@@ -19,28 +18,29 @@ A +{request}+ object gets created with an existing non-null `jobId`.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing an existing `jobId`
-==== Optional Arguments
+<1> Constructing a new request referencing an existing `jobId`.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-category-id]
 --------------------------------------------------
-<1> The id of the category to get. Otherwise it will return all categories.
+<1> The ID of the category to get. Otherwise, it will return all categories.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-page]
 --------------------------------------------------
-<1> The page parameters `from` and `size`. `from` specifies the number of categories to skip.
-`size` specifies the maximum number of categories to get. Defaults to `0` and `100` respectively.
+<1> The page parameters `from` and `size`. `from` specifies the number of
+categories to skip. `size` specifies the maximum number of categories to get.
+Defaults to `0` and `100` respectively.
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Categories Response
+==== Get categories response
 The returned +{response}+ contains the requested categories:
@@ -48,5 +48,5 @@ The returned +{response}+ contains the requested categories:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of categories that were matched
-<2> The categories retrieved
+<1> The count of categories that were matched.
+<2> The categories retrieved.
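
A minimal sketch (not in this diff) of the request options described above; the job and category IDs are hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;

GetCategoriesRequest request = new GetCategoriesRequest("my-job"); // hypothetical job ID
request.setCategoryId(1L); // optional: a single category; omit to return all
GetCategoriesResponse response =
    client.machineLearning().getCategories(request, RequestOptions.DEFAULT);
----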

View File

@@ -4,37 +4,36 @@
 :response: GetDatafeedStatsResponse
 --
 [id="{upid}-{api}"]
-=== Get Datafeed Stats API
-The Get Datafeed Stats API provides the ability to get any number of
-{ml} datafeed's statistics in the cluster.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get datafeed stats API
+Retrieves any number of {ml} datafeeds' statistics in the cluster.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Datafeed Stats Request
-A +{request}+ object can have any number of `datafeedId`
-entries. However, they all must be non-null. An empty list is the same as
-requesting statistics for all datafeeds.
+==== Get datafeed stats request
+A +{request}+ object can have any number of `datafeedId` entries. However, they
+all must be non-null. An empty list is the same as requesting statistics for all
+datafeeds.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing existing `datafeedIds`, can contain wildcards
+<1> Constructing a new request referencing existing `datafeedIds`. It can
+contain wildcards.
 <2> Whether to ignore if a wildcard expression matches no datafeeds.
-(This includes `_all` string or when no datafeeds have been specified)
+(This includes `_all` string or when no datafeeds have been specified).
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Datafeed Stats Response
+==== Get datafeed stats response
 The returned +{response}+ contains the requested datafeed statistics:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> `count()` indicates the number of datafeeds statistics found
-<2> `datafeedStats()` is the collection of {ml} `DatafeedStats` objects found
+<1> `count()` indicates the number of datafeeds statistics found.
+<2> `datafeedStats()` is the collection of {ml} `DatafeedStats` objects found.
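
A hedged sketch (not in this diff) matching the callouts above; the datafeed ID pattern is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetDatafeedStatsRequest;
import org.elasticsearch.client.ml.GetDatafeedStatsResponse;

GetDatafeedStatsRequest request = new GetDatafeedStatsRequest("datafeed-*"); // wildcard OK
request.setAllowNoDatafeeds(true); // don't fail when the expression matches nothing
GetDatafeedStatsResponse response =
    client.machineLearning().getDatafeedStats(request, RequestOptions.DEFAULT);
// count() and datafeedStats() expose the results, per the callouts above
----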

View File

@@ -4,34 +4,35 @@
 :response: GetDatafeedResponse
 --
 [id="{upid}-{api}"]
-=== Get Datafeed API
-The Get Datafeed API provides the ability to get {ml} datafeeds in the cluster.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get datafeed API
+Retrieves configuration information about {ml} datafeeds in the cluster.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Datafeed Request
-A +{request}+ object gets can have any number of `datafeedId` entries.
-However, they all must be non-null. An empty list is the same as requesting for all datafeeds.
+==== Get datafeed request
+A +{request}+ object gets can have any number of `datafeedId` entries. However,
+they all must be non-null. An empty list is the same as requesting for all
+datafeeds.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing existing `datafeedIds`, can contain wildcards
+<1> Constructing a new request referencing existing `datafeedIds`. It can
+contain wildcards.
 <2> Whether to ignore if a wildcard expression matches no datafeeds.
-(This includes `_all` string or when no datafeeds have been specified)
+(This includes `_all` string or when no datafeeds have been specified).
 [id="{upid}-{api}-response"]
-==== Get Datafeed Response
+==== Get datafeed response
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of retrieved datafeeds
-<2> The retrieved datafeeds
+<1> The count of retrieved datafeeds.
+<2> The retrieved datafeeds.
 include::../execution.asciidoc[]
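
Similarly, a sketch (not in this diff) of fetching datafeed configurations; the ID pattern is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetDatafeedRequest;
import org.elasticsearch.client.ml.GetDatafeedResponse;

GetDatafeedRequest request = new GetDatafeedRequest("datafeed-*"); // hypothetical pattern
request.setAllowNoDatafeeds(true);
GetDatafeedResponse response =
    client.machineLearning().getDatafeed(request, RequestOptions.DEFAULT);
// the response exposes the count and the retrieved datafeed configurations
----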

View File

@@ -4,14 +4,13 @@
 :response: GetFiltersResponse
 --
 [id="{upid}-{api}"]
-=== Get Filters API
-The Get Filters API retrieves one or more filter results.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get filters API
+Retrieves one or more filter results.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Filters Request
+==== Get filters request
 A +{request}+ object gets created.
@@ -19,16 +18,16 @@ A +{request}+ object gets created.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request
-==== Optional Arguments
+<1> Constructing a new request.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-filter-id]
 --------------------------------------------------
-<1> The id of the filter to get. Otherwise it will return all filters.
+<1> The ID of the filter to get. Otherwise, it will return all filters.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -40,7 +39,7 @@ include-tagged::{doc-tests-file}[{api}-page-params]
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Filters Response
+==== Get filters response
 The returned +{response}+ contains the requested filters:
@@ -48,5 +47,5 @@ The returned +{response}+ contains the requested filters:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of filters that were matched
-<2> The filters retrieved
+<1> The count of filters that were matched.
+<2> The filters retrieved.
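
A minimal sketch (not in this diff); the filter ID is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetFiltersRequest;
import org.elasticsearch.client.ml.GetFiltersResponse;

GetFiltersRequest request = new GetFiltersRequest();
request.setFilterId("safe_domains"); // optional, hypothetical ID; omit to get all filters
GetFiltersResponse response =
    client.machineLearning().getFilters(request, RequestOptions.DEFAULT);
----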

View File

@@ -4,14 +4,13 @@
 :response: GetInfluencersResponse
 --
 [id="{upid}-{api}"]
-=== Get Influencers API
-The Get Influencers API retrieves one or more influencer results.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get influencers API
+Retrieves one or more influencer results.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Influencers Request
+==== Get influencers request
 A +{request}+ object gets created with an existing non-null `jobId`.
@@ -19,9 +18,9 @@ A +{request}+ object gets created with an existing non-null `jobId`.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing an existing `jobId`
-==== Optional Arguments
+<1> Constructing a new request referencing an existing `jobId`.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
@@ -46,14 +45,16 @@ include-tagged::{doc-tests-file}[{api}-exclude-interim]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-influencer-score]
 --------------------------------------------------
-<1> Influencers with influencer_score greater or equal than this value will be returned.
+<1> Influencers with `influencer_score` greater than or equal to this value will
+be returned.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-page]
 --------------------------------------------------
-<1> The page parameters `from` and `size`. `from` specifies the number of influencers to skip.
-`size` specifies the maximum number of influencers to get. Defaults to `0` and `100` respectively.
+<1> The page parameters `from` and `size`. `from` specifies the number of
+influencers to skip. `size` specifies the maximum number of influencers to get.
+Defaults to `0` and `100` respectively.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -70,7 +71,7 @@ include-tagged::{doc-tests-file}[{api}-start]
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Influencers Response
+==== Get influencers response
 The returned +{response}+ contains the requested influencers:
@@ -78,5 +79,5 @@ The returned +{response}+ contains the requested influencers:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of influencers that were matched
-<2> The influencers retrieved
+<1> The count of influencers that were matched.
+<2> The influencers retrieved.
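
A hedged sketch (not in this diff) of the score filter described above; the job ID is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;

GetInfluencersRequest request = new GetInfluencersRequest("my-job"); // hypothetical job ID
request.setInfluencerScore(75.0); // only influencers scoring >= 75
GetInfluencersResponse response =
    client.machineLearning().getInfluencers(request, RequestOptions.DEFAULT);
----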

View File

@@ -4,30 +4,31 @@
 :response: MlInfoResponse
 --
 [id="{upid}-{api}"]
-=== ML Get Info API
-The ML Get API provides defaults and limits used internally by {ml}.
+=== ML get info API
+Provides defaults and limits used internally by {ml}.
 These may be useful to a user interface that needs to interpret machine learning
-configurations where certain fields are missing because the end user was happy with the default value.
+configurations where certain fields are missing because the end user was happy
+with the default value.
 It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Info Request
+==== Get info request
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request
+<1> Constructing a new request.
 [id="{upid}-{api}-response"]
-==== ML Get Info Response
+==== ML get info response
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> `info` from the +{response}+ contains ml info details
+<1> `info` from the +{response}+ contains {ml} info details.
 include::../execution.asciidoc[]
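
A minimal sketch (not in this diff) of this no-argument API, with `client` assumed:

[source,java]
----
import java.util.Map;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.MlInfoRequest;
import org.elasticsearch.client.ml.MlInfoResponse;

MlInfoResponse response =
    client.machineLearning().getMlInfo(new MlInfoRequest(), RequestOptions.DEFAULT);
Map<String, Object> info = response.getInfo(); // defaults and limits as a nested map
----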

View File

@@ -4,37 +4,36 @@
 :response: GetJobStatsResponse
 --
 [id="{upid}-{api}"]
-=== Get Job Stats API
-The Get Job Stats API provides the ability to get any number of
-{ml} job's statistics in the cluster.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get {anomaly-job} stats API
+Retrieves statistics for any number of {anomaly-jobs} in the cluster.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Job Stats Request
+==== Get job stats request
 A `GetJobsStatsRequest` object can have any number of `jobId`
 entries. However, they all must be non-null. An empty list is the same as
-requesting statistics for all jobs.
+requesting statistics for all {anomaly-jobs}.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing existing `jobIds`, can contain wildcards
-<2> Whether to ignore if a wildcard expression matches no jobs.
-(This includes `_all` string or when no jobs have been specified)
+<1> Constructing a new request referencing existing `jobIds`. It can contain
+wildcards.
+<2> Whether to ignore if a wildcard expression matches no {anomaly-jobs}.
+(This includes `_all` string or when no jobs have been specified).
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Job Stats Response
+==== Get job stats response
 The returned +{response}+ contains the requested job statistics:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> `getCount()` indicates the number of jobs statistics found
-<2> `getJobStats()` is the collection of {ml} `JobStats` objects found
+<1> `getCount()` indicates the number of jobs statistics found.
+<2> `getJobStats()` is the collection of {ml} `JobStats` objects found.
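
A hedged sketch (not in this diff); the job ID pattern is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;

GetJobStatsRequest request = new GetJobStatsRequest("my-job-*"); // wildcards allowed
request.setAllowNoJobs(true); // don't fail when the expression matches nothing
GetJobStatsResponse response =
    client.machineLearning().getJobStats(request, RequestOptions.DEFAULT);
// getCount() and getJobStats() expose the results, per the callouts above
----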

View File

@@ -4,35 +4,35 @@
 :response: GetJobResponse
 --
 [id="{upid}-{api}"]
-=== Get Job API
-The Get Job API provides the ability to get {ml} jobs in the cluster.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get {anomaly-jobs} API
+Retrieves configuration information for {anomaly-jobs} in the cluster.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Job Request
+==== Get {anomaly-jobs} request
 A +{request}+ object gets can have any number of `jobId` or `groupName`
 entries. However, they all must be non-null. An empty list is the same as
-requesting for all jobs.
+requesting for all {anomaly-jobs}.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing existing `jobIds`, can contain wildcards
-<2> Whether to ignore if a wildcard expression matches no jobs.
-(This includes `_all` string or when no jobs have been specified)
+<1> Constructing a new request referencing existing `jobIds`. It can contain
+wildcards.
+<2> Whether to ignore if a wildcard expression matches no {anomaly-jobs}.
+(This includes `_all` string or when no jobs have been specified).
 [id="{upid}-{api}-response"]
-==== Get Job Response
+==== Get {anomaly-jobs} response
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> `getCount()` from the +{response}+ indicates the number of jobs found
-<2> `getJobs()` is the collection of {ml} `Job` objects found
+<1> `getCount()` from the +{response}+ indicates the number of jobs found.
+<2> `getJobs()` is the collection of {ml} `Job` objects found.
 include::../execution.asciidoc[]
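
The companion sketch (not in this diff) for fetching job configurations; IDs are hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobResponse;

GetJobRequest request = new GetJobRequest("my-job-*"); // wildcards allowed
request.setAllowNoJobs(true);
GetJobResponse response =
    client.machineLearning().getJob(request, RequestOptions.DEFAULT);
// getCount() and getJobs() expose the results, per the callouts above
----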

View File

@@ -4,14 +4,13 @@
 :response: GetModelSnapshotsResponse
 --
 [id="{upid}-{api}"]
-=== Get Model Snapshots API
-The Get Model Snapshots API retrieves one or more model snapshot results.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get model snapshots API
+Retrieves one or more model snapshot results.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Model Snapshots Request
+==== Get model snapshots request
 A +{request}+ object gets created with an existing non-null `jobId`.
@@ -19,16 +18,16 @@ A +{request}+ object gets created with an existing non-null `jobId`.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing an existing `jobId`
-==== Optional Arguments
+<1> Constructing a new request referencing an existing `jobId`.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-snapshot-id]
 --------------------------------------------------
-<1> The id of the snapshot to get. Otherwise it will return all snapshots.
+<1> The ID of the snapshot to get. Otherwise, it will return all snapshots.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -58,13 +57,14 @@ include-tagged::{doc-tests-file}[{api}-start]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-page]
 --------------------------------------------------
-<1> The page parameters `from` and `size`. `from` specifies the number of snapshots to skip.
-`size` specifies the maximum number of snapshots to retrieve. Defaults to `0` and `100` respectively.
+<1> The page parameters `from` and `size`. `from` specifies the number of
+snapshots to skip. `size` specifies the maximum number of snapshots to retrieve.
+Defaults to `0` and `100` respectively.
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Model Snapshots Response
+==== Get model snapshots response
 The returned +{response}+ contains the requested snapshots:
@@ -72,5 +72,5 @@ The returned +{response}+ contains the requested snapshots:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of snapshots that were matched
-<2> The snapshots retrieved
+<1> The count of snapshots that were matched.
+<2> The snapshots retrieved.
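
A minimal sketch (not in this diff); the job and snapshot IDs are hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetModelSnapshotsRequest;
import org.elasticsearch.client.ml.GetModelSnapshotsResponse;

GetModelSnapshotsRequest request = new GetModelSnapshotsRequest("my-job"); // hypothetical job ID
request.setSnapshotId("1541587919"); // optional, hypothetical ID; omit to return all snapshots
GetModelSnapshotsResponse response =
    client.machineLearning().getModelSnapshots(request, RequestOptions.DEFAULT);
----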

View File

@@ -4,15 +4,15 @@
 :response: GetOverallBucketsResponse
 --
 [id="{upid}-{api}"]
-=== Get Overall Buckets API
-The Get Overall Buckets API retrieves overall bucket results that
-summarize the bucket results of multiple jobs.
+=== Get overall buckets API
+Retrieves overall bucket results that summarize the bucket results of multiple
+{anomaly-jobs}.
 It accepts a +{request}+ object and responds
 with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Overall Buckets Request
+==== Get overall buckets request
 A +{request}+ object gets created with one or more `jobId`.
@@ -22,14 +22,15 @@ include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
 <1> Constructing a new request referencing job IDs `jobId1` and `jobId2`.
-==== Optional Arguments
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-bucket-span]
 --------------------------------------------------
-<1> The span of the overall buckets. Must be greater or equal to the jobs' largest `bucket_span`.
+<1> The span of the overall buckets. Must be greater or equal to the jobs'
+largest `bucket_span`.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -41,14 +42,16 @@ include-tagged::{doc-tests-file}[{api}-end]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-exclude-interim]
 --------------------------------------------------
-<1> If `true`, interim results will be excluded. Overall buckets are interim if any of the job buckets
-within the overall bucket interval are interim. Defaults to `false`.
+<1> If `true`, interim results will be excluded. Overall buckets are interim if
+any of the job buckets within the overall bucket interval are interim. Defaults
+to `false`.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-overall-score]
 --------------------------------------------------
-<1> Overall buckets with overall scores greater or equal than this value will be returned.
+<1> Overall buckets with overall scores greater or equal than this value will be
+returned.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -60,12 +63,13 @@ include-tagged::{doc-tests-file}[{api}-start]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-top-n]
 --------------------------------------------------
-<1> The number of top job bucket scores to be used in the `overall_score` calculation. Defaults to `1`.
+<1> The number of top job bucket scores to be used in the `overall_score`
+calculation. Defaults to `1`.
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Overall Buckets Response
+==== Get overall buckets response
 The returned +{response}+ contains the requested buckets:
@@ -73,5 +77,5 @@ The returned +{response}+ contains the requested buckets:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of overall buckets that were matched
-<2> The overall buckets retrieved
+<1> The count of overall buckets that were matched.
+<2> The overall buckets retrieved.
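
A hedged sketch (not in this diff) combining the bucket-span and top-N options above; job IDs are hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetOverallBucketsResponse;
import org.elasticsearch.common.unit.TimeValue;

GetOverallBucketsRequest request = new GetOverallBucketsRequest("jobId1", "jobId2");
request.setBucketSpan(TimeValue.timeValueHours(24)); // >= the jobs' largest bucket_span
request.setTopN(2); // top job bucket scores used in the overall_score calculation
GetOverallBucketsResponse response =
    client.machineLearning().getOverallBuckets(request, RequestOptions.DEFAULT);
----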

View File

@@ -4,14 +4,13 @@
 :response: GetRecordsResponse
 --
 [id="{upid}-{api}"]
-=== Get Records API
-The Get Records API retrieves one or more record results.
-It accepts a +{request}+ object and responds
-with a +{response}+ object.
+=== Get records API
+Retrieves one or more record results.
+It accepts a +{request}+ object and responds with a +{response}+ object.
 [id="{upid}-{api}-request"]
-==== Get Records Request
+==== Get records request
 A +{request}+ object gets created with an existing non-null `jobId`.
@@ -19,9 +18,9 @@ A +{request}+ object gets created with an existing non-null `jobId`.
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request]
 --------------------------------------------------
-<1> Constructing a new request referencing an existing `jobId`
-==== Optional Arguments
+<1> Constructing a new request referencing an existing `jobId`.
+==== Optional arguments
 The following arguments are optional:
 ["source","java",subs="attributes,callouts,macros"]
@@ -46,8 +45,9 @@ include-tagged::{doc-tests-file}[{api}-exclude-interim]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-page]
 --------------------------------------------------
-<1> The page parameters `from` and `size`. `from` specifies the number of records to skip.
-`size` specifies the maximum number of records to get. Defaults to `0` and `100` respectively.
+<1> The page parameters `from` and `size`. `from` specifies the number of
+records to skip. `size` specifies the maximum number of records to get. Defaults
+to `0` and `100` respectively.
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -70,7 +70,7 @@ include-tagged::{doc-tests-file}[{api}-start]
 include::../execution.asciidoc[]
 [id="{upid}-{api}-response"]
-==== Get Records Response
+==== Get records response
 The returned +{response}+ contains the requested records:
@@ -78,5 +78,5 @@ The returned +{response}+ contains the requested records:
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-response]
 --------------------------------------------------
-<1> The count of records that were matched
-<2> The records retrieved
+<1> The count of records that were matched.
+<2> The records retrieved.
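
A minimal sketch (not in this diff) of fetching records above a score threshold; the job ID is hypothetical and `client` is assumed:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.GetRecordsResponse;

GetRecordsRequest request = new GetRecordsRequest("my-job"); // hypothetical job ID
request.setRecordScore(75.0); // only records scoring >= 75
GetRecordsResponse response =
    client.machineLearning().getRecords(request, RequestOptions.DEFAULT);
// response.count() and response.records() expose the matched records
----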

View File

@@ -541,7 +541,7 @@ document is most commonly accessible through an input called `doc`.
 +
 [source,Painless]
 ----
-def input = doc['input_datetime'].value;
+ZonedDateTime input = doc['input_datetime'].value;
 String output = input.format(DateTimeFormatter.ISO_INSTANT); <1>
 ----
 <1> Note the use of a built-in DateTimeFormatter.
@@ -584,8 +584,8 @@ if (doc.containsKey('start') && doc.containsKey('end')) { <1>
 if (doc['start'].size() > 0 && doc['end'].size() > 0) { <2>
-def start = doc['start'].value;
-def end = doc['end'].value;
+ZonedDateTime start = doc['start'].value;
+ZonedDateTime end = doc['end'].value;
 long differenceInMillis = ChronoUnit.MILLIS.between(start, end);
 // handle difference in times
@@ -660,7 +660,7 @@ preferred as there is no need to parse it for comparision.
 [source,Painless]
 ----
 long now = params['now'];
-def inputDateTime = doc['input_datetime'];
+ZonedDateTime inputDateTime = doc['input_datetime'];
 long millisDateTime = zdt.toInstant().toEpochMilli();
 long elapsedTime = now - millisDateTime;
 ----
@@ -712,9 +712,194 @@ long elapsedTime = now - millisDateTime;
 String nowString = params['now'];
 ZonedDateTime nowZdt = ZonedDateTime.parse(datetime); <1>
 long now = ZonedDateTime.toInstant().toEpochMilli();
-def inputDateTime = doc['input_datetime'];
+ZonedDateTime inputDateTime = doc['input_datetime'];
 long millisDateTime = zdt.toInstant().toEpochMilli();
 long elapsedTime = now - millisDateTime;
 ----
 <1> Note this parses the same string datetime every time the script runs. Use a
 numeric datetime to avoid a significant performance hit.
==== Datetime Examples in Contexts
===== Load the Example Data
Run the following curl commands to load the data necessary for the context
examples into an Elasticsearch cluster:
. Create {ref}/mapping.html[mappings] for the sample data.
+
[source,js]
----
PUT /messages
{
"mappings": {
"properties": {
"priority": {
"type": "integer"
},
"datetime": {
"type": "date"
},
"message": {
"type": "text"
}
}
}
}
----
+
// CONSOLE
+
. Load the sample data.
+
[source,js]
----
POST /_bulk
{ "index" : { "_index" : "messages", "_id" : "1" } }
{ "priority": 1, "datetime": "2019-07-17T12:13:14Z", "message": "m1" }
{ "index" : { "_index" : "messages", "_id" : "2" } }
{ "priority": 1, "datetime": "2019-07-24T01:14:59Z", "message": "m2" }
{ "index" : { "_index" : "messages", "_id" : "3" } }
{ "priority": 2, "datetime": "1983-10-14T00:36:42Z", "message": "m3" }
{ "index" : { "_index" : "messages", "_id" : "4" } }
{ "priority": 3, "datetime": "1983-10-10T02:15:15Z", "message": "m4" }
{ "index" : { "_index" : "messages", "_id" : "5" } }
{ "priority": 3, "datetime": "1983-10-10T17:18:19Z", "message": "m5" }
{ "index" : { "_index" : "messages", "_id" : "6" } }
{ "priority": 1, "datetime": "2019-08-03T17:19:31Z", "message": "m6" }
{ "index" : { "_index" : "messages", "_id" : "7" } }
{ "priority": 3, "datetime": "2019-08-04T17:20:00Z", "message": "m7" }
{ "index" : { "_index" : "messages", "_id" : "8" } }
{ "priority": 2, "datetime": "2019-08-04T18:01:01Z", "message": "m8" }
{ "index" : { "_index" : "messages", "_id" : "9" } }
{ "priority": 3, "datetime": "1983-10-10T19:00:45Z", "message": "m9" }
{ "index" : { "_index" : "messages", "_id" : "10" } }
{ "priority": 2, "datetime": "2019-07-23T23:39:54Z", "message": "m10" }
----
+
// CONSOLE
// TEST[continued]
===== Day-of-the-Week Bucket Aggregation Example
The following example uses a
{ref}/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-script[terms aggregation]
as part of the
<<painless-bucket-script-agg-context, bucket script aggregation context>> to
display the number of messages from each day-of-the-week.
[source,js]
----
GET /messages/_search?pretty=true
{
"aggs": {
"day-of-week-count": {
"terms": {
"script": "return doc[\"datetime\"].value.getDayOfWeekEnum();"
}
}
}
}
----
// CONSOLE
// TEST[continued]
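Based on the sample data loaded above, this request should return three
messages in the `MONDAY` bucket, two each in the `WEDNESDAY` and `SUNDAY`
buckets, and one each in the `TUESDAY`, `FRIDAY`, and `SATURDAY` buckets.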
===== Morning/Evening Bucket Aggregation Example
The following example uses a
{ref}/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-script[terms aggregation]
as part of the
<<painless-bucket-script-agg-context, bucket script aggregation context>> to
display the number of messages received in the morning versus the evening.
[source,js]
----
GET /messages/_search?pretty=true
{
"aggs": {
"am-pm-count": {
"terms": {
"script": "return doc[\"datetime\"].value.getHour() < 12 ? \"AM\" : \"PM\";"
}
}
}
}
----
// CONSOLE
// TEST[continued]
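Based on the sample data loaded above, this request should return three
messages in the `AM` bucket (m2, m3, and m4) and seven in the `PM` bucket.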
===== Age of a Message Script Field Example
The following example uses a
{ref}/search-request-script-fields.html[script field] as part of the
<<painless-field-context, field context>> to display the elapsed time between
"now" and when a message was received.
[source,js]
----
GET /_search?pretty=true
{
"query" : {
"match_all": {}
},
"script_fields" : {
"message_age" : {
"script" : {
"source": "ZonedDateTime now = ZonedDateTime.ofInstant(Instant.ofEpochMilli(params[\"now\"]), ZoneId.of(\"Z\")); ZonedDateTime mdt = doc[\"datetime\"].value; String age; long years = mdt.until(now, ChronoUnit.YEARS); age = years + \"Y \"; mdt = mdt.plusYears(years); long months = mdt.until(now, ChronoUnit.MONTHS); age += months + \"M \"; mdt = mdt.plusMonths(months); long days = mdt.until(now, ChronoUnit.DAYS); age += days + \"D \"; mdt = mdt.plusDays(days); long hours = mdt.until(now, ChronoUnit.HOURS); age += hours + \"h \"; mdt = mdt.plusHours(hours); long minutes = mdt.until(now, ChronoUnit.MINUTES); age += minutes + \"m \"; mdt = mdt.plusMinutes(minutes); long seconds = mdt.until(now, ChronoUnit.SECONDS); age += hours + \"s\"; return age;",
"params": {
"now": 1574005645830
}
}
}
}
}
----
// CONSOLE
// TEST[continued]
The following shows the script broken into multiple lines:
[source,Painless]
----
ZonedDateTime now = ZonedDateTime.ofInstant(
Instant.ofEpochMilli(params['now']), ZoneId.of('Z')); <1>
ZonedDateTime mdt = doc['datetime'].value; <2>
String age;
long years = mdt.until(now, ChronoUnit.YEARS); <3>
age = years + 'Y '; <4>
mdt = mdt.plusYears(years); <5>
long months = mdt.until(now, ChronoUnit.MONTHS);
age += months + 'M ';
mdt = mdt.plusMonths(months);
long days = mdt.until(now, ChronoUnit.DAYS);
age += days + 'D ';
mdt = mdt.plusDays(days);
long hours = mdt.until(now, ChronoUnit.HOURS);
age += hours + 'h ';
mdt = mdt.plusHours(hours);
long minutes = mdt.until(now, ChronoUnit.MINUTES);
age += minutes + 'm ';
mdt = mdt.plusMinutes(minutes);
long seconds = mdt.until(now, ChronoUnit.SECONDS);
age += seconds + 's'; age += seconds + 's';
return age; <6>
----
<1> Convert the datetime "now", passed as epoch milliseconds in the
user-defined params, into a `ZonedDateTime`.
<2> Store the datetime the message was received as a `ZonedDateTime`.
<3> Find the difference in years between "now" and the datetime the message was
received.
<4> Append the difference in years to the age string, which is later returned
in the format `<years>Y <months>M ...`.
<5> Add the years to the message datetime so only the remainder of the months,
days, etc. remains as the difference between "now" and the datetime the message
was received. Repeat this pattern until the desired granularity is reached
(seconds in this example).
<6> Return the age of the message in the format
`<years>Y <months>M <days>D <hours>h <minutes>m <seconds>s`.


@ -104,7 +104,7 @@ POST /sales/_search?size=0
// TEST[setup:sales] // TEST[setup:sales]
<1> Documents without a value in the `date` field will be added to the "Older" <1> Documents without a value in the `date` field will be added to the "Older"
bucket, as if they had a date value of "1899-12-31". bucket, as if they had a date value of "1976-11-30".
[[date-format-pattern]] [[date-format-pattern]]
==== Date Format/Pattern ==== Date Format/Pattern


@ -162,7 +162,7 @@ respective document counts in brackets:
| 6 | Product F (2) | Product H (14) | Product H (28) | 6 | Product F (2) | Product H (14) | Product H (28)
| 7 | Product G (2) | Product I (10) | Product Q (2) | 7 | Product G (2) | Product I (10) | Product Q (2)
| 8 | Product H (2) | Product Q (6) | Product D (1) | 8 | Product H (2) | Product Q (6) | Product D (1)
| 9 | Product I (1) | Product J (8) | | 9 | Product I (1) | Product J (6) |
| 10 | Product J (1) | Product C (4) | | 10 | Product J (1) | Product C (4) |
|========================================================= |=========================================================


@ -9,21 +9,17 @@ your application to Elasticsearch 7.3.
See also <<release-highlights>> and <<es-release-notes>>. See also <<release-highlights>> and <<es-release-notes>>.
coming[7.3.0]
//NOTE: The notable-breaking-changes tagged regions are re-used in the //NOTE: The notable-breaking-changes tagged regions are re-used in the
//Installation and Upgrade Guide //Installation and Upgrade Guide
//tag::notable-breaking-changes[] //tag::notable-breaking-changes[]
[discrete]
// end::notable-breaking-changes[]
[[breaking_73_mapping_changes]] [[breaking_73_mapping_changes]]
=== Mapping changes === Mapping changes
`dense_vector` field now requires `dims` parameter, specifying the number of `dense_vector` field now requires `dims` parameter, specifying the number of
dimensions for document and query vectors for this field. dimensions for document and query vectors for this field.
[float] [discrete]
==== Defining multi-fields within multi-fields ==== Defining multi-fields within multi-fields
Previously, it was possible to define a multi-field within a multi-field. Previously, it was possible to define a multi-field within a multi-field.
@ -33,29 +29,41 @@ in 8.0. To resolve the issue, all instances of `fields` that occur within a
chained `fields` blocks into a single level, or by switching to `copy_to` if chained `fields` blocks into a single level, or by switching to `copy_to` if
appropriate. appropriate.
[discrete]
[[breaking_73_plugin_changes]] [[breaking_73_plugin_changes]]
=== Plugins changes === Plugins changes
[float] [discrete]
==== IndexStorePlugin changes ==== IndexStorePlugin changes
IndexStore and DirectoryService have been replaced by a stateless and simple IndexStore and DirectoryService have been replaced by a stateless and simple
DirectoryFactory interface to create custom Lucene directory instances per shard. DirectoryFactory interface to create custom Lucene directory instances per shard.
[float] [discrete]
[[breaking_73_search_changes]] [[breaking_73_search_changes]]
=== Search Changes === Search changes
[float] [discrete]
==== Deprecation of queries ==== Deprecation of queries
The `common` query has been deprecated. The same functionality can be achieved The `common` query has been deprecated. The same functionality can be achieved
by the `match` query if the total number of hits is not tracked. by the `match` query if the total number of hits is not tracked.
[float] [discrete]
===== Deprecation of query parameters ===== Deprecation of query parameters
The `cutoff_frequency` parameter has been deprecated for `match` and `multi_match` The `cutoff_frequency` parameter has been deprecated for `match` and `multi_match`
queries. The same functionality can be achieved without any configuration provided queries. The same functionality can be achieved without any configuration provided
that the total number of hits is not tracked. that the total number of hits is not tracked.
[discrete]
[[breaking_73_ccr_changes]]
=== CCR changes
[discrete]
==== Directly modifying aliases of follower indices is no longer allowed
Aliases are now replicated to a follower from its leader, so directly modifying
aliases on follower indices is no longer allowed.
// end::notable-breaking-changes[]


@ -64,6 +64,20 @@ results the job might have recently produced or might produce in the future.
[[ml-close-job-query-parms]] [[ml-close-job-query-parms]]
==== {api-query-parms-title} ==== {api-query-parms-title}
`allow_no_jobs`::
(Optional, boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no jobs that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.
The default value is `true`, which returns an empty `jobs` array
when there are no matches and the subset of results when there are partial
matches. If this parameter is `false`, the request returns a `404` status code
when there are no matches or only partial matches.
--
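For example, the following request (the `low-priority-*` wildcard is a
hypothetical job group used only for illustration) returns a `404` status code
if no matching jobs exist, instead of silently returning an empty `jobs` array:

[source,js]
--------------------------------------------------
POST _ml/anomaly_detectors/low-priority-*/_close?allow_no_jobs=false
--------------------------------------------------
// CONSOLE
// TEST[skip:uses a hypothetical job group]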
`force`:: `force`::
(Optional, boolean) Use to close a failed job, or to forcefully close a job (Optional, boolean) Use to close a failed job, or to forcefully close a job
which has not responded to its initial close request. which has not responded to its initial close request.
@ -72,6 +86,13 @@ results the job might have recently produced or might produce in the future.
(Optional, time units) Controls the time to wait until a job has closed. (Optional, time units) Controls the time to wait until a job has closed.
The default value is 30 minutes. The default value is 30 minutes.
[[ml-close-job-response-codes]]
==== {api-response-codes-title}
`404` (Missing resources)::
If `allow_no_jobs` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.
[[ml-close-job-example]] [[ml-close-job-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -6,7 +6,7 @@
<titleabbrev>Get buckets</titleabbrev> <titleabbrev>Get buckets</titleabbrev>
++++ ++++
Retrieves job results for one or more buckets. Retrieves {anomaly-job} results for one or more buckets.
[[ml-get-bucket-request]] [[ml-get-bucket-request]]
==== {api-request-title} ==== {api-request-title}
@ -36,7 +36,7 @@ bucket.
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job (Required, string) Identifier for the {anomaly-job}.
`<timestamp>`:: `<timestamp>`::
(Optional, string) The timestamp of a single bucket result. If you do not (Optional, string) The timestamp of a single bucket result. If you do not


@ -59,7 +59,8 @@ The API returns the following information:
(string) A numerical character string that uniquely identifies the calendar. (string) A numerical character string that uniquely identifies the calendar.
`job_ids`::: `job_ids`:::
(array) An array of job identifiers. For example: `["total-requests"]`. (array) An array of {anomaly-job} identifiers. For example:
`["total-requests"]`.
[[ml-get-calendar-example]] [[ml-get-calendar-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -6,7 +6,7 @@
<titleabbrev>Get categories</titleabbrev> <titleabbrev>Get categories</titleabbrev>
++++ ++++
Retrieves job results for one or more categories. Retrieves {anomaly-job} results for one or more categories.
[[ml-get-category-request]] [[ml-get-category-request]]
==== {api-request-title} ==== {api-request-title}
@ -35,11 +35,12 @@ For more information about categories, see
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job. (Required, string) Identifier for the {anomaly-job}.
`<category_id>`:: `<category_id>`::
(Optional, long) Identifier for the category. If you do not specify this (Optional, long) Identifier for the category. If you do not specify this
parameter, the API returns information about all categories in the job. parameter, the API returns information about all categories in the
{anomaly-job}.
[[ml-get-category-request-body]] [[ml-get-category-request-body]]
==== {api-request-body-title} ==== {api-request-body-title}


@ -6,7 +6,7 @@
<titleabbrev>Get influencers</titleabbrev> <titleabbrev>Get influencers</titleabbrev>
++++ ++++
Retrieves job results for one or more influencers. Retrieves {anomaly-job} results for one or more influencers.
[[ml-get-influencer-request]] [[ml-get-influencer-request]]
==== {api-request-title} ==== {api-request-title}
@ -27,7 +27,7 @@ privileges. See {stack-ov}/security-privileges.html[Security privileges] and
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job. (Required, string) Identifier for the {anomaly-job}.
[[ml-get-influencer-request-body]] [[ml-get-influencer-request-body]]
==== {api-request-body-title} ==== {api-request-body-title}


@ -1,12 +1,12 @@
[role="xpack"] [role="xpack"]
[testenv="platinum"] [testenv="platinum"]
[[ml-get-job-stats]] [[ml-get-job-stats]]
=== Get job statistics API === Get {anomaly-job} statistics API
++++ ++++
<titleabbrev>Get job statistics</titleabbrev> <titleabbrev>Get job statistics</titleabbrev>
++++ ++++
Retrieves usage information for jobs. Retrieves usage information for {anomaly-jobs}.
[[ml-get-job-stats-request]] [[ml-get-job-stats-request]]
==== {api-request-title} ==== {api-request-title}
@ -29,10 +29,10 @@ Retrieves usage information for jobs.
[[ml-get-job-stats-desc]] [[ml-get-job-stats-desc]]
==== {api-description-title} ==== {api-description-title}
You can get statistics for multiple jobs in a single API request by using a You can get statistics for multiple {anomaly-jobs} in a single API request by
group name, a comma-separated list of jobs, or a wildcard expression. You can using a group name, a comma-separated list of jobs, or a wildcard expression.
get statistics for all jobs by using `_all`, by specifying `*` as the You can get statistics for all {anomaly-jobs} by using `_all`, by specifying `*`
`<job_id>`, or by omitting the `<job_id>`. as the `<job_id>`, or by omitting the `<job_id>`.
IMPORTANT: This API returns a maximum of 10,000 jobs. IMPORTANT: This API returns a maximum of 10,000 jobs.
@ -40,9 +40,26 @@ IMPORTANT: This API returns a maximum of 10,000 jobs.
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Optional, string) An identifier for the job. It can be a job identifier, a (Optional, string) An identifier for the {anomaly-job}. It can be a
group name, or a wildcard expression. If you do not specify one of these job identifier, a group name, or a wildcard expression. If you do not specify
options, the API returns statistics for all jobs. one of these options, the API returns statistics for all {anomaly-jobs}.
[[ml-get-job-stats-query-parms]]
==== {api-query-parms-title}
`allow_no_jobs`::
(Optional, boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no jobs that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.
The default value is `true`, which returns an empty `jobs` array
when there are no matches and the subset of results when there are partial
matches. If this parameter is `false`, the request returns a `404` status code
when there are no matches or only partial matches.
--
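For example, the following request (the `low-priority-*` wildcard is
hypothetical) returns a `404` status code if no matching jobs exist:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/low-priority-*/_stats?allow_no_jobs=false
--------------------------------------------------
// CONSOLE
// TEST[skip:uses a hypothetical job group]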
[[ml-get-job-stats-results]] [[ml-get-job-stats-results]]
==== {api-response-body-title} ==== {api-response-body-title}
@ -50,8 +67,15 @@ IMPORTANT: This API returns a maximum of 10,000 jobs.
The API returns the following information: The API returns the following information:
`jobs`:: `jobs`::
(array) An array of job statistics objects. (array) An array of {anomaly-job} statistics objects.
For more information, see <<ml-jobstats,Job Statistics>>. For more information, see <<ml-jobstats>>.
[[ml-get-job-stats-response-codes]]
==== {api-response-codes-title}
`404` (Missing resources)::
If `allow_no_jobs` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.
[[ml-get-job-stats-example]] [[ml-get-job-stats-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -1,12 +1,12 @@
[role="xpack"] [role="xpack"]
[testenv="platinum"] [testenv="platinum"]
[[ml-get-job]] [[ml-get-job]]
=== Get jobs API === Get {anomaly-jobs} API
++++ ++++
<titleabbrev>Get jobs</titleabbrev> <titleabbrev>Get jobs</titleabbrev>
++++ ++++
Retrieves configuration information for jobs. Retrieves configuration information for {anomaly-jobs}.
[[ml-get-job-request]] [[ml-get-job-request]]
==== {api-request-title} ==== {api-request-title}
@ -29,10 +29,10 @@ Retrieves configuration information for jobs.
[[ml-get-job-desc]] [[ml-get-job-desc]]
==== {api-description-title} ==== {api-description-title}
You can get information for multiple jobs in a single API request by using a You can get information for multiple {anomaly-jobs} in a single API request by
group name, a comma-separated list of jobs, or a wildcard expression. You can using a group name, a comma-separated list of jobs, or a wildcard expression.
get information for all jobs by using `_all`, by specifying `*` as the You can get information for all {anomaly-jobs} by using `_all`, by specifying
`<job_id>`, or by omitting the `<job_id>`. `*` as the `<job_id>`, or by omitting the `<job_id>`.
IMPORTANT: This API returns a maximum of 10,000 jobs. IMPORTANT: This API returns a maximum of 10,000 jobs.
@ -40,9 +40,26 @@ IMPORTANT: This API returns a maximum of 10,000 jobs.
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Optional, string) Identifier for the job. It can be a job identifier, a group (Optional, string) Identifier for the {anomaly-job}. It can be a job
name, or a wildcard expression. If you do not specify one of these options, identifier, a group name, or a wildcard expression. If you do not specify one
the API returns information for all jobs. of these options, the API returns information for all {anomaly-jobs}.
[[ml-get-job-query-parms]]
==== {api-query-parms-title}
`allow_no_jobs`::
(Optional, boolean) Specifies what to do when the request:
+
--
* Contains wildcard expressions and there are no jobs that match.
* Contains the `_all` string or no identifiers and there are no matches.
* Contains wildcard expressions and there are only partial matches.
The default value is `true`, which returns an empty `jobs` array
when there are no matches and the subset of results when there are partial
matches. If this parameter is `false`, the request returns a `404` status code
when there are no matches or only partial matches.
--
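For example, the following request (again using a hypothetical `low-priority-*`
wildcard) returns a `404` status code rather than an empty `jobs` array when
nothing matches:

[source,js]
--------------------------------------------------
GET _ml/anomaly_detectors/low-priority-*?allow_no_jobs=false
--------------------------------------------------
// CONSOLE
// TEST[skip:uses a hypothetical job group]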
[[ml-get-job-results]] [[ml-get-job-results]]
==== {api-response-body-title} ==== {api-response-body-title}
@ -50,8 +67,15 @@ IMPORTANT: This API returns a maximum of 10,000 jobs.
The API returns the following information: The API returns the following information:
`jobs`:: `jobs`::
(array) An array of job resources. (array) An array of {anomaly-job} resources.
For more information, see <<ml-job-resource,Job Resources>>. For more information, see <<ml-job-resource>>.
[[ml-get-job-response-codes]]
==== {api-response-codes-title}
`404` (Missing resources)::
If `allow_no_jobs` is `false`, this code indicates that there are no
resources that match the request or only partial matches for the request.
[[ml-get-job-example]] [[ml-get-job-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -6,8 +6,8 @@
<titleabbrev>Get overall buckets</titleabbrev> <titleabbrev>Get overall buckets</titleabbrev>
++++ ++++
Retrieves overall bucket results that summarize the Retrieves overall bucket results that summarize the bucket results of multiple
bucket results of multiple jobs. {anomaly-jobs}.
[[ml-get-overall-buckets-request]] [[ml-get-overall-buckets-request]]
==== {api-request-title} ==== {api-request-title}
@ -31,45 +31,46 @@ privileges. See {stack-ov}/security-privileges.html[Security privileges] and
[[ml-get-overall-buckets-desc]] [[ml-get-overall-buckets-desc]]
==== {api-description-title} ==== {api-description-title}
You can summarize the bucket results for all jobs by using `_all` or by You can summarize the bucket results for all {anomaly-jobs} by using `_all` or
specifying `*` as the `<job_id>`. by specifying `*` as the `<job_id>`.
An overall bucket has a span equal to the largest `bucket_span` value for the An overall bucket has a span equal to the largest `bucket_span` value for the
specified jobs. specified {anomaly-jobs}.
The `overall_score` is calculated by combining the scores of all The `overall_score` is calculated by combining the scores of all the buckets
the buckets within the overall bucket span. First, the maximum `anomaly_score` per within the overall bucket span. First, the maximum `anomaly_score` per
job in the overall bucket is calculated. Then the `top_n` of those scores are {anomaly-job} in the overall bucket is calculated. Then the `top_n` of those
averaged to result in the `overall_score`. This means that you can fine-tune scores are averaged to result in the `overall_score`. This means that you can
the `overall_score` so that it is more or less sensitive to the number fine-tune the `overall_score` so that it is more or less sensitive to the number
of jobs that detect an anomaly at the same time. For example, if you set `top_n` of jobs that detect an anomaly at the same time. For example, if you set `top_n`
to `1`, the `overall_score` is the maximum bucket to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is
score in the overall bucket. Alternatively, if you set `top_n` to the number of high only when all jobs detect anomalies in that overall bucket.
jobs, the `overall_score` is high only when all jobs detect anomalies in that
overall bucket.
In addition, the optional parameter `bucket_span` may be used in order In addition, the optional parameter `bucket_span` may be used in order
to request overall buckets that span longer than the largest job's `bucket_span`. to request overall buckets that span longer than the `bucket_span` of the
When set, the `overall_score` will be the max `overall_score` of the corresponding largest {anomaly-job}. When set, the `overall_score` will be the max
overall buckets with a span equal to the largest job's `bucket_span`. `overall_score` of the corresponding overall buckets with a span equal to the
`bucket_span` of the largest {anomaly-job}.
[[ml-get-overall-buckets-path-parms]] [[ml-get-overall-buckets-path-parms]]
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job. It can be a job identifier, a group (Required, string) Identifier for the {anomaly-job}. It can be a job
name, a comma-separated list of jobs or groups, or a wildcard expression. identifier, a group name, a comma-separated list of jobs or groups, or a
wildcard expression.
[[ml-get-overall-buckets-request-body]] [[ml-get-overall-buckets-request-body]]
==== {api-request-body-title} ==== {api-request-body-title}
`allow_no_jobs`:: `allow_no_jobs`::
(Optional, boolean) If `false` and the `job_id` does not match any job, an (Optional, boolean) If `false` and the `job_id` does not match any
error occurs. The default value is `true`. {anomaly-jobs}, an error occurs. The default value is `true`.
`bucket_span`:: `bucket_span`::
(Optional, string) The span of the overall buckets. Must be greater or equal (Optional, string) The span of the overall buckets. Must be greater or equal
to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`. to the `bucket_span` of the largest {anomaly-job}. Defaults to the
`bucket_span` of the largest {anomaly-job}.
`end`:: `end`::
(Optional, string) Returns overall buckets with timestamps earlier than this (Optional, string) Returns overall buckets with timestamps earlier than this
@ -88,8 +89,8 @@ overall buckets with a span equal to the largest job's `bucket_span`.
(Optional, string) Returns overall buckets with timestamps after this time. (Optional, string) Returns overall buckets with timestamps after this time.
`top_n`:: `top_n`::
(Optional, integer) The number of top job bucket scores to be used in the (Optional, integer) The number of top {anomaly-job} bucket scores to be used
`overall_score` calculation. The default value is `1`. in the `overall_score` calculation. The default value is `1`.
[[ml-get-overall-buckets-results]] [[ml-get-overall-buckets-results]]
==== {api-response-body-title} ==== {api-response-body-title}
@ -103,7 +104,8 @@ The API returns the following information:
[[ml-get-overall-buckets-example]] [[ml-get-overall-buckets-example]]
==== {api-examples-title} ==== {api-examples-title}
The following example gets overall buckets for jobs with IDs matching `job-*`: The following example gets overall buckets for {anomaly-jobs} with IDs matching
`job-*`:
[source,js] [source,js]
-------------------------------------------------- --------------------------------------------------


@ -6,7 +6,7 @@
<titleabbrev>Get records</titleabbrev> <titleabbrev>Get records</titleabbrev>
++++ ++++
Retrieves anomaly records for a job. Retrieves anomaly records for an {anomaly-job}.
[[ml-get-record-request]] [[ml-get-record-request]]
==== {api-request-title} ==== {api-request-title}
@ -27,7 +27,7 @@ privileges. See {stack-ov}/security-privileges.html[Security privileges] and
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job. (Required, string) Identifier for the {anomaly-job}.
[[ml-get-record-request-body]] [[ml-get-record-request-body]]
==== {api-request-body-title} ==== {api-request-body-title}
@ -66,7 +66,7 @@ The API returns the following information:
`records`:: `records`::
(array) An array of record objects. For more information, see (array) An array of record objects. For more information, see
<<ml-results-records,Records>>. <<ml-results-records>>.
[[ml-get-record-example]] [[ml-get-record-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -26,7 +26,7 @@ Retrieves information about model snapshots.
==== {api-path-parms-title} ==== {api-path-parms-title}
`<job_id>`:: `<job_id>`::
(Required, string) Identifier for the job. (Required, string) Identifier for the {anomaly-job}.
`<snapshot_id>`:: `<snapshot_id>`::
(Optional, string) Identifier for the model snapshot. If you do not specify (Optional, string) Identifier for the model snapshot. If you do not specify
@ -61,7 +61,7 @@ The API returns the following information:
`model_snapshots`:: `model_snapshots`::
(array) An array of model snapshot objects. For more information, see (array) An array of model snapshot objects. For more information, see
<<ml-snapshot-resource,Model Snapshots>>. <<ml-snapshot-resource>>.
[[ml-get-snapshot-example]] [[ml-get-snapshot-example]]
==== {api-examples-title} ==== {api-examples-title}


@ -6,8 +6,8 @@ causing an OutOfMemoryError. Each breaker specifies a limit for how much memory
it can use. Additionally, there is a parent-level breaker that specifies the it can use. Additionally, there is a parent-level breaker that specifies the
total amount of memory that can be used across all breakers. total amount of memory that can be used across all breakers.
These settings can be dynamically updated on a live cluster with the Except where noted otherwise, these settings can be dynamically updated on a
<<cluster-update-settings,cluster-update-settings>> API. live cluster with the <<cluster-update-settings,cluster-update-settings>> API.
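For example, a dynamic breaker setting such as `indices.breaker.total.limit`
can be adjusted on a running cluster (the limit value here is illustrative
only):

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
  "transient": {
    "indices.breaker.total.limit": "60%"
  }
}
--------------------------------------------------
// CONSOLE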
[[parent-circuit-breaker]] [[parent-circuit-breaker]]
[float] [float]
@ -17,8 +17,9 @@ The parent-level breaker can be configured with the following settings:
`indices.breaker.total.use_real_memory`:: `indices.breaker.total.use_real_memory`::
Whether the parent breaker should take real memory usage into account (`true`) or only _Static_ setting determining whether the parent breaker should take real
consider the amount that is reserved by child circuit breakers (`false`). Defaults to `true`. memory usage into account (`true`) or only consider the amount that is
reserved by child circuit breakers (`false`). Defaults to `true`.
`indices.breaker.total.limit`:: `indices.breaker.total.limit`::


@ -40,14 +40,15 @@ GET /_search
[[boosting-top-level-params]] [[boosting-top-level-params]]
==== Top-level parameters for `boosting` ==== Top-level parameters for `boosting`
`positive` (Required):: `positive`::
Query you wish to run. Any returned documents must match this query. (Required, query object) Query you wish to run. Any returned documents must
match this query.
`negative` (Required):: `negative`::
+ +
-- --
Query used to decrease the <<query-filter-context, relevance score>> of matching (Required, query object) Query used to decrease the <<query-filter-context,
documents. relevance score>> of matching documents.
If a returned document matches the `positive` query and this query, the If a returned document matches the `positive` query and this query, the
`boosting` query calculates the final <<query-filter-context, relevance score>> `boosting` query calculates the final <<query-filter-context, relevance score>>
@ -57,7 +58,7 @@ for the document as follows:
. Multiply the score by the `negative_boost` value. . Multiply the score by the `negative_boost` value.
-- --
`negative_boost` (Required):: `negative_boost`::
Floating point number between `0` and `1.0` used to decrease the (Required, float) Floating point number between `0` and `1.0` used to decrease
<<query-filter-context, relevance scores>> of documents matching the `negative` the <<query-filter-context, relevance scores>> of documents matching the
query. `negative` query.


@ -29,14 +29,14 @@ GET /_search
`filter`:: `filter`::
+ +
-- --
<<query-dsl-bool-query, Filter query>> you wish to run. Any returned documents (Required, query object) <<query-dsl-bool-query, Filter query>> you wish to run.
must match this query. Required. Any returned documents must match this query.
Filter queries do not calculate <<query-filter-context, relevance scores>>. To Filter queries do not calculate <<query-filter-context, relevance scores>>. To
speed up performance, {es} automatically caches frequently used filter queries. speed up performance, {es} automatically caches frequently used filter queries.
-- --
`boost`:: `boost`::
Floating point number used as the constant <<query-filter-context, relevance (Optional, float) Floating point number used as the constant
score>> for every document matching the `filter` query. Default is `1.0`. <<query-filter-context, relevance score>> for every document matching the
Optional. `filter` query. Defaults to `1.0`.


@ -37,17 +37,18 @@ GET /_search
[[query-dsl-dis-max-query-top-level-params]] [[query-dsl-dis-max-query-top-level-params]]
==== Top-level parameters for `dis_max` ==== Top-level parameters for `dis_max`
`queries` (Required):: `queries`::
(array of query objects) Contains one or more query clauses. Returned documents (Required, array of query objects) Contains one or more query clauses. Returned
**must match one or more** of these queries. If a document matches multiple documents **must match one or more** of these queries. If a document matches
queries, {es} uses the highest <<query-filter-context, relevance score>>. multiple queries, {es} uses the highest <<query-filter-context, relevance
score>>.
`tie_breaker` (Optional):: `tie_breaker`::
+ +
-- --
(float) Floating point number between `0` and `1.0` used to increase the (Optional, float) Floating point number between `0` and `1.0` used to increase
<<query-filter-context, relevance scores>> of documents matching multiple query the <<query-filter-context, relevance scores>> of documents matching multiple
clauses. Defaults to `0.0`. query clauses. Defaults to `0.0`.
You can use the `tie_breaker` value to assign higher relevance scores to You can use the `tie_breaker` value to assign higher relevance scores to
documents that contain the same term in multiple fields than documents that documents that contain the same term in multiple fields than documents that


@ -4,81 +4,38 @@
<titleabbrev>Distance feature</titleabbrev> <titleabbrev>Distance feature</titleabbrev>
++++ ++++
The `distance_feature` query is a specialized query that only works Boosts the <<query-filter-context, relevance score>> of documents closer to a
on <<date, `date`>>, <<date_nanos, `date_nanos`>> or <<geo-point,`geo_point`>> provided `origin` date or point. For example, you can use this query to give
fields. Its goal is to boost documents' scores based on proximity more weight to documents closer to a certain date or location.
to some given origin. For example, use this query if you want to
give more weight to documents with dates closer to a certain date,
or to documents with locations closer to a certain location.
This query is called `distance_feature` query, because it dynamically You can use the `distance_feature` query to find the nearest neighbors to a
calculates distances between the given origin and documents' field values, location. You can also use the query in a <<query-dsl-bool-query,`bool`>>
and use these distances as features to boost the documents' scores. search's `should` filter to add boosted relevance scores to the `bool` query's
scores.
`distance_feature` query is typically used on its own to find the nearest
neighbors to a given point, or put in a `should` clause of a
<<query-dsl-bool-query,`bool`>> query so that its score is added to the score
of the query.
Compared to using <<query-dsl-function-score-query,`function_score`>> or other
ways to modify the score, this query has the benefit of being able to
efficiently skip non-competitive hits when
<<search-uri-request,`track_total_hits`>> is not set to `true`.
==== Syntax of distance_feature query
`distance_feature` query has the following syntax:
[source,js]
--------------------------------------------------
"distance_feature": {
"field": <field>,
"origin": <origin>,
"pivot": <pivot>,
"boost" : <boost>
}
--------------------------------------------------
// NOTCONSOLE
[horizontal]
`field`::
Required parameter. Defines the name of the field on which to calculate
distances. Must be a field of the type `date`, `date_nanos` or `geo_point`,
and must be indexed (`"index": true`, which is the default) and has
<<doc-values, doc values>> (`"doc_values": true`, which is the default).
`origin`::
Required parameter. Defines a point of origin used for calculating
distances. Must be a date for date and date_nanos fields,
and a geo-point for geo_point fields. Date math (for example `now-1h`) is
supported for a date origin.
`pivot`::
Required parameter. Defines the distance from origin at which the computed
score will equal to a half of the `boost` parameter. Must be
a `number+date unit` ("1h", "10d",...) for date and date_nanos fields,
and a `number + geo unit` ("1km", "12m",...) for geo fields.
`boost`::
Optional parameter with a default value of `1`. Defines the factor by which
to multiply the score. Must be a non-negative float number.
The `distance_feature` query computes a document's score as following: [[distance-feature-query-ex-request]]
==== Example request
`score = boost * pivot / (pivot + distance)` [[distance-feature-index-setup]]
===== Index setup
To use the `distance_feature` query, your index must include a <<date, `date`>>,
<<date_nanos, `date_nanos`>> or <<geo-point,`geo_point`>> field.
where `distance` is the absolute difference between the origin and To see how you can set up an index for the `distance_feature` query, try the
a document's field value. following example.
==== Example using distance_feature query . Create an `items` index with the following field mapping:
+
--
Let's look at an example. We index several documents containing * `name`, a <<keyword,`keyword`>> field
information about sales items, such as name, production date, * `production_date`, a <<date, `date`>> field
and location. * `location`, a <<geo-point,`geo_point`>> field
[source,js] [source,js]
-------------------------------------------------- ----
PUT items PUT /items
{ {
"mappings": { "mappings": {
"properties": { "properties": {
@ -94,15 +51,24 @@ PUT items
} }
} }
} }
----
// CONSOLE
// TESTSETUP
--
PUT items/_doc/1 . Index several documents to this index.
+
--
[source,js]
----
PUT /items/_doc/1?refresh
{ {
"name" : "chocolate", "name" : "chocolate",
"production_date": "2018-02-01", "production_date": "2018-02-01",
"location": [-71.34, 41.12] "location": [-71.34, 41.12]
} }
PUT items/_doc/2 PUT /items/_doc/2?refresh
{ {
"name" : "chocolate", "name" : "chocolate",
"production_date": "2018-01-01", "production_date": "2018-01-01",
@ -110,24 +76,29 @@ PUT items/_doc/2
} }
PUT items/_doc/3 PUT /items/_doc/3?refresh
{ {
"name" : "chocolate", "name" : "chocolate",
"production_date": "2017-12-01", "production_date": "2017-12-01",
"location": [-71.3, 41.12] "location": [-71.3, 41.12]
} }
----
POST items/_refresh
--------------------------------------------------
// CONSOLE // CONSOLE
--
We look for all chocolate items, but we also want chocolates
that are produced recently (closer to the date `now`) [[distance-feature-query-ex-query]]
to be ranked higher. ===== Example queries
[[distance-feature-query-date-ex]]
====== Boost documents based on date
The following `bool` search returns documents with a `name` value of
`chocolate`. The search also uses the `distance_feature` query to increase the
relevance score of documents with a `production_date` value closer to `now`.
[source,js] [source,js]
-------------------------------------------------- ----
GET items/_search GET /items/_search
{ {
"query": { "query": {
"bool": { "bool": {
@ -146,17 +117,18 @@ GET items/_search
} }
} }
} }
-------------------------------------------------- ----
// CONSOLE // CONSOLE
// TEST[continued]
We can look for all chocolate items, but we also want chocolates [[distance-feature-query-distance-ex]]
that are produced locally (closer to our geo origin) ====== Boost documents based on location
come first in the result list. The following `bool` search returns documents with a `name` value of
`chocolate`. The search also uses the `distance_feature` query to increase the
relevance score of documents with a `location` value closer to `[-71.3, 41.15]`.
[source,js] [source,js]
-------------------------------------------------- ----
GET items/_search GET /items/_search
{ {
"query": { "query": {
"bool": { "bool": {
@ -175,6 +147,83 @@ GET items/_search
} }
} }
} }
-------------------------------------------------- ----
// CONSOLE // CONSOLE
// TEST[continued]
[[distance-feature-top-level-params]]
==== Top-level parameters for `distance_feature`
`field`::
(Required, string) Name of the field used to calculate distances. This field
must meet the following criteria:
* Be a <<date, `date`>>, <<date_nanos, `date_nanos`>> or
<<geo-point,`geo_point`>> field
* Have an <<mapping-index,`index`>> mapping parameter value of `true`, which is
the default
* Have an <<doc-values,`doc_values`>> mapping parameter value of `true`, which
is the default
`origin`::
+
--
(Required, string) Date or point of origin used to calculate distances.
If the `field` value is a <<date, `date`>> or <<date_nanos, `date_nanos`>>
field, the `origin` value must be a <<date-format-pattern,date>>.
<<date-math,Date Math>>, such as `now-1h`, is supported.
If the `field` value is a <<geo-point,`geo_point`>> field, the `origin` value
must be a geopoint.
--
`pivot`::
+
--
(Required, <<time-units,time unit>> or <<distance-units,distance unit>>)
Distance from the `origin` at which relevance scores receive half of the `boost`
value.
If the `field` value is a <<date, `date`>> or <<date_nanos, `date_nanos`>>
field, the `pivot` value must be a <<time-units,time unit>>, such as `1h` or
`10d`.
If the `field` value is a <<geo-point,`geo_point`>> field, the `pivot` value
must be a <<distance-units,distance unit>>, such as `1km` or `12m`.
--
`boost`::
+
--
(Optional, float) Floating point number used to multiply the
<<query-filter-context, relevance score>> of matching documents. This value
cannot be negative. Defaults to `1.0`.
--
[[distance-feature-notes]]
==== Notes
[[distance-feature-calculation]]
===== How the `distance_feature` query calculates relevance scores
The `distance_feature` query dynamically calculates the distance between the
`origin` value and a document's field values. It then uses this distance as a
feature to boost the <<query-filter-context, relevance score>> of closer
documents.
The `distance_feature` query calculates a document's <<query-filter-context,
relevance score>> as follows:
----
relevance score = boost * pivot / (pivot + distance)
----
The `distance` is the absolute difference between the `origin` value and a
document's field value.
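For example, with the default `boost` of `1.0` and a `pivot` of `7d`, a
document whose field value is exactly seven days from the `origin` receives a
relevance score of `1.0 * 7 / (7 + 7) = 0.5`, half the `boost` value. A
document whose field value equals the `origin` receives the full `boost` of
`1.0`.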
[[distance-feature-skip-hits]]
===== Skip non-competitive hits
Unlike the <<query-dsl-function-score-query,`function_score`>> query or other
ways to change <<query-filter-context, relevance scores>>, the
`distance_feature` query efficiently skips non-competitive hits when the
<<search-uri-request,`track_total_hits`>> parameter is **not** `true`.


@ -32,9 +32,10 @@ GET /_search
[[exists-query-top-level-params]] [[exists-query-top-level-params]]
==== Top-level parameters for `exists` ==== Top-level parameters for `exists`
`field`:: `field`::
Name of the field you wish to search. (Required, string) Name of the field you wish to search.
+ +
While a field is deemed non-existent if the JSON value is `null` or `[]`, these While a field is deemed non-existent if the JSON value is `null` or `[]`, these
values will indicate the field does exist:
+ +
* Empty strings, such as `""` or `"-"` * Empty strings, such as `""` or `"-"`
* Arrays containing `null` and another value, such as `[null, "foo"]` * Arrays containing `null` and another value, such as `[null, "foo"]`


@ -73,19 +73,19 @@ GET /_search
==== Top-level parameters for `has_child` ==== Top-level parameters for `has_child`
`type`:: `type`::
(string) Required. Name of the child relationship mapped for the (Required, string) Name of the child relationship mapped for the
<<parent-join,join>> field. <<parent-join,join>> field.
`query`:: `query`::
(query object) Required. Query you wish to run on child documents of the `type` (Required, query object) Query you wish to run on child documents of the `type`
field. If a child document matches the search, the query returns the parent field. If a child document matches the search, the query returns the parent
document. document.
`ignore_unmapped`:: `ignore_unmapped`::
+ +
-- --
(boolean) Optional. Indicates whether to ignore an unmapped `type` and not return (Optional, boolean) Indicates whether to ignore an unmapped `type` and not
any documents instead of an error. Defaults to `false`. return any documents instead of an error. Defaults to `false`.
If `false`, {es} returns an error if the `type` is unmapped. If `false`, {es} returns an error if the `type` is unmapped.
@ -94,19 +94,19 @@ You can use this parameter to query multiple indices that may not contain the
-- --
`max_children`:: `max_children`::
(integer) Optional. Maximum number of child documents that match the `query` (Optional, integer) Maximum number of child documents that match the `query`
allowed for a returned parent document. If the parent document exceeds this allowed for a returned parent document. If the parent document exceeds this
limit, it is excluded from the search results. limit, it is excluded from the search results.
`min_children`:: `min_children`::
(integer) Optional. Minimum number of child documents that match the `query` (Optional, integer) Minimum number of child documents that match the `query`
required to match the query for a returned parent document. If the parent required to match the query for a returned parent document. If the parent
document does not meet this limit, it is excluded from the search results. document does not meet this limit, it is excluded from the search results.
`score_mode`:: `score_mode`::
+ +
-- --
(string) Optional. Indicates how scores for matching child documents affect the (Optional, string) Indicates how scores for matching child documents affect the
root parent document's <<query-filter-context,relevance score>>. Valid values root parent document's <<query-filter-context,relevance score>>. Valid values
are: are:


@ -26,4 +26,4 @@ GET /_search
==== Top-level parameters for `ids` ==== Top-level parameters for `ids`
`values`:: `values`::
An array of <<mapping-id-field, document IDs>>. (Required, array of strings) An array of <<mapping-id-field, document IDs>>.


@ -64,14 +64,15 @@ GET /my_index/_search
[[nested-top-level-params]] [[nested-top-level-params]]
==== Top-level parameters for `nested` ==== Top-level parameters for `nested`
`path` (Required):: `path`::
(string) Path to the nested object you wish to search. (Required, string) Path to the nested object you wish to search.
`query` (Required):: `query`::
+ +
-- --
(query object) Query you wish to run on nested objects in the `path`. If an (Required, query object) Query you wish to run on nested objects in the `path`.
object matches the search, the `nested` query returns the root parent document. If an object matches the search, the `nested` query returns the root parent
document.
You can search nested fields using dot notation that includes the complete path, You can search nested fields using dot notation that includes the complete path,
such as `obj1.name`. such as `obj1.name`.
@ -81,11 +82,12 @@ inner nested query to automatically match the relevant nesting level, rather
than root, if it exists within another nested query. than root, if it exists within another nested query.
-- --
`score_mode` (Optional):: `score_mode`::
+ +
-- --
(string) Indicates how scores for matching child objects affect the root (Optional, string) Indicates how scores for matching child objects affect the
parent document's <<query-filter-context,relevance score>>. Valid values are: root parent document's <<query-filter-context,relevance score>>. Valid values
are:
`avg` (Default):: `avg` (Default)::
Use the mean relevance score of all matching child objects. Use the mean relevance score of all matching child objects.
@ -104,11 +106,11 @@ parent documents a score of `0`.
Add together the relevance scores of all matching child objects. Add together the relevance scores of all matching child objects.
-- --
`ignore_unmapped` (Optional):: `ignore_unmapped`::
+ +
-- --
(boolean) Indicates whether to ignore an unmapped `path` and not return any (Optional, boolean) Indicates whether to ignore an unmapped `path` and not
documents instead of an error. Defaults to `false`. return any documents instead of an error. Defaults to `false`.
If `false`, {es} returns an error if the `path` is an unmapped field. If `false`, {es} returns an error if the `path` is an unmapped field.


@ -4,33 +4,66 @@
<titleabbrev>Prefix</titleabbrev> <titleabbrev>Prefix</titleabbrev>
++++ ++++
Matches documents that have fields containing terms with a specified Returns documents that contain a specific prefix in a provided field.
prefix (*not analyzed*). The prefix query maps to Lucene `PrefixQuery`.
The following matches documents where the user field contains a term [[prefix-query-ex-request]]
that starts with `ki`: ==== Example request
The following search returns documents where the `user` field contains a term
that begins with `ki`.
[source,js] [source,js]
-------------------------------------------------- ----
GET /_search GET /_search
{ "query": { {
"prefix" : { "user" : "ki" } "query": {
} "prefix": {
"user": {
"value": "ki"
}
}
}
} }
-------------------------------------------------- ----
// CONSOLE // CONSOLE
A boost can also be associated with the query: [[prefix-query-top-level-params]]
==== Top-level parameters for `prefix`
`<field>`::
(Required, object) Field you wish to search.
[[prefix-query-field-params]]
==== Parameters for `<field>`
`value`::
(Required, string) Beginning characters of terms you wish to find in the
provided `<field>`.
`rewrite`::
(Optional, string) Method used to rewrite the query. For valid values and more
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.
[[prefix-query-notes]]
==== Notes
[[prefix-query-short-ex]]
===== Short request example
You can simplify the `prefix` query syntax by combining the `<field>` and
`value` parameters. For example:
[source,js] [source,js]
-------------------------------------------------- ----
GET /_search GET /_search
{ "query": { {
"prefix" : { "user" : { "value" : "ki", "boost" : 2.0 } } "query": {
} "prefix" : { "user" : "ki" }
}
} }
-------------------------------------------------- ----
// CONSOLE // CONSOLE
This multi term query allows you to control how it gets rewritten using the [[prefix-query-index-prefixes]]
<<query-dsl-multi-term-rewrite,rewrite>> ===== Speed up prefix queries
parameter. You can speed up prefix queries using the <<index-prefixes,`index_prefixes`>>
mapping parameter. If enabled, {es} indexes prefixes between 2 and 5
characters in a separate field. This lets {es} run prefix queries more
efficiently at the cost of a larger index.
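The following sketch shows such a mapping. The index name `my-index` is
hypothetical, and the `min_chars` and `max_chars` values shown are the
defaults:

[source,js]
----
PUT /my-index
{
  "mappings": {
    "properties": {
      "user": {
        "type": "text",
        "index_prefixes": {
          "min_chars": 2,
          "max_chars": 5
        }
      }
    }
  }
}
----
// CONSOLE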


@ -1,27 +1,38 @@
[[query-filter-context]] [[query-filter-context]]
== Query and filter context == Query and filter context
The behaviour of a query clause depends on whether it is used in _query context_ or [float]
in _filter context_: [[relevance-scores]]
=== Relevance scores
Query context:: By default, Elasticsearch sorts matching search results by **relevance
+ score**, which measures how well each document matches a query.
--
A query clause used in query context answers the question ``__How well does this The relevance score is a positive floating point number, returned in the
`_score` meta-field of the <<search-request-body,search>> API. The higher the
`_score`, the more relevant the document. While each query type can calculate
relevance scores differently, score calculation also depends on whether the
query clause is run in a **query** or **filter** context.
[float]
[[query-context]]
=== Query context
In the query context, a query clause answers the question ``__How well does this
document match this query clause?__'' Besides deciding whether or not the document match this query clause?__'' Besides deciding whether or not the
document matches, the query clause also calculates a `_score` representing how document matches, the query clause also calculates a relevance score in the
well the document matches, relative to other documents. `_score` meta-field.
Query context is in effect whenever a query clause is passed to a `query` parameter, Query context is in effect whenever a query clause is passed to a `query`
such as the `query` parameter in the <<request-body-search-query,`search`>> API. parameter, such as the `query` parameter in the
-- <<request-body-search-query,search>> API.
Filter context:: [float]
+ [[filter-context]]
-- === Filter context
In _filter_ context, a query clause answers the question ``__Does this document In a filter context, a query clause answers the question ``__Does this
match this query clause?__'' The answer is a simple Yes or No -- no scores are document match this query clause?__'' The answer is a simple Yes or No -- no
calculated. Filter context is mostly used for filtering structured data, e.g. scores are calculated. Filter context is mostly used for filtering structured
data, e.g.
* __Does this +timestamp+ fall into the range 2015 to 2016?__ * __Does this +timestamp+ fall into the range 2015 to 2016?__
* __Is the +status+ field set to ++"published"++__? * __Is the +status+ field set to ++"published"++__?
@ -34,8 +45,10 @@ parameter, such as the `filter` or `must_not` parameters in the
<<query-dsl-bool-query,`bool`>> query, the `filter` parameter in the <<query-dsl-bool-query,`bool`>> query, the `filter` parameter in the
<<query-dsl-constant-score-query,`constant_score`>> query, or the <<query-dsl-constant-score-query,`constant_score`>> query, or the
<<search-aggregations-bucket-filter-aggregation,`filter`>> aggregation. <<search-aggregations-bucket-filter-aggregation,`filter`>> aggregation.
--
[float]
[[query-filter-context-ex]]
=== Example of query and filter contexts
Below is an example of query clauses being used in query and filter context Below is an example of query clauses being used in query and filter context
in the `search` API. This query will match documents where all of the following in the `search` API. This query will match documents where all of the following
conditions are met: conditions are met:
@ -80,4 +93,4 @@ significand's precision will be converted to floats with loss of precision.
TIP: Use query clauses in query context for conditions which should affect the TIP: Use query clauses in query context for conditions which should affect the
score of matching documents (i.e. how well does the document match), and use score of matching documents (i.e. how well does the document match), and use
all other query clauses in filter context. all other query clauses in filter context.


@ -35,33 +35,33 @@ GET _search
`<field>`:: `<field>`::
+ +
-- --
Field you wish to search. (Required, object) Field you wish to search.
-- --
[[range-query-field-params]] [[range-query-field-params]]
==== Parameters for `<field>` ==== Parameters for `<field>`
`gt`:: `gt`::
Greater than. Optional. (Optional) Greater than.
`gte`:: `gte`::
Greater than or equal to. Optional. (Optional) Greater than or equal to.
`lt`:: `lt`::
Less than. Optional. (Optional) Less than.
`lte`:: `lte`::
Less than or equal to. Optional. (Optional) Less than or equal to.
`format`:: `format`::
+ +
-- --
Date format used to convert `date` values in the query. (Optional, string) Date format used to convert `date` values in the query.
By default, {es} uses the <<mapping-date-format,date `format`>> provided in the By default, {es} uses the <<mapping-date-format,date `format`>> provided in the
`<field>`'s mapping. This value overrides that mapping format. `<field>`'s mapping. This value overrides that mapping format.
For valid syntax, see <<mapping-date-format,`format`>>. Optional. For valid syntax, see <<mapping-date-format,`format`>>.
[WARNING] [WARNING]
==== ====
@ -79,8 +79,8 @@ to `1970-01-10T00:00:00.000Z`.
`relation`:: `relation`::
+ +
-- --
Indicates how the range query matches values for `range` fields. Optional. Valid (Optional, string) Indicates how the range query matches values for `range`
values are: fields. Valid values are:
`INTERSECTS` (Default):: `INTERSECTS` (Default)::
Matches documents with a range field value that intersects the query's range. Matches documents with a range field value that intersects the query's range.
@ -95,10 +95,11 @@ Matches documents with a range field value entirely within the query's range.
`time_zone`:: `time_zone`::
+ +
-- --
(Optional, string)
https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal https://en.wikipedia.org/wiki/List_of_UTC_time_offsets[Coordinated Universal
Time (UTC) offset] or Time (UTC) offset] or
https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[IANA time zone] https://en.wikipedia.org/wiki/List_of_tz_database_time_zones[IANA time zone]
used to convert `date` values in the query to UTC. Optional. used to convert `date` values in the query to UTC.
Valid values are ISO 8601 UTC offsets, such as `+01:00` or `-08:00`, and IANA Valid values are ISO 8601 UTC offsets, such as `+01:00` or `-08:00`, and IANA
time zone IDs, such as `America/Los_Angeles`. time zone IDs, such as `America/Los_Angeles`.
@ -120,9 +121,8 @@ convert a value of `now/d`.
`boost`:: `boost`::
+ +
-- --
Floating point number used to decrease or increase the (Optional, float) Floating point number used to decrease or increase the
<<query-filter-context, relevance scores>> of a query. Default is `1.0`. <<query-filter-context, relevance scores>> of a query. Defaults to `1.0`.
Optional.
You can use the `boost` parameter to adjust relevance scores for searches You can use the `boost` parameter to adjust relevance scores for searches
containing two or more queries. containing two or more queries.
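For example, the following sketch (the `timestamp` field name and the date
values are hypothetical) matches dates within calendar year 2015 and doubles
the relevance score of matching documents:

[source,js]
----
GET /_search
{
  "query": {
    "range": {
      "timestamp": {
        "gte": "2015-01-01",
        "lte": "2015-12-31",
        "format": "yyyy-MM-dd",
        "boost": 2.0
      }
    }
  }
}
----
// CONSOLE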


@ -4,33 +4,58 @@
<titleabbrev>Rank feature</titleabbrev>
++++

Boosts the <<relevance-scores,relevance score>> of documents based on the
numeric value of a <<rank-feature,`rank_feature`>> or
<<rank-features,`rank_features`>> field.

The `rank_feature` query is typically used in the `should` clause of a
<<query-dsl-bool-query,`bool`>> query so its relevance scores are added to other
scores from the `bool` query.

Unlike the <<query-dsl-function-score-query,`function_score`>> query or other
ways to change <<relevance-scores,relevance scores>>, the
`rank_feature` query efficiently skips non-competitive hits when the
<<search-uri-request,`track_total_hits`>> parameter is **not** `true`. This can
dramatically improve query speed.

[[rank-feature-query-functions]]
==== Rank feature functions

To calculate relevance scores based on rank feature fields, the `rank_feature`
query supports the following mathematical functions:

* <<rank-feature-query-saturation,Saturation>>
* <<rank-feature-query-logarithm,Logarithm>>
* <<rank-feature-query-sigmoid,Sigmoid>>

If you don't know where to start, we recommend using the `saturation` function.
If no function is provided, the `rank_feature` query uses the `saturation`
function by default.
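As a minimal sketch, such a query can omit the function entirely and rely on
`saturation` (the `pagerank` field comes from the index setup below):

[source,js]
----
GET /test/_search
{
  "query": {
    "rank_feature": {
      "field": "pagerank"
    }
  }
}
----
// CONSOLE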
[[rank-feature-query-ex-request]]
==== Example request
[[rank-feature-query-index-setup]]
===== Index setup
To use the `rank_feature` query, your index must include a
<<rank-feature,`rank_feature`>> or <<rank-features,`rank_features`>> field
mapping. To see how you can set up an index for the `rank_feature` query, try
the following example.
Create a `test` index with the following field mappings:
- `pagerank`, a <<rank-feature,`rank_feature`>> field which measures the
importance of a website
- `url_length`, a <<rank-feature,`rank_feature`>> field which contains the
length of the website's URL. For this example, a long URL correlates negatively
to relevance, indicated by a `positive_score_impact` value of `false`.
- `topics`, a <<rank-features,`rank_features`>> field which contains a list of
topics and a measure of how well each document is connected to this topic
[source,js]
----
PUT /test
{
  "mappings": {
    "properties": {
@ -47,8 +72,16 @@ PUT test
    }
  }
}
----
// CONSOLE
// TESTSETUP

Index several documents to the `test` index.

[source,js]
----
PUT /test/_doc/1?refresh
{
  "url": "http://en.wikipedia.org/wiki/2016_Summer_Olympics",
  "content": "Rio 2016",
@ -60,10 +93,10 @@ PUT test/_doc/1
  }
}

PUT /test/_doc/2?refresh
{
  "url": "http://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix",
  "content": "Formula One motor race held on 13 November 2016",
  "pagerank": 50.3,
  "url_length": 47,
  "topics": {
@ -73,7 +106,7 @@ PUT test/_doc/2
  }
}

PUT /test/_doc/3?refresh
{
  "url": "http://en.wikipedia.org/wiki/Deadpool_(film)",
  "content": "Deadpool is a 2016 American superhero film",
@ -84,10 +117,18 @@ PUT test/_doc/3
"super hero": 65 "super hero": 65
} }
} }
----
// CONSOLE
POST test/_refresh [[rank-feature-query-ex-query]]
===== Example query
GET test/_search The following query searches for `2016` and boosts relevance scores based or
`pagerank`, `url_length`, and the `sports` topic.
[source,js]
----
GET /test/_search
{
  "query": {
    "bool": {
@ -120,31 +161,80 @@ GET test/_search
    }
  }
}
----
// CONSOLE
[[rank-feature-top-level-params]]
==== Top-level parameters for `rank_feature`

`field`::
(Required, string) <<rank-feature,`rank_feature`>> or
<<rank-features,`rank_features`>> field used to boost
<<relevance-scores,relevance scores>>.

`boost`::
+
--
(Optional, float) Floating point number used to decrease or increase
<<relevance-scores,relevance scores>>. Defaults to `1.0`.

Boost values are relative to the default value of `1.0`. A boost value between
`0` and `1.0` decreases the relevance score. A value greater than `1.0`
increases the relevance score.
--

`saturation`::
+
--
(Optional, <<rank-feature-query-saturation,function object>>) Saturation
function used to boost <<relevance-scores,relevance scores>> based on the
value of the rank feature `field`. If no function is provided, the `rank_feature`
query defaults to the `saturation` function. See
<<rank-feature-query-saturation,Saturation>> for more information.

Only one function (`saturation`, `log`, or `sigmoid`) can be provided.
--

`log`::
+
--
(Optional, <<rank-feature-query-logarithm,function object>>) Logarithmic
function used to boost <<relevance-scores,relevance scores>> based on the
value of the rank feature `field`. See
<<rank-feature-query-logarithm,Logarithm>> for more information.

Only one function (`saturation`, `log`, or `sigmoid`) can be provided.
--

`sigmoid`::
+
--
(Optional, <<rank-feature-query-sigmoid,function object>>) Sigmoid function used
to boost <<relevance-scores,relevance scores>> based on the value of the
rank feature `field`. See <<rank-feature-query-sigmoid,Sigmoid>> for more
information.

Only one function (`saturation`, `log`, or `sigmoid`) can be provided.
--
[[rank-feature-query-notes]]
==== Notes
[[rank-feature-query-saturation]]
===== Saturation
The `saturation` function gives a score equal to `S / (S + pivot)`, where `S` is
the value of the rank feature field and `pivot` is a configurable pivot value, so
that the result is less than `0.5` if `S` is less than the pivot and greater
than `0.5` otherwise. Scores are always in `(0,1)`.

If the rank feature has a negative score impact, then the function is
computed as `pivot / (S + pivot)`, which decreases when `S` increases.
[source,js]
--------------------------------------------------
GET /test/_search
{
  "query": {
    "rank_feature": {
@ -157,16 +247,15 @@ GET test/_search
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
If a `pivot` value is not provided, {es} computes a default value equal to the
approximate geometric mean of all rank feature values in the index. We recommend
using this default value if you haven't had the opportunity to train a good
pivot value.
[source,js]
--------------------------------------------------
GET /test/_search
{
  "query": {
    "rank_feature": {
@ -177,20 +266,18 @@ GET test/_search
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
[[rank-feature-query-logarithm]]
===== Logarithm

The `log` function gives a score equal to `log(scaling_factor + S)`, where `S`
is the value of the rank feature field and `scaling_factor` is a configurable
scaling factor. Scores are unbounded.

This function only supports rank features that have a positive score impact.
[source,js]
--------------------------------------------------
GET /test/_search
{
  "query": {
    "rank_feature": {
@ -203,23 +290,21 @@ GET test/_search
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
[[rank-feature-query-sigmoid]]
===== Sigmoid

The `sigmoid` function is an extension of `saturation` which adds a configurable
exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. Like for the
`saturation` function, `pivot` is the value of `S` that gives a score of `0.5`,
and scores are in `(0,1)`.

The `exponent` must be positive and is typically in `[0.5, 1]`. A
good value should be computed via training. If you don't have the opportunity to
do so, we recommend you use the `saturation` function instead.
[source,js]
--------------------------------------------------
GET /test/_search
{
  "query": {
    "rank_feature": {
@ -232,5 +317,4 @@ GET test/_search
  }
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
View File
@ -4,12 +4,15 @@
<titleabbrev>Script</titleabbrev>
++++

Filters documents based on a provided <<modules-scripting-using,script>>. The
`script` query is typically used in a <<query-filter-context,filter context>>.

[[script-query-ex-request]]
==== Example request
[source,js]
----
GET /_search
{
  "query": {
@ -25,18 +28,29 @@ GET /_search
    }
  }
}
----
// CONSOLE
[[script-top-level-params]]
==== Top-level parameters for `script`

`script`::
(Required, <<modules-scripting-using, script object>>) Contains a script to run
as a query. This script must return a boolean value, `true` or `false`.

[[script-query-notes]]
==== Notes

[[script-query-custom-params]]
===== Custom Parameters

Like <<query-filter-context,filters>>, scripts are cached for faster execution.
If you frequently change the arguments of a script, we recommend you store them
in the script's `params` parameter. For example:
[source,js]
----
GET /_search
{
  "query": {
@ -55,6 +69,5 @@ GET /_search
    }
  }
}
----
// CONSOLE
View File
@ -43,18 +43,18 @@ GET /_search
[[term-top-level-params]]
==== Top-level parameters for `term`

`<field>`::
(Required, object) Field you wish to search.

[[term-field-params]]
==== Parameters for `<field>`

`value`::
(Required, string) Term you wish to find in the provided `<field>`. To return a
document, the term must exactly match the field value, including whitespace and
capitalization.

`boost`::
(Optional, float) Floating point number used to decrease or increase the
<<query-filter-context, relevance scores>> of a query. Defaults to `1.0`.
+
You can use the `boost` parameter to adjust relevance scores for searches
containing two or more queries.
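As a sketch of `boost` in action, two `term` queries weighted differently inside
a `bool` query (the field and values are illustrative):

[source,js]
----
GET /_search
{
  "query": {
    "bool": {
      "should": [
        { "term": { "user": { "value": "kimchy", "boost": 2.0 } } },
        { "term": { "user": { "value": "elkbee" } } }
      ]
    }
  }
}
----
// CONSOLE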
View File
@ -34,7 +34,7 @@ GET /_search
`<field>`::
+
--
(Optional, object) Field you wish to search.

The value of this parameter is an array of terms you wish to find in the
provided field. To return a document, one or more terms must exactly match a
@ -52,9 +52,8 @@ To use the field values of an existing document as search terms, use the
`boost`::
+
--
(Optional, float) Floating point number used to decrease or increase the
<<query-filter-context, relevance scores>> of a query. Defaults to `1.0`.

You can use the `boost` parameter to adjust relevance scores for searches
containing two or more queries.
@ -95,15 +94,16 @@ To perform a terms lookup, use the following parameters.
[[query-dsl-terms-lookup-params]]
====== Terms lookup parameters

`index`::
(Optional, string) Name of the index from which to fetch field values.

`id`::
(Optional, string) <<mapping-id-field,ID>> of the document from which to fetch
field values.

`path`::
+
--
(Optional, string) Name of the field from which to fetch field values. {es} uses
these values as search terms for the query.

If the field values include an array of nested inner objects, you can access
@ -111,9 +111,9 @@ those objects using dot notation syntax.
--

`routing`::
(Optional, string) Custom <<mapping-routing-field, routing value>> of the
document from which to fetch term values. If a custom routing value was provided
when the document was indexed, this parameter is required.

[[query-dsl-terms-lookup-example]]
====== Terms lookup example
View File
@ -155,7 +155,7 @@ GET /job-candidates/_search
==== Top-level parameters for `terms_set`

`<field>`::
(Required, object) Field you wish to search.

[[terms-set-field-params]]
==== Parameters for `<field>`
@ -163,23 +163,23 @@ Field you wish to search.
`terms`::
+
--
(Required, array of strings) Array of terms you wish to find in the provided
`<field>`. To return a document, a required number of terms must exactly match
the field values, including whitespace and capitalization.

The required number of matching terms is defined in the
`minimum_should_match_field` or `minimum_should_match_script` parameter.
--
`minimum_should_match_field`::
(Optional, string) <<number, Numeric>> field containing the number of matching
terms required to return a document.
`minimum_should_match_script`::
+
--
(Optional, string) Custom script containing the number of matching terms
required to return a document.

For parameters and valid values, see <<modules-scripting, Scripting>>.
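A sketch of `minimum_should_match_script`, reusing the `job-candidates` example
above and assuming a numeric `required_matches` field that stores the required
match count:

[source,js]
----
GET /job-candidates/_search
{
  "query": {
    "terms_set": {
      "programming_languages": {
        "terms": ["c++", "java", "php"],
        "minimum_should_match_script": {
          "source": "Math.min(params.num_terms, doc['required_matches'].value)"
        }
      }
    }
  }
}
----
// CONSOLE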
View File
@ -37,12 +37,13 @@ GET /_search
[[wildcard-top-level-params]]
==== Top-level parameters for `wildcard`

`<field>`::
(Required, object) Field you wish to search.

[[wildcard-query-field-params]]
==== Parameters for `<field>`

`value`::
(Required, string) Wildcard pattern for terms you wish to find in the provided
`<field>`.
+
--
This parameter supports two wildcard operators:
@ -55,9 +56,8 @@ the iterations needed to find matching terms and slow search performance.
--

`boost`::
(Optional, float) Floating point number used to decrease or increase the
<<query-filter-context, relevance scores>> of a query. Defaults to `1.0`.
+
You can use the `boost` parameter to adjust relevance scores for searches
containing two or more queries.
@ -66,6 +66,6 @@ Boost values are relative to the default value of `1.0`. A boost value between
`0` and `1.0` decreases the relevance score. A value greater than `1.0`
increases the relevance score.

`rewrite`::
(Optional, string) Method used to rewrite the query. For valid values and more
information, see the <<query-dsl-multi-term-rewrite, `rewrite` parameter>>.
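A sketch combining these parameters (the field and pattern are illustrative):

[source,js]
----
GET /_search
{
  "query": {
    "wildcard": {
      "user": {
        "value": "ki*y",
        "boost": 1.0,
        "rewrite": "constant_score"
      }
    }
  }
}
----
// CONSOLE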
View File
@ -4,8 +4,6 @@
<titleabbrev>7.3.0</titleabbrev>
++++
//NOTE: The notable-highlights tagged regions are re-used in the
//Installation and Upgrade Guide
@ -13,8 +11,9 @@ coming[7.3.0]
[float]
==== Voting-only master nodes

A new {ref}/modules-node.html#voting-only-node[`node.voting-only`] role has been
introduced that allows nodes to participate in elections even though they are
not eligible to become the master.

The benefit is that these nodes still help with high availability while
requiring less CPU and heap than master nodes.
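As a sketch, the role is enabled with one line in `elasticsearch.yml` (we assume
the 7.3 setting name `node.voting_only` here):

[source,yaml]
----
# A voting-only master-eligible node: it participates in master
# elections but is never elected master itself.
node.master: true
node.voting_only: true
----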
@ -27,8 +26,8 @@ distribution of {es}.
[float]
==== Reloading of search-time synonyms

A new {ref}/indices-reload-analyzers.html[Analyzer reload API] allows you to
reload the definition of search-time analyzers and their associated resources. A
common use-case for this API is the reloading of search-time synonyms. In earlier
versions of Elasticsearch, users could force synonyms to be reloaded by closing
the index and then opening it again. With this new API, synonyms can be updated
@ -43,12 +42,12 @@ of {es}.
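As a sketch, a reload is a single request per index (the index name is
illustrative; `_reload_search_analyzers` is the endpoint this API added):

[source,js]
----
POST /my_index/_reload_search_analyzers
----
// CONSOLE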
[float]
==== New `flattened` field type

A new {ref}/flattened.html[`flattened`] field type has been added, which can index
arbitrary `json` objects into a single field. This helps avoid hitting issues
due to many fields in mappings, at the cost of more limited search
functionality.
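For example, a sketch of a mapping that indexes an entire JSON object as one
field (the index and field names are illustrative):

[source,js]
----
PUT /bug_reports
{
  "mappings": {
    "properties": {
      "labels": { "type": "flattened" }
    }
  }
}
----
// CONSOLE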
NOTE: The `flattened` field type is only available with the default
distribution of {es}.
// end::notable-highlights[]
@ -57,9 +56,12 @@ default distribution of {es}.
[float]
==== Functions on vector fields

Painless now supports computing the
{ref}/query-dsl-script-score-query.html#vector-functions[cosine similarity] and
the {ref}/query-dsl-script-score-query.html#vector-functions[dot product] of a
query vector and either values of a
{ref}/sparse-vector.html[`sparse_vector`] or
{ref}/dense-vector.html[`dense_vector`] field.
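A sketch of what this looks like inside a `script_score` query, assuming a
`dense_vector` field named `my_vector` and the 7.3 function signatures as we
understand them:

[source,js]
----
GET /_search
{
  "query": {
    "script_score": {
      "query": { "match_all": {} },
      "script": {
        "source": "cosineSimilarity(params.query_vector, doc['my_vector']) + 1.0",
        "params": { "query_vector": [4, 3.4, -0.2] }
      }
    }
  }
}
----
// CONSOLE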
NOTE: These functions are only available with the default distribution of {es}.
@ -69,8 +71,9 @@ NOTE: These functions are only available with the default distribution of {es}.
[float]
==== Prefix and wildcard support for intervals

{ref}/query-dsl-intervals-query.html[Intervals] now support querying by
{ref}/query-dsl-intervals-query.html#intervals-prefix[prefix] or
{ref}/query-dsl-intervals-query.html#intervals-wildcard[wildcard].
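A sketch of an intervals `prefix` rule, assuming a text field named `my_text`
(the rule syntax follows the intervals query docs as we recall them):

[source,js]
----
GET /_search
{
  "query": {
    "intervals": {
      "my_text": {
        "prefix": { "prefix": "out" }
      }
    }
  }
}
----
// CONSOLE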
// end::notable-highlights[]
@ -79,10 +82,10 @@ NOTE: These functions are only available with the default distribution of {es}.
==== Rare terms aggregation

A new
{ref}/search-aggregations-bucket-rare-terms-aggregation.html[Rare Terms aggregation]
allows you to find the least frequent values in a field. It is intended to replace
the `"order" : { "_count" : "asc" }` option of the
{ref}/search-aggregations-bucket-terms-aggregation.html[terms aggregations].
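A sketch of the new aggregation, assuming a keyword field named `genre`
(illustrative):

[source,js]
----
GET /_search
{
  "aggs": {
    "genres": {
      "rare_terms": { "field": "genre" }
    }
  }
}
----
// CONSOLE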
// end::notable-highlights[]
@ -90,8 +93,8 @@ the `"order" : { "_count" : "asc" }` option of the
[float]
==== Aliases are replicated via {ccr}

Read aliases are now replicated via {ref}/ccr-put-follow.html[{ccr}]. Note that
write aliases are still not replicated, since they only make sense for indices
that are being written to, while follower indices do not receive direct writes.
// end::notable-highlights[]
@ -100,8 +103,8 @@ are being written to while follower indices do not receive direct writes.
[float]
==== SQL supports frozen indices

{es-sql} now supports querying {ref}/frozen-indices.html[frozen indices] via the
new {ref}/sql-index-frozen.html[`FROZEN`] keyword.
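A sketch of the syntax (the index and columns are illustrative):

[source,sql]
----
SELECT first_name, last_name FROM FROZEN archive
----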
// end::notable-highlights[]
@ -109,7 +112,7 @@ are being written to while follower indices do not receive direct writes.
[float]
==== Fixed memory leak when using templates in document-level security

{stack-ov}/document-level-security.html[Document-level security] was using an
unbounded cache for the set of visible documents. This could lead to a memory
leak when using a templated query as a role query. The cache has been fixed to
evict based on memory usage and has a limit of 50MB.
@ -120,9 +123,9 @@ evict based on memory usage and has a limit of 50MB.
[float]
==== More memory-efficient aggregations on `keyword` fields

{ref}/search-aggregations-bucket-terms-aggregation.html[Terms aggregations]
generally need to build
{ref}/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-execution-hint[global ordinals]
in order to run. Unfortunately this operation became more memory-intensive in
6.0 due to the move to doc-value iterators in order to improve handling of
sparse fields. Memory pressure of global ordinals now goes back to a more
@ -131,15 +134,23 @@ similar level as what you could have on pre-6.0 releases.
// end::notable-highlights[]

// tag::notable-highlights[]
[discrete]
[[release-highlights-7.3.0-transforms]]
==== {dataframes-cap}: transform and pivot your streaming data

beta[] {stack-ov}/ml-dataframes.html[{dataframe-transforms-cap}] are a core new
feature in {es} that enable you to transform an existing index to a secondary,
summarized index. {dataframe-transforms-cap} enable you to pivot your data and
create entity-centric indices that can summarize the behavior of an entity. This
organizes the data into an analysis-friendly format.

{dataframe-transforms-cap} were originally available in 7.2. With 7.3 they can
now run either as a single batch transform or continuously incorporating new
data as it is ingested.

{dataframes-cap} enable new possibilities for {ml} analysis (such as
_outlier detection_), but they can also be useful for other types of
visualizations and custom types of analysis.
// end::notable-highlights[]
@ -162,7 +173,8 @@ bulk of the data in the index. We assign to each analysed data point an
index.

In addition to new {oldetection} functionality, we are introducing the
{ref}/evaluate-dfanalytics.html[evaluate {dfanalytics} API], which enables you
to compute a range of performance metrics such
as confusion matrices, precision, recall, the
https://en.wikipedia.org/wiki/Receiver_operating_characteristic[receiver-operating characteristics (ROC) curve]
and the area under the ROC curve. If you are running {oldetection} on a source
View File
@ -14,7 +14,7 @@ not be included yet.
* <<cat, cat APIs>>
* <<cluster, Cluster APIs>>
* <<ccr-apis,{ccr-cap} APIs>>
* <<data-frame-apis,{dataframe-transform-cap} APIs>>
* <<docs, Document APIs>>
* <<graph-explore-api,Graph Explore API>>
* <<indices, Index APIs>>
View File
@ -14,7 +14,7 @@ _precision_ or _discounted cumulative gain_.
Search quality evaluation starts with looking at the users of your search application, and the things that they are searching for.
Users have a specific _information need_; e.g. they are looking for a gift in a web shop or want to book a flight for their next holiday.
They usually enter some search terms into a search box or some other web form.
All of this information, together with meta information about the user (e.g. the browser, location, earlier preferences, etc.) then gets translated into a query to the underlying search system.
The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way that the search results contain the most relevant information with respect to the user's information need.
View File
@ -165,10 +165,10 @@ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}
endif::[]

ifdef::include-xpack[]
[[deb-enable-indices]]
==== Enable automatic creation of system indices

include::xpack-indices.asciidoc[]

endif::include-xpack[]
View File
@ -152,10 +152,10 @@ endif::[]
include::skip-set-kernel-parameters.asciidoc[]

ifdef::include-xpack[]
[[rpm-enable-indices]]
==== Enable automatic creation of system indices

include::xpack-indices.asciidoc[]

endif::include-xpack[]
View File
@ -77,10 +77,10 @@ https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}
endif::[]

ifdef::include-xpack[]
[[targz-enable-indices]]
==== Enable automatic creation of system indices

include::xpack-indices.asciidoc[]

endif::include-xpack[]
View File
@ -342,11 +342,10 @@ Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=
for additional rules related to values containing quotation marks.

ifdef::include-xpack[]
[[msi-installer-enable-indices]]
==== Enable automatic creation of system indices

include::xpack-indices.asciidoc[]

endif::include-xpack[]
View File
@ -1,8 +1,9 @@
Some commercial features automatically create system indices within {es}.
By default, {es} is configured to allow automatic index creation, and no
additional steps are required. However, if you have disabled automatic index
creation in {es}, you must configure
<<index-creation,`action.auto_create_index`>> in `elasticsearch.yml` to allow
the commercial features to create the following indices:

[source,yaml]
-----------------------------------------------------------
View File
@ -57,10 +57,10 @@ cd c:\elasticsearch-{version}
endif::[]

ifdef::include-xpack[]
[[windows-enable-indices]]
==== Enable automatic creation of system indices

include::xpack-indices.asciidoc[]

endif::include-xpack[]
View File
@ -7,7 +7,6 @@
:sql-specs: {sql-tests}/src/main/resources/
:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc
:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/sql/qa/security

[partintro]
--
View File
@ -359,7 +359,9 @@ public class NioSelector implements Closeable {
            }

            if (shouldFlushAfterQueuing) {
                // We only attempt the write if the connect process is complete and the context is not
                // signalling that it should be closed.
                if (context.isConnectComplete() && context.selectorShouldClose() == false) {
                    handleWrite(context);
                }

                eventHandler.postHandling(context);
View File
@ -355,8 +355,6 @@ public class NioSelectorTests extends ESTestCase {
    public void testQueueDirectlyInChannelBufferSuccessful() throws Exception {
        WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener);

        when(channelContext.readyForFlush()).thenReturn(true);

        selector.queueWrite(writeOperation);
@ -368,8 +366,6 @@ public class NioSelectorTests extends ESTestCase {
    public void testShouldFlushIfNoPendingFlushes() throws Exception {
        WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener);

        when(channelContext.readyForFlush()).thenReturn(false);

        selector.queueWrite(writeOperation);
@ -378,6 +374,18 @@ public class NioSelectorTests extends ESTestCase {
        verify(eventHandler).postHandling(channelContext);
    }
    public void testShouldNotFlushIfChannelNotConnectedPendingFlushes() throws Exception {
        WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener);

        when(channelContext.readyForFlush()).thenReturn(false);
        when(channelContext.isConnectComplete()).thenReturn(false);

        selector.queueWrite(writeOperation);

        verify(channelContext).queueWriteOperation(writeOperation);
        verify(eventHandler, times(0)).handleWrite(channelContext);
        verify(eventHandler).postHandling(channelContext);
    }
    public void testConnectEvent() throws Exception {
        selectionKey.setReadyOps(SelectionKey.OP_CONNECT);
View File
@ -87,7 +87,6 @@ public class PemTrustConfigTests extends ESTestCase {
        assertFileNotFound(trustConfig, cert2);
    }

    public void testTrustConfigReloadsFileContents() throws Exception {
        final Path cert1 = getDataPath("/certs/ca1/ca.crt");
        final Path cert2 = getDataPath("/certs/ca2/ca.crt");
View File
@ -22,7 +22,9 @@ package org.elasticsearch.painless;
import org.elasticsearch.painless.lookup.PainlessCast;
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
import org.elasticsearch.painless.lookup.def;
import org.elasticsearch.script.JodaCompatibleZonedDateTime;

import java.time.ZonedDateTime;
import java.util.Objects;

/**
@ -72,11 +74,19 @@ public final class AnalyzerCaster {
                return PainlessCast.originalTypetoTargetType(def.class, Float.class, explicit);
            } else if (expected == Double.class) {
                return PainlessCast.originalTypetoTargetType(def.class, Double.class, explicit);
            // TODO: remove this when the transition from Joda to Java datetimes is completed
            } else if (expected == ZonedDateTime.class) {
                return PainlessCast.originalTypetoTargetType(def.class, ZonedDateTime.class, explicit);
            }
        } else if (actual == String.class) {
            if (expected == char.class && explicit) {
                return PainlessCast.originalTypetoTargetType(String.class, char.class, true);
            }
        // TODO: remove this when the transition from Joda to Java datetimes is completed
        } else if (actual == JodaCompatibleZonedDateTime.class) {
            if (expected == ZonedDateTime.class) {
                return PainlessCast.originalTypetoTargetType(JodaCompatibleZonedDateTime.class, ZonedDateTime.class, explicit);
            }
        } else if (actual == boolean.class) {
            if (expected == def.class) {
                return PainlessCast.boxOriginalType(Boolean.class, def.class, explicit, boolean.class);
View File
@ -23,11 +23,13 @@ import org.elasticsearch.painless.Locals.LocalMethod;
import org.elasticsearch.painless.lookup.PainlessLookup;
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.elasticsearch.script.JodaCompatibleZonedDateTime;

import java.lang.invoke.CallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.time.ZonedDateTime;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
@ -1185,6 +1187,15 @@ public final class Def {
        }
    }

    // TODO: remove this when the transition from Joda to Java datetimes is completed
    public static ZonedDateTime defToZonedDateTime(final Object value) {
        if (value instanceof JodaCompatibleZonedDateTime) {
            return ((JodaCompatibleZonedDateTime)value).getZonedDateTime();
        }
        return (ZonedDateTime)value;
    }

    /**
     * "Normalizes" the index into a {@code Map} by making no change to the index.
     */
View File
@ -22,6 +22,7 @@ package org.elasticsearch.painless;
import org.elasticsearch.painless.lookup.PainlessCast;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.elasticsearch.painless.lookup.def;
import org.elasticsearch.script.JodaCompatibleZonedDateTime;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.Label;
import org.objectweb.asm.Opcodes;
@ -30,6 +31,7 @@ import org.objectweb.asm.commons.GeneratorAdapter;
import org.objectweb.asm.commons.Method;

import java.lang.reflect.Modifier;
import java.time.ZonedDateTime;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
@ -71,8 +73,10 @@ import static org.elasticsearch.painless.WriterConstants.DEF_TO_P_SHORT_EXPLICIT
import static org.elasticsearch.painless.WriterConstants.DEF_TO_P_SHORT_IMPLICIT;
import static org.elasticsearch.painless.WriterConstants.DEF_TO_STRING_EXPLICIT;
import static org.elasticsearch.painless.WriterConstants.DEF_TO_STRING_IMPLICIT;
import static org.elasticsearch.painless.WriterConstants.DEF_TO_ZONEDDATETIME;
import static org.elasticsearch.painless.WriterConstants.DEF_UTIL_TYPE;
import static org.elasticsearch.painless.WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE;
import static org.elasticsearch.painless.WriterConstants.JCZDT_TO_ZONEDDATETIME;
import static org.elasticsearch.painless.WriterConstants.LAMBDA_BOOTSTRAP_HANDLE;
import static org.elasticsearch.painless.WriterConstants.MAX_INDY_STRING_CONCAT_ARGS;
import static org.elasticsearch.painless.WriterConstants.PAINLESS_ERROR_TYPE;
@ -156,6 +160,9 @@ public final class MethodWriter extends GeneratorAdapter {
            invokeStatic(UTILITY_TYPE, CHAR_TO_STRING);
        } else if (cast.originalType == String.class && cast.targetType == char.class) {
            invokeStatic(UTILITY_TYPE, STRING_TO_CHAR);
        // TODO: remove this when the transition from Joda to Java datetimes is completed
        } else if (cast.originalType == JodaCompatibleZonedDateTime.class && cast.targetType == ZonedDateTime.class) {
            invokeStatic(UTILITY_TYPE, JCZDT_TO_ZONEDDATETIME);
        } else if (cast.unboxOriginalType != null && cast.boxTargetType != null) {
            unbox(getType(cast.unboxOriginalType));
            writeCast(cast.unboxOriginalType, cast.boxTargetType);
@ -191,6 +198,8 @@ public final class MethodWriter extends GeneratorAdapter {
            else if (cast.targetType == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_FLOAT_EXPLICIT);
            else if (cast.targetType == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_DOUBLE_EXPLICIT);
            else if (cast.targetType == String.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_STRING_EXPLICIT);
            // TODO: remove this when the transition from Joda to Java datetimes is completed
            else if (cast.targetType == ZonedDateTime.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_ZONEDDATETIME);
            else {
                writeCast(cast.originalType, cast.targetType);
            }
@ -212,6 +221,8 @@ public final class MethodWriter extends GeneratorAdapter {
            else if (cast.targetType == Float.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_FLOAT_IMPLICIT);
            else if (cast.targetType == Double.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_B_DOUBLE_IMPLICIT);
            else if (cast.targetType == String.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_STRING_IMPLICIT);
            // TODO: remove this when the transition from Joda to Java datetimes is completed
            else if (cast.targetType == ZonedDateTime.class) invokeStatic(DEF_UTIL_TYPE, DEF_TO_ZONEDDATETIME);
            else {
                writeCast(cast.originalType, cast.targetType);
            }
View File
@ -19,6 +19,10 @@
package org.elasticsearch.painless;

import org.elasticsearch.script.JodaCompatibleZonedDateTime;

import java.time.ZonedDateTime;

/**
 * A set of methods for non-native boxing and non-native
 * exact math operations used at both compile-time and runtime.
@ -43,5 +47,10 @@ public class Utility {
        return value.charAt(0);
    }

    // TODO: remove this when the transition from Joda to Java datetimes is completed
    public static ZonedDateTime JCZDTToZonedDateTime(final JodaCompatibleZonedDateTime jczdt) {
        return jczdt.getZonedDateTime();
    }

    private Utility() {}
}
View File
@ -21,6 +21,7 @@ package org.elasticsearch.painless;
import org.elasticsearch.painless.api.Augmentation;
import org.elasticsearch.painless.lookup.PainlessLookup;
import org.elasticsearch.script.JodaCompatibleZonedDateTime;
import org.elasticsearch.script.ScriptException;
import org.objectweb.asm.Handle;
import org.objectweb.asm.Opcodes;
@ -31,6 +32,7 @@ import java.lang.invoke.CallSite;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.time.ZonedDateTime;
import java.util.BitSet;
import java.util.Collection;
import java.util.Collections;
@ -98,13 +100,16 @@ public final class WriterConstants {
    public static final Method STRING_TO_CHAR = getAsmMethod(char.class, "StringTochar", String.class);
    public static final Method CHAR_TO_STRING = getAsmMethod(String.class, "charToString", char.class);

    // TODO: remove this when the transition from Joda to Java datetimes is completed
    public static final Method JCZDT_TO_ZONEDDATETIME =
            getAsmMethod(ZonedDateTime.class, "JCZDTToZonedDateTime", JodaCompatibleZonedDateTime.class);

    public static final Type METHOD_HANDLE_TYPE = Type.getType(MethodHandle.class);
    public static final Type AUGMENTATION_TYPE = Type.getType(Augmentation.class);

    /**
     * A Method instance for {@linkplain Pattern}. This isn't available from PainlessLookup because we intentionally don't add it
     * there so that the script can't create regexes without this syntax. Essentially, our static regex syntax has a monopoly on building
     * regexes because it can do it statically. This is both faster and prevents the script from doing something super slow like building a
     * regex per time it is run.
@ -161,6 +166,9 @@ public final class WriterConstants {
    public static final Method DEF_TO_STRING_IMPLICIT = getAsmMethod(String.class, "defToStringImplicit", Object.class);
    public static final Method DEF_TO_STRING_EXPLICIT = getAsmMethod(String.class, "defToStringExplicit", Object.class);
    // TODO: remove this when the transition from Joda to Java datetimes is completed
    public static final Method DEF_TO_ZONEDDATETIME = getAsmMethod(ZonedDateTime.class, "defToZonedDateTime", Object.class);

    public static final Type DEF_ARRAY_LENGTH_METHOD_TYPE = Type.getMethodType(Type.INT_TYPE, Type.getType(Object.class));

    /** invokedynamic bootstrap for lambda expression/method references */
View File
@ -27,6 +27,7 @@ import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.def;
import org.objectweb.asm.Type;

import java.time.ZonedDateTime;
import java.util.Objects;
import java.util.Set;
@ -53,7 +54,8 @@ final class PSubDefArray extends AStoreable {
        index.expected = index.actual;
        index = index.cast(locals);

        // TODO: remove ZonedDateTime exception when JodaCompatibleDateTime is removed
        actual = expected == null || expected == ZonedDateTime.class || explicit ? def.class : expected;
    }

    @Override
View File
@ -27,6 +27,7 @@ import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.def;
import org.objectweb.asm.Type;

import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@ -84,7 +85,8 @@ final class PSubDefCall extends AExpression {
            arguments.set(argument, expression.cast(locals));
        }

        // TODO: remove ZonedDateTime exception when JodaCompatibleDateTime is removed
        actual = expected == null || expected == ZonedDateTime.class || explicit ? def.class : expected;
    }

    @Override
View File
@ -26,6 +26,7 @@ import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.def;

import java.time.ZonedDateTime;
import java.util.Objects;
import java.util.Set;
@ -49,7 +50,8 @@ final class PSubDefField extends AStoreable {
    @Override
    void analyze(Locals locals) {
        // TODO: remove ZonedDateTime exception when JodaCompatibleDateTime is removed
        actual = expected == null || expected == ZonedDateTime.class || explicit ? def.class : expected;
    }

    @Override

View File

@ -87,7 +87,6 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime {
int getNano() int getNano()
int getSecond() int getSecond()
int getYear() int getYear()
ZoneId getZone()
ZonedDateTime minus(TemporalAmount) ZonedDateTime minus(TemporalAmount)
ZonedDateTime minus(long,TemporalUnit) ZonedDateTime minus(long,TemporalUnit)
ZonedDateTime minusYears(long) ZonedDateTime minusYears(long)
@ -108,7 +107,6 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime {
ZonedDateTime plusSeconds(long) ZonedDateTime plusSeconds(long)
ZonedDateTime plusWeeks(long) ZonedDateTime plusWeeks(long)
ZonedDateTime plusYears(long) ZonedDateTime plusYears(long)
Instant toInstant()
OffsetDateTime toOffsetDateTime() OffsetDateTime toOffsetDateTime()
ZonedDateTime truncatedTo(TemporalUnit) ZonedDateTime truncatedTo(TemporalUnit)
ZonedDateTime with(TemporalAdjuster) ZonedDateTime with(TemporalAdjuster)
@ -127,25 +125,6 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime {
ZonedDateTime withZoneSameLocal(ZoneId) ZonedDateTime withZoneSameLocal(ZoneId)
ZonedDateTime withZoneSameInstant(ZoneId) ZonedDateTime withZoneSameInstant(ZoneId)
#### ChronoZonedDateTime
int compareTo(JodaCompatibleZonedDateTime)
Chronology getChronology()
String format(DateTimeFormatter)
int get(TemporalField)
long getLong(TemporalField)
ZoneOffset getOffset()
boolean isSupported(TemporalField)
long toEpochSecond()
LocalTime toLocalTime()
#### Joda methods that exist in java time
boolean equals(Object)
int hashCode()
boolean isAfter(JodaCompatibleZonedDateTime)
boolean isBefore(JodaCompatibleZonedDateTime)
boolean isEqual(JodaCompatibleZonedDateTime)
String toString()
#### Joda time methods #### Joda time methods
long getMillis() long getMillis()
int getCenturyOfEra() int getCenturyOfEra()

View File

@ -139,4 +139,14 @@ public class BasicAPITests extends ScriptTestCase {
assertEquals(10, exec("staticAddIntsTest(7, 3)")); assertEquals(10, exec("staticAddIntsTest(7, 3)"));
assertEquals(15.5f, exec("staticAddFloatsTest(6.5f, 9.0f)")); assertEquals(15.5f, exec("staticAddFloatsTest(6.5f, 9.0f)"));
} }
// TODO: remove this when the transition from Joda to Java datetimes is completed
public void testJCZDTToZonedDateTime() {
assertEquals(0L, exec(
"Instant instant = Instant.ofEpochMilli(434931330000L);" +
"JodaCompatibleZonedDateTime d = new JodaCompatibleZonedDateTime(instant, ZoneId.of('Z'));" +
"ZonedDateTime t = d;" +
"return ChronoUnit.MILLIS.between(d, t);"
));
}
} }

View File

@ -683,4 +683,17 @@ public class DefCastTests extends ScriptTestCase {
public void testdefToStringExplicit() { public void testdefToStringExplicit() {
assertEquals("s", exec("def d = (char)'s'; String b = (String)d; b")); assertEquals("s", exec("def d = (char)'s'; String b = (String)d; b"));
} }
// TODO: remove this when the transition from Joda to Java datetimes is completed
public void testdefToZonedDateTime() {
assertEquals(0L, exec(
"Instant instant = Instant.ofEpochMilli(434931330000L);" +
"def d = new JodaCompatibleZonedDateTime(instant, ZoneId.of('Z'));" +
"def x = new HashMap(); x.put('dt', d);" +
"ZonedDateTime t = x['dt'];" +
"def y = t;" +
"t = y;" +
"return ChronoUnit.MILLIS.between(d, t);"
));
}
} }

View File

@ -1,4 +1,10 @@
# whitelist for tests # whitelist for tests
# TODO: remove this when the transition from Joda to Java datetimes is completed
class org.elasticsearch.script.JodaCompatibleZonedDateTime {
(Instant, ZoneId)
}
class org.elasticsearch.painless.BindingsTests$BindingsTestScript { class org.elasticsearch.painless.BindingsTests$BindingsTestScript {
} }

View File

@ -41,9 +41,9 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) { protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) {
Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get(); Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get();
FullHttpRequest request = msg.getRequest(); FullHttpRequest request = msg.getRequest();
final FullHttpRequest copiedRequest;
try { try {
final FullHttpRequest copiedRequest = copiedRequest =
new DefaultFullHttpRequest( new DefaultFullHttpRequest(
request.protocolVersion(), request.protocolVersion(),
request.method(), request.method(),
@ -51,24 +51,23 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelined
Unpooled.copiedBuffer(request.content()), Unpooled.copiedBuffer(request.content()),
request.headers(), request.headers(),
request.trailingHeaders()); request.trailingHeaders());
Netty4HttpRequest httpRequest = new Netty4HttpRequest(copiedRequest, msg.getSequence());
if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) {
ExceptionsHelper.maybeDieOnAnotherThread(cause);
serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause));
} else {
serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause);
}
} else {
serverTransport.incomingRequest(httpRequest, channel);
}
} finally { } finally {
// As we have copied the buffer, we can release the request // As we have copied the buffer, we can release the request
request.release(); request.release();
} }
Netty4HttpRequest httpRequest = new Netty4HttpRequest(copiedRequest, msg.getSequence());
if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) {
ExceptionsHelper.maybeDieOnAnotherThread(cause);
serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause));
} else {
serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause);
}
} else {
serverTransport.incomingRequest(httpRequest, channel);
}
} }
@Override @Override
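
The restructuring above narrows the try/finally so it guards only the copy: the incoming request is released as soon as its content has been copied, and the dispatch (including the decoder-failure branch) now runs on the copy outside the finally block. The nio HttpReadWriteHandler below gets the same reshuffle. The pattern in isolation, with a hypothetical ref-counted message type:

import java.util.concurrent.atomic.AtomicInteger;

public final class CopyThenReleaseSketch {
    // Hypothetical ref-counted message, standing in for Netty's FullHttpRequest.
    static final class RefCountedMessage {
        final String content;
        final AtomicInteger refs = new AtomicInteger(1);

        RefCountedMessage(String content) {
            this.content = content;
        }

        RefCountedMessage copy() {
            return new RefCountedMessage(content); // independent lifetime
        }

        void release() {
            if (refs.decrementAndGet() == 0) {
                System.out.println("original released");
            }
        }
    }

    static void handle(RefCountedMessage request) {
        final RefCountedMessage copied;
        try {
            copied = request.copy();
        } finally {
            // As we have copied the buffer, we can release the request here;
            // everything past this point touches only the copy.
            request.release();
        }
        dispatch(copied);
    }

    static void dispatch(RefCountedMessage message) {
        System.out.println("dispatched: " + message.content);
    }

    public static void main(String[] args) {
        handle(new RefCountedMessage("GET /"));
    }
}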

View File

@ -35,20 +35,20 @@ final class Netty4SizeHeaderFrameDecoder extends ByteToMessageDecoder {
@Override @Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception { protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
try { try {
boolean continueDecode = true; while (in.readableBytes() >= HEADER_SIZE) {
while (continueDecode) {
int messageLength = TcpTransport.readMessageLength(Netty4Utils.toBytesReference(in)); int messageLength = TcpTransport.readMessageLength(Netty4Utils.toBytesReference(in));
if (messageLength == -1) { if (messageLength == -1) {
continueDecode = false; break;
} else { } else {
int messageLengthWithHeader = messageLength + HEADER_SIZE; int messageLengthWithHeader = messageLength + HEADER_SIZE;
// If the message length is greater than the network bytes available, we have not read a complete frame. // If the message length is greater than the network bytes available, we have not read a complete frame.
if (messageLengthWithHeader > in.readableBytes()) { if (messageLengthWithHeader > in.readableBytes()) {
continueDecode = false; break;
} else { } else {
final ByteBuf message = in.retainedSlice(in.readerIndex() + HEADER_SIZE, messageLength); final int readerIndex = in.readerIndex();
final ByteBuf message = in.retainedSlice(readerIndex + HEADER_SIZE, messageLength);
out.add(message); out.add(message);
in.readerIndex(in.readerIndex() + messageLengthWithHeader); in.readerIndex(readerIndex + messageLengthWithHeader);
} }
} }
} }
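
The rewritten decode loop replaces the continueDecode flag with a while condition plus break, and reads readerIndex once instead of calling it twice. The same shape on a plain ByteBuffer, assuming a 4-byte big-endian length prefix purely for illustration (the real header size and readMessageLength, including its -1 "not enough bytes" contract, live in TcpTransport):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public final class FrameDecoderSketch {
    static final int HEADER_SIZE = 4; // illustration only

    // Slice complete frames off the front of the buffer; stop as soon as a frame
    // is only partially available.
    static List<byte[]> decode(ByteBuffer in) {
        List<byte[]> out = new ArrayList<>();
        while (in.remaining() >= HEADER_SIZE) {
            int messageLength = in.getInt(in.position()); // peek, do not consume
            int messageLengthWithHeader = messageLength + HEADER_SIZE;
            if (messageLengthWithHeader > in.remaining()) {
                break; // incomplete frame: wait for more bytes
            }
            int readerIndex = in.position();
            byte[] message = new byte[messageLength];
            in.position(readerIndex + HEADER_SIZE);
            in.get(message); // position ends at readerIndex + messageLengthWithHeader
            out.add(message);
        }
        return out;
    }

    public static void main(String[] args) {
        ByteBuffer in = ByteBuffer.allocate(16);
        in.putInt(3).put(new byte[] {1, 2, 3}).putInt(5); // one full frame, one truncated
        in.flip();
        System.out.println(decode(in).size()); // 1
    }
}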

View File

@ -26,6 +26,7 @@ import io.netty.util.NettyRuntime;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import java.io.IOException; import java.io.IOException;
@ -108,14 +109,10 @@ public class Netty4Utils {
* Wraps the given ChannelBuffer with a BytesReference * Wraps the given ChannelBuffer with a BytesReference
*/ */
public static BytesReference toBytesReference(final ByteBuf buffer) { public static BytesReference toBytesReference(final ByteBuf buffer) {
return toBytesReference(buffer, buffer.readableBytes()); final int readableBytes = buffer.readableBytes();
if (readableBytes == 0) {
return BytesArray.EMPTY;
}
return new ByteBufBytesReference(buffer, buffer.readableBytes());
} }
/**
* Wraps the given ChannelBuffer with a BytesReference of a given size
*/
static BytesReference toBytesReference(final ByteBuf buffer, final int size) {
return new ByteBufBytesReference(buffer, size);
}
} }
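
Folding the two toBytesReference overloads into one also adds an empty-buffer fast path: zero readable bytes now map to the shared BytesArray.EMPTY rather than allocating a wrapper per call. The shape of the change with simplified types (the real method wraps without copying; this sketch copies to stay self-contained):

import java.nio.ByteBuffer;

public final class EmptyFastPathSketch {
    static final byte[] EMPTY = new byte[0]; // shared instance, like BytesArray.EMPTY

    static byte[] toBytes(ByteBuffer buffer) {
        final int readableBytes = buffer.remaining();
        if (readableBytes == 0) {
            return EMPTY; // no per-call allocation for the common empty case
        }
        byte[] bytes = new byte[readableBytes];
        buffer.duplicate().get(bytes);
        return bytes;
    }

    public static void main(String[] args) {
        System.out.println(toBytes(ByteBuffer.allocate(0)) == EMPTY);           // true
        System.out.println(toBytes(ByteBuffer.wrap(new byte[] {1, 2})).length); // 2
    }
}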

View File

@ -50,6 +50,7 @@ import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.http.CorsHandler;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.NullDispatcher;
@ -71,6 +72,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.OK;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
@ -193,7 +196,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build();
try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, try (Netty4HttpServerTransport otherTransport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool,
xContentRegistry(), new NullDispatcher())) { xContentRegistry(), new NullDispatcher())) {
BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start);
assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage());
} }
} }
@ -260,6 +263,65 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); assertThat(causeReference.get(), instanceOf(TooLongFrameException.class));
} }
public void testCorsRequest() throws InterruptedException {
final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
@Override
public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
throw new AssertionError();
}
@Override
public void dispatchBadRequest(final RestRequest request,
final RestChannel channel,
final ThreadContext threadContext,
final Throwable cause) {
throw new AssertionError();
}
};
final Settings settings = Settings.builder()
.put(SETTING_CORS_ENABLED.getKey(), true)
.put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co").build();
try (Netty4HttpServerTransport transport =
new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) {
transport.start();
final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
// Test pre-flight request
try (Netty4HttpClient client = new Netty4HttpClient()) {
final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/");
request.headers().add(CorsHandler.ORIGIN, "elastic.co");
request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST");
final FullHttpResponse response = client.post(remoteAddress.address(), request);
try {
assertThat(response.status(), equalTo(HttpResponseStatus.OK));
assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("elastic.co"));
assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN));
assertTrue(response.headers().contains(CorsHandler.DATE));
} finally {
response.release();
}
}
// Test short-circuited request
try (Netty4HttpClient client = new Netty4HttpClient()) {
final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
request.headers().add(CorsHandler.ORIGIN, "elastic2.co");
final FullHttpResponse response = client.post(remoteAddress.address(), request);
try {
assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN));
} finally {
response.release();
}
}
}
}
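
The new test pins down both CORS paths: a pre-flight OPTIONS from the allowed origin gets 200 plus the Access-Control-Allow-Origin and Vary headers, while a non-matching origin is short-circuited with 403 (the nio transport test below repeats the same pair). The pre-flight can be reproduced by hand with a raw socket; the host and port here are hypothetical:

import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

public final class PreflightSketch {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("localhost", 9200)) {
            String request = "OPTIONS / HTTP/1.1\r\n"
                + "Host: localhost:9200\r\n"
                + "Origin: elastic.co\r\n"
                + "Access-Control-Request-Method: POST\r\n"
                + "Connection: close\r\n"
                + "\r\n";
            OutputStream out = socket.getOutputStream();
            out.write(request.getBytes(StandardCharsets.US_ASCII));
            out.flush();
            // With http.cors.enabled=true and a matching http.cors.allow-origin,
            // expect "HTTP/1.1 200 OK" and "access-control-allow-origin: elastic.co".
            InputStream in = socket.getInputStream();
            System.out.write(in.readAllBytes());
            System.out.flush();
        }
    }
}
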
public void testReadTimeout() throws Exception { public void testReadTimeout() throws Exception {
final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {

View File

@ -291,6 +291,10 @@ testClusters.integTest {
} }
} }
task s3ThirdPartyTests {
dependsOn check
}
if (useFixture) { if (useFixture) {
task integTestECS(type: RestIntegTestTask.class) { task integTestECS(type: RestIntegTestTask.class) {
description = "Runs tests using the ECS repository." description = "Runs tests using the ECS repository."
@ -311,6 +315,13 @@ if (useFixture) {
plugin file(tasks.bundlePlugin.archiveFile) plugin file(tasks.bundlePlugin.archiveFile)
environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" }, IGNORE_VALUE environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" }, IGNORE_VALUE
} }
gradle.taskGraph.whenReady {
if (it.hasTask(s3ThirdPartyTests)) {
throw new IllegalStateException("Tried to run third party tests but not all of the necessary environment variables 'amazon_s3_access_key', " +
"'amazon_s3_secret_key', 'amazon_s3_bucket', and 'amazon_s3_base_path' are set.");
}
}
} }
thirdPartyAudit.ignoreMissingClasses ( thirdPartyAudit.ignoreMissingClasses (

View File

@ -159,33 +159,32 @@ public class HttpReadWriteHandler implements NioChannelHandler {
final HttpPipelinedRequest<FullHttpRequest> pipelinedRequest = (HttpPipelinedRequest<FullHttpRequest>) msg; final HttpPipelinedRequest<FullHttpRequest> pipelinedRequest = (HttpPipelinedRequest<FullHttpRequest>) msg;
FullHttpRequest request = pipelinedRequest.getRequest(); FullHttpRequest request = pipelinedRequest.getRequest();
final FullHttpRequest copiedRequest;
try { try {
final FullHttpRequest copiedRequest = copiedRequest = new DefaultFullHttpRequest(
new DefaultFullHttpRequest( request.protocolVersion(),
request.protocolVersion(), request.method(),
request.method(), request.uri(),
request.uri(), Unpooled.copiedBuffer(request.content()),
Unpooled.copiedBuffer(request.content()), request.headers(),
request.headers(), request.trailingHeaders());
request.trailingHeaders());
NioHttpRequest httpRequest = new NioHttpRequest(copiedRequest, pipelinedRequest.getSequence());
if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) {
ExceptionsHelper.maybeDieOnAnotherThread(cause);
transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause));
} else {
transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause);
}
} else {
transport.incomingRequest(httpRequest, nioHttpChannel);
}
} finally { } finally {
// As we have copied the buffer, we can release the request // As we have copied the buffer, we can release the request
request.release(); request.release();
} }
NioHttpRequest httpRequest = new NioHttpRequest(copiedRequest, pipelinedRequest.getSequence());
if (request.decoderResult().isFailure()) {
Throwable cause = request.decoderResult().cause();
if (cause instanceof Error) {
ExceptionsHelper.maybeDieOnAnotherThread(cause);
transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause));
} else {
transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause);
}
} else {
transport.incomingRequest(httpRequest, nioHttpChannel);
}
} }
private void maybeReadTimeout() { private void maybeReadTimeout() {

View File

@ -110,7 +110,7 @@ class NioHttpClient implements Closeable {
return sendRequests(remoteAddress, requests); return sendRequests(remoteAddress, requests);
} }
public final FullHttpResponse post(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { public final FullHttpResponse send(InetSocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException {
Collection<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest)); Collection<FullHttpResponse> responses = sendRequests(remoteAddress, Collections.singleton(httpRequest));
assert responses.size() == 1 : "expected 1 and only 1 http response"; assert responses.size() == 1 : "expected 1 and only 1 http response";
return responses.iterator().next(); return responses.iterator().next();
@ -271,7 +271,7 @@ class NioHttpClient implements Closeable {
int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex())); int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex()));
Object message; Object message;
while ((message = adaptor.pollInboundMessage()) != null) { while ((message = adaptor.pollInboundMessage()) != null) {
handleRequest(message); handleResponse(message);
} }
return bytesConsumed; return bytesConsumed;
@ -286,12 +286,18 @@ class NioHttpClient implements Closeable {
public void close() throws IOException { public void close() throws IOException {
try { try {
adaptor.close(); adaptor.close();
// After closing the pipeline, we must poll to see if any new messages are available. This
// is because HTTP supports a channel being closed as an end of content marker.
Object message;
while ((message = adaptor.pollInboundMessage()) != null) {
handleResponse(message);
}
} catch (Exception e) { } catch (Exception e) {
throw new IOException(e); throw new IOException(e);
} }
} }
private void handleRequest(Object message) { private void handleResponse(Object message) {
final FullHttpResponse response = (FullHttpResponse) message; final FullHttpResponse response = (FullHttpResponse) message;
DefaultFullHttpResponse newResponse = new DefaultFullHttpResponse(response.protocolVersion(), DefaultFullHttpResponse newResponse = new DefaultFullHttpResponse(response.protocolVersion(),
response.status(), response.status(),

View File

@ -43,6 +43,7 @@ import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.BindHttpException;
import org.elasticsearch.http.CorsHandler;
import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.NullDispatcher;
@ -66,6 +67,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.OK;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
@ -159,13 +162,13 @@ public class NioHttpServerTransportTests extends ESTestCase {
request.headers().set(HttpHeaderNames.EXPECT, expectation); request.headers().set(HttpHeaderNames.EXPECT, expectation);
HttpUtil.setContentLength(request, contentLength); HttpUtil.setContentLength(request, contentLength);
final FullHttpResponse response = client.post(remoteAddress.address(), request); final FullHttpResponse response = client.send(remoteAddress.address(), request);
try { try {
assertThat(response.status(), equalTo(expectedStatus)); assertThat(response.status(), equalTo(expectedStatus));
if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) {
final FullHttpRequest continuationRequest = final FullHttpRequest continuationRequest =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER); new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER);
final FullHttpResponse continuationResponse = client.post(remoteAddress.address(), continuationRequest); final FullHttpResponse continuationResponse = client.send(remoteAddress.address(), continuationRequest);
try { try {
assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); assertThat(continuationResponse.status(), is(HttpResponseStatus.OK));
assertThat( assertThat(
@ -196,6 +199,65 @@ public class NioHttpServerTransportTests extends ESTestCase {
} }
} }
public void testCorsRequest() throws InterruptedException {
final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
@Override
public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
throw new AssertionError();
}
@Override
public void dispatchBadRequest(final RestRequest request,
final RestChannel channel,
final ThreadContext threadContext,
final Throwable cause) {
throw new AssertionError();
}
};
final Settings settings = Settings.builder()
.put(SETTING_CORS_ENABLED.getKey(), true)
.put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "elastic.co").build();
try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler,
threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) {
transport.start();
final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses());
// Test pre-flight request
try (NioHttpClient client = new NioHttpClient()) {
final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.OPTIONS, "/");
request.headers().add(CorsHandler.ORIGIN, "elastic.co");
request.headers().add(CorsHandler.ACCESS_CONTROL_REQUEST_METHOD, "POST");
final FullHttpResponse response = client.send(remoteAddress.address(), request);
try {
assertThat(response.status(), equalTo(HttpResponseStatus.OK));
assertThat(response.headers().get(CorsHandler.ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("elastic.co"));
assertThat(response.headers().get(CorsHandler.VARY), equalTo(CorsHandler.ORIGIN));
assertTrue(response.headers().contains(CorsHandler.DATE));
} finally {
response.release();
}
}
// Test short-circuited request
try (NioHttpClient client = new NioHttpClient()) {
final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
request.headers().add(CorsHandler.ORIGIN, "elastic2.co");
final FullHttpResponse response = client.send(remoteAddress.address(), request);
try {
assertThat(response.status(), equalTo(HttpResponseStatus.FORBIDDEN));
} finally {
response.release();
}
}
}
}
public void testBadRequest() throws InterruptedException { public void testBadRequest() throws InterruptedException {
final AtomicReference<Throwable> causeReference = new AtomicReference<>(); final AtomicReference<Throwable> causeReference = new AtomicReference<>();
final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
@ -241,7 +303,7 @@ public class NioHttpServerTransportTests extends ESTestCase {
final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8"));
final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
final FullHttpResponse response = client.post(remoteAddress.address(), request); final FullHttpResponse response = client.send(remoteAddress.address(), request);
try { try {
assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST));
assertThat( assertThat(

View File

@ -97,13 +97,16 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_6_8_0 = new Version(6080099, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_6_8_0 = new Version(6080099, org.apache.lucene.util.Version.LUCENE_7_7_0);
public static final Version V_6_8_1 = new Version(6080199, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_6_8_1 = new Version(6080199, org.apache.lucene.util.Version.LUCENE_7_7_0);
public static final Version V_6_8_2 = new Version(6080299, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final Version V_6_8_2 = new Version(6080299, org.apache.lucene.util.Version.LUCENE_7_7_0);
public static final Version V_6_8_3 = new Version(6080399, org.apache.lucene.util.Version.LUCENE_7_7_0);
public static final Version V_7_0_0 = new Version(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_0_0 = new Version(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_0_1 = new Version(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_0_1 = new Version(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_1_0 = new Version(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_1_0 = new Version(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_1_1 = new Version(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_1_1 = new Version(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_0 = new Version(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_2_2 = new Version(7020299, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0);
public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0);
public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0);
public static final Version CURRENT = V_7_4_0; public static final Version CURRENT = V_7_4_0;
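
The id scheme visible in these constants packs major, minor, revision, and build into one integer (major * 1_000_000 + minor * 10_000 + revision * 100 + build, with 99 marking a release build), which is why the new 7.3.1 constant reads 7030199. A quick check of the arithmetic:

public final class VersionIdSketch {
    static int id(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(id(7, 3, 1, 99)); // 7030199, matching V_7_3_1
        System.out.println(id(6, 8, 3, 99)); // 6080399, matching V_6_8_3
        System.out.println(id(7, 2, 2, 99)); // 7020299, matching V_7_2_2
    }
}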

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action; package org.elasticsearch.action;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
/** /**
@ -29,6 +30,23 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {
protected final ActionListener<Response> listener; protected final ActionListener<Response> listener;
/**
* Creates a {@link Runnable} that wraps the given listener and a consumer of it that is executed when the {@link Runnable} is run.
* Invokes {@link ActionListener#onFailure(Exception)} on it if an exception is thrown on executing the consumer.
* @param listener ActionListener to wrap
* @param consumer Consumer of wrapped {@code ActionListener}
* @param <T> Type of the given {@code ActionListener}
* @return Wrapped {@code Runnable}
*/
public static <T> ActionRunnable<T> wrap(ActionListener<T> listener, CheckedConsumer<ActionListener<T>, Exception> consumer) {
return new ActionRunnable<T>(listener) {
@Override
protected void doRun() throws Exception {
consumer.accept(listener);
}
};
}
public ActionRunnable(ActionListener<Response> listener) { public ActionRunnable(ActionListener<Response> listener) {
this.listener = listener; this.listener = listener;
} }
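
Per the javadoc, wrap(...) routes any exception thrown by the consumer into the listener's onFailure, so call sites no longer need an anonymous ActionRunnable subclass. A reduced, self-contained sketch with local stand-ins for the Elasticsearch types:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class ActionRunnableSketch {
    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    interface CheckedConsumer<T> {
        void accept(T t) throws Exception;
    }

    // Same contract as ActionRunnable.wrap: run the consumer against the listener,
    // funnelling any thrown exception into onFailure.
    static <T> Runnable wrap(ActionListener<T> listener, CheckedConsumer<ActionListener<T>> consumer) {
        return () -> {
            try {
                consumer.accept(listener);
            } catch (Exception e) {
                listener.onFailure(e);
            }
        };
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        ActionListener<String> listener = new ActionListener<String>() {
            @Override public void onResponse(String response) { System.out.println("response: " + response); }
            @Override public void onFailure(Exception e) { System.out.println("failure: " + e); }
        };
        executor.execute(wrap(listener, l -> l.onResponse("ok")));
        executor.shutdown();
    }
}

The TransportBulkAction, SimulateExecutionService, TransportBroadcastAction, and TransportMasterNodeAction changes below are all mechanical applications of this helper.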

View File

@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
@ -32,7 +33,6 @@ import org.elasticsearch.client.OriginSettingClient;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -45,9 +45,7 @@ import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.tasks.TaskResult; import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.io.IOException; import java.io.IOException;
@ -118,27 +116,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
} }
GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId()); GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(), transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
new TransportResponseHandler<GetTaskResponse>() { new ActionListenerResponseHandler<>(listener, GetTaskResponse::new, ThreadPool.Names.SAME));
@Override
public GetTaskResponse read(StreamInput in) throws IOException {
return new GetTaskResponse(in);
}
@Override
public void handleResponse(GetTaskResponse response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
listener.onFailure(exp);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
} }
/** /**
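
ActionListenerResponseHandler collapses the roughly twenty removed lines into a single adapter: every handler callback forwards to the wrapped listener. The idea with local stand-ins (not the Elasticsearch interfaces):

public final class ListenerResponseHandlerSketch {
    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    interface ResponseHandler<T> {
        void handleResponse(T response);
        void handleException(Exception e);
        String executor();
    }

    // All the removed anonymous TransportResponseHandler did was forward, which an
    // adapter expresses once.
    static <T> ResponseHandler<T> forListener(ActionListener<T> listener, String executor) {
        return new ResponseHandler<T>() {
            @Override public void handleResponse(T response) { listener.onResponse(response); }
            @Override public void handleException(Exception e) { listener.onFailure(e); }
            @Override public String executor() { return executor; }
        };
    }

    public static void main(String[] args) {
        ResponseHandler<String> handler = forListener(new ActionListener<String>() {
            @Override public void onResponse(String response) { System.out.println("ok: " + response); }
            @Override public void onFailure(Exception e) { System.out.println("err: " + e); }
        }, "same");
        handler.handleResponse("task result");
    }
}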

View File

@ -28,6 +28,7 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.RoutingMissingException;
@ -58,7 +59,6 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
@ -334,10 +334,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
* retries on retryable cluster blocks, resolves item requests, * retries on retryable cluster blocks, resolves item requests,
* constructs shard bulk requests and delegates execution to shard bulk action * constructs shard bulk requests and delegates execution to shard bulk action
* */ * */
private final class BulkOperation extends AbstractRunnable { private final class BulkOperation extends ActionRunnable<BulkResponse> {
private final Task task; private final Task task;
private final BulkRequest bulkRequest; private final BulkRequest bulkRequest;
private final ActionListener<BulkResponse> listener;
private final AtomicArray<BulkItemResponse> responses; private final AtomicArray<BulkItemResponse> responses;
private final long startTimeNanos; private final long startTimeNanos;
private final ClusterStateObserver observer; private final ClusterStateObserver observer;
@ -345,9 +344,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
BulkOperation(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, AtomicArray<BulkItemResponse> responses, BulkOperation(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, AtomicArray<BulkItemResponse> responses,
long startTimeNanos, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) { long startTimeNanos, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
super(listener);
this.task = task; this.task = task;
this.bulkRequest = bulkRequest; this.bulkRequest = bulkRequest;
this.listener = listener;
this.responses = responses; this.responses = responses;
this.startTimeNanos = startTimeNanos; this.startTimeNanos = startTimeNanos;
this.indicesThatCannotBeCreated = indicesThatCannotBeCreated; this.indicesThatCannotBeCreated = indicesThatCannotBeCreated;
@ -355,12 +354,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
} }
@Override @Override
public void onFailure(Exception e) { protected void doRun() {
listener.onFailure(e);
}
@Override
protected void doRun() throws Exception {
final ClusterState clusterState = observer.setAndGetObservedState(); final ClusterState clusterState = observer.setAndGetObservedState();
if (handleBlockExceptions(clusterState)) { if (handleBlockExceptions(clusterState)) {
return; return;

View File

@ -65,23 +65,20 @@ class SimulateExecutionService {
} }
public void execute(SimulatePipelineRequest.Parsed request, ActionListener<SimulatePipelineResponse> listener) { public void execute(SimulatePipelineRequest.Parsed request, ActionListener<SimulatePipelineResponse> listener) {
threadPool.executor(THREAD_POOL_NAME).execute(new ActionRunnable<SimulatePipelineResponse>(listener) { threadPool.executor(THREAD_POOL_NAME).execute(ActionRunnable.wrap(listener, l -> {
@Override final AtomicInteger counter = new AtomicInteger();
protected void doRun() { final List<SimulateDocumentResult> responses = new CopyOnWriteArrayList<>();
final AtomicInteger counter = new AtomicInteger(); for (IngestDocument ingestDocument : request.getDocuments()) {
final List<SimulateDocumentResult> responses = new CopyOnWriteArrayList<>(); executeDocument(request.getPipeline(), ingestDocument, request.isVerbose(), (response, e) -> {
for (IngestDocument ingestDocument : request.getDocuments()) { if (response != null) {
executeDocument(request.getPipeline(), ingestDocument, request.isVerbose(), (response, e) -> { responses.add(response);
if (response != null) { }
responses.add(response); if (counter.incrementAndGet() == request.getDocuments().size()) {
} listener.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(),
if (counter.incrementAndGet() == request.getDocuments().size()) { request.isVerbose(), responses));
listener.onResponse(new SimulatePipelineResponse(request.getPipeline().getId(), }
request.isVerbose(), responses)); });
}
});
}
} }
}); }));
} }
} }
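
The rewritten execute(...) keeps the existing fan-in idiom: fire one callback per document, collect results in a thread-safe list, and complete the listener exactly once when the counter reaches the document count. The idiom in isolation:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public final class FanInSketch {
    // Run tasks.size() callback-style operations and invoke onDone exactly once,
    // after the last one reports in; null results are skipped, mirroring the
    // (response, e) handling above.
    static <T> void fanIn(List<Consumer<Consumer<T>>> tasks, Consumer<List<T>> onDone) {
        AtomicInteger counter = new AtomicInteger();
        List<T> responses = new CopyOnWriteArrayList<>();
        for (Consumer<Consumer<T>> task : tasks) {
            task.accept(response -> {
                if (response != null) {
                    responses.add(response);
                }
                if (counter.incrementAndGet() == tasks.size()) {
                    onDone.accept(responses);
                }
            });
        }
    }

    public static void main(String[] args) {
        List<Consumer<Consumer<String>>> tasks =
            List.of(cb -> cb.accept("doc-1"), cb -> cb.accept(null), cb -> cb.accept("doc-3"));
        fanIn(tasks, all -> System.out.println(all)); // [doc-1, doc-3]
    }
}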

View File

@ -64,14 +64,20 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
execute(task, request, new ActionListener<Response>() { execute(task, request, new ActionListener<Response>() {
@Override @Override
public void onResponse(Response response) { public void onResponse(Response response) {
taskManager.unregister(task); try {
listener.onResponse(response); taskManager.unregister(task);
} finally {
listener.onResponse(response);
}
} }
@Override @Override
public void onFailure(Exception e) { public void onFailure(Exception e) {
taskManager.unregister(task); try {
listener.onFailure(e); taskManager.unregister(task);
} finally {
listener.onFailure(e);
}
} }
}); });
return task; return task;
@ -86,18 +92,20 @@ public abstract class TransportAction<Request extends ActionRequest, Response ex
execute(task, request, new ActionListener<Response>() { execute(task, request, new ActionListener<Response>() {
@Override @Override
public void onResponse(Response response) { public void onResponse(Response response) {
if (task != null) { try {
taskManager.unregister(task); taskManager.unregister(task);
} finally {
listener.onResponse(task, response);
} }
listener.onResponse(task, response);
} }
@Override @Override
public void onFailure(Exception e) { public void onFailure(Exception e) {
if (task != null) { try {
taskManager.unregister(task); taskManager.unregister(task);
} finally {
listener.onFailure(task, e);
} }
listener.onFailure(task, e);
} }
}); });
return task; return task;
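
The try/finally flip guarantees the listener is always completed even if taskManager.unregister throws; previously an unregister failure would have propagated before the listener was ever called. The ordering in miniature:

public final class UnregisterSketch {
    interface Listener {
        void onResponse(String response);
    }

    static void completeSafely(Runnable unregister, Listener listener, String response) {
        try {
            unregister.run();              // may throw
        } finally {
            listener.onResponse(response); // runs either way
        }
    }

    public static void main(String[] args) {
        try {
            completeSafely(() -> { throw new IllegalStateException("unregister failed"); },
                r -> System.out.println("listener got: " + r), "ok");
        } catch (IllegalStateException e) {
            System.out.println("propagated after listener ran: " + e.getMessage());
        }
    }
}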

View File

@ -298,12 +298,8 @@ public abstract class TransportBroadcastAction<
} }
} }
protected void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) { private void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
transportService.getThreadPool().executor(shardExecutor).execute(new ActionRunnable<ShardResponse>(listener) { transportService.getThreadPool().executor(shardExecutor)
@Override .execute(ActionRunnable.wrap(listener, l -> l.onResponse(shardOperation(request, task))));
protected void doRun() throws Exception {
listener.onResponse(shardOperation(request, task));
}
});
} }
} }

View File

@ -161,12 +161,8 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
delegatedListener.onFailure(t); delegatedListener.onFailure(t);
} }
}); });
threadPool.executor(executor).execute(new ActionRunnable<Response>(delegate) { threadPool.executor(executor)
@Override .execute(ActionRunnable.wrap(delegate, l -> masterOperation(task, request, clusterState, l)));
protected void doRun() throws Exception {
masterOperation(task, request, clusterState, delegate);
}
});
} }
} else { } else {
if (nodes.getMasterNode() == null) { if (nodes.getMasterNode() == null) {

Some files were not shown because too many files have changed in this diff