Merge remote-tracking branch 'origin/master' into feature/synced_flush
commit 87af6491c9
@@ -13,7 +13,7 @@ See the {client}/perl-api/current/index.html[official Elasticsearch Perl client]
 See the {client}/python-api/current/index.html[official Elasticsearch Python client].
 
 * http://github.com/elasticsearch/elasticsearch-dsl-py[elasticsearch-dsl-py]
-chainable query and filter construction built on top of offical client.
+chainable query and filter construction built on top of official client.
 
 * http://github.com/rhec/pyelasticsearch[pyelasticsearch]:
 Python client.
@@ -60,6 +60,9 @@
 * http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]:
 Elasticsearch WordPress Plugin
 
+* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]:
+Elasticsearch WordPress Plugin
+
 * https://github.com/OlegKunitsyn/eslogd[eslogd]:
 Linux daemon that replicates events to a central Elasticsearch server in real-time
 
@@ -134,7 +134,7 @@ be "two hop" operations).
 --------------------------------------------------
 // on startup
 
-Client client = new TransportClient()
+Client client = TransportClient.builder().build()
         .addTransportAddress(new InetSocketTransportAddress("host1", 9300))
         .addTransportAddress(new InetSocketTransportAddress("host2", 9300));
 
@@ -150,7 +150,7 @@ Note that you have to set the cluster name if you use one different than
 --------------------------------------------------
 Settings settings = ImmutableSettings.settingsBuilder()
         .put("cluster.name", "myClusterName").build();
-Client client = new TransportClient(settings);
+Client client = TransportClient.builder().settings(settings).build();
 //Add transport addresses and do something with the client...
 --------------------------------------------------
 
@@ -166,7 +166,7 @@ used will be the ones that the other nodes were started with (the
 --------------------------------------------------
 Settings settings = ImmutableSettings.settingsBuilder()
         .put("client.transport.sniff", true).build();
-TransportClient client = new TransportClient(settings);
+TransportClient client = TransportClient.builder().settings(settings).build();
 --------------------------------------------------
 
 Other transport client level settings include:
@@ -150,7 +150,7 @@ FilterBuilder filter = geoDistanceFilter("pin.location") <1>
 <2> center point
 <3> distance from center point
 <4> optimize bounding box: `memory`, `indexed` or `none`
-<5> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slighly more precise but
+<5> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slightly more precise but
 significantly slower) or `GeoDistance.PLANE` (faster, but inaccurate on long distances and close to the poles)
 
 Note that you can cache the result using
@@ -1,6 +1,8 @@
 [[search-aggregations]]
-== Aggregations
+= Aggregations
 
+[partintro]
+--
 The aggregations framework helps provide aggregated data based on a search query. It is based on simple building blocks
 called aggregations, that can be composed in order to build complex summaries of the data.
 
@@ -11,16 +13,19 @@ query/filters of the search request).
 There are many different types of aggregations, each with its own purpose and output. To better understand these types,
 it is often easier to break them into two main families:
 
-_Bucketing_::
+<<search-aggregations-bucket, _Bucketing_>>::
 A family of aggregations that build buckets, where each bucket is associated with a _key_ and a document
 criterion. When the aggregation is executed, all the buckets criteria are evaluated on every document in
 the context and when a criterion matches, the document is considered to "fall in" the relevant bucket.
 By the end of the aggregation process, we'll end up with a list of buckets - each one with a set of
 documents that "belong" to it.
 
-_Metric_::
+<<search-aggregations-metrics, _Metric_>>::
 Aggregations that keep track and compute metrics over a set of documents.
 
+<<search-aggregations-reducer, _Reducer_>>::
+Aggregations that aggregate the output of other aggregations and their associated metrics.
+
 The interesting part comes next. Since each bucket effectively defines a document set (all documents belonging to
 the bucket), one can potentially associate aggregations on the bucket level, and those will execute within the context
 of that bucket. This is where the real power of aggregations kicks in: *aggregations can be nested!*
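Aside (not part of the diff): a minimal sketch of the nesting described above, assuming hypothetical `gender` and `height` fields. The `terms` bucketing aggregation holds an `avg` metric sub-aggregation, which is then computed once per bucket:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "genders": {
            "terms": { "field": "gender" },
            "aggs": {
                "avg_height": { "avg": { "field": "height" } }
            }
        }
    }
}
--------------------------------------------------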
@@ -31,7 +36,7 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th
 another higher-level aggregation).
 
 [float]
-=== Structuring Aggregations
+== Structuring Aggregations
 
 The following snippet captures the basic structure of aggregations:
 
@@ -62,7 +67,7 @@ bucketing aggregation. For example, if you define a set of aggregations under th
 sub-aggregations will be computed for the range buckets that are defined.
 
 [float]
-==== Values Source
+=== Values Source
 
 Some aggregations work on values extracted from the aggregated documents. Typically, the values will be extracted from
 a specific document field which is set using the `field` key for the aggregations. It is also possible to define a
@@ -89,142 +94,7 @@ perform optimizations when dealing with sorted values (for example, with the `mi
 sorted, Elasticsearch will skip the iterations over all the values and rely on the first value in the list to be the
 minimum value among all other values associated with the same document).
 
-[float]
-=== Metrics Aggregations
-
-The aggregations in this family compute metrics based on values extracted in one way or another from the documents that
-are being aggregated. The values are typically extracted from the fields of the document (using the field data), but
-can also be generated using scripts.
-
-Numeric metrics aggregations are a special type of metrics aggregation which output numeric values. Some aggregations output
-a single numeric metric (e.g. `avg`) and are called `single-value numeric metrics aggregation`, others generate multiple
-metrics (e.g. `stats`) and are called `multi-value numeric metrics aggregation`. The distinction between single-value and
-multi-value numeric metrics aggregations plays a role when these aggregations serve as direct sub-aggregations of some
-bucket aggregations (some bucket aggregations enable you to sort the returned buckets based on the numeric metrics in each bucket).
-
-[float]
-=== Bucket Aggregations
-
-Bucket aggregations don't calculate metrics over fields like the metrics aggregations do, but instead, they create
-buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which determines
-whether or not a document in the current context "falls" into it. In other words, the buckets effectively define document
-sets. In addition to the buckets themselves, the `bucket` aggregations also compute and return the number of documents
-that "fell in" to each bucket.
-
-Bucket aggregations, as opposed to `metrics` aggregations, can hold sub-aggregations. These sub-aggregations will be
-aggregated for the buckets created by their "parent" bucket aggregation.
-
-There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some
-define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process.
-
-[float]
-=== Reducer Aggregations
-
-coming[2.0.0]
-
-experimental[]
-
-Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding
-information to the output tree. There are many different types of reducer, each computing different information from
-other aggregations, but these types can broken down into two families:
-
-_Parent_::
-A family of reducer aggregations that is provided with the output of its parent aggregation and is able
-to compute new buckets or new aggregations to add to existing buckets.
-
-_Sibling_::
-Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a
-new aggregation which will be at the same level as the sibling aggregation.
-
-Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths`
-parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the
-<<search-aggregations-bucket-terms-aggregation-order, terms aggregation order>> section.
-
-?????? SHOULD THE SECTION ABOUT DEFINING AGGREGATION PATHS
-BE IN THIS PAGE AND REFERENCED FROM THE TERMS AGGREGATION DOCUMENTATION ???????
-
-Reducer aggregations cannot have sub-aggregations but depending on the type it can reference another reducer in the `buckets_path`
-allowing reducers to be chained.
-
-NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be
-included in the final output.
-
-[float]
-=== Caching heavy aggregations
-
-Frequently used aggregations (e.g. for display on the home page of a website)
-can be cached for faster responses. These cached results are the same results
-that would be returned by an uncached aggregation -- you will never get stale
-results.
-
-See <<index-modules-shard-query-cache>> for more details.
-
-[float]
-=== Returning only aggregation results
-
-There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by
-setting `size=0`. For example:
-
-[source,js]
---------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{
-    "size": 0,
-    "aggregations": {
-        "my_agg": {
-            "terms": {
-                "field": "text"
-            }
-        }
-    }
-}
-'
---------------------------------------------------
-
-Setting `size` to `0` avoids executing the fetch phase of the search making the request more efficient.
-
-[float]
-=== Metadata
-
-You can associate a piece of metadata with individual aggregations at request time that will be returned in place
-at response time.
-
-Consider this example where we want to associate the color blue with our `terms` aggregation.
-
-[source,js]
---------------------------------------------------
-{
-    ...
-    aggs": {
-        "titles": {
-            "terms": {
-                "field": "title"
-            },
-            "meta": {
-                "color": "blue"
-            },
-        }
-    }
-}
---------------------------------------------------
-
-Then that piece of metadata will be returned in place for our `titles` terms aggregation
-
-[source,js]
---------------------------------------------------
-{
-    ...
-    "aggregations": {
-        "titles": {
-            "meta": {
-                "color" : "blue"
-            },
-            "buckets": [
-            ]
-        }
-    }
-}
---------------------------------------------------
-
+--
 
 include::aggregations/metrics.asciidoc[]
 
@@ -232,3 +102,4 @@ include::aggregations/bucket.asciidoc[]
 
 include::aggregations/reducer.asciidoc[]
 
+include::aggregations/misc.asciidoc[]
@@ -0,0 +1,49 @@
+[[search-aggregations-bucket]]
+== Bucket Aggregations
+
+Bucket aggregations don't calculate metrics over fields like the metrics aggregations do, but instead, they create
+buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which determines
+whether or not a document in the current context "falls" into it. In other words, the buckets effectively define document
+sets. In addition to the buckets themselves, the `bucket` aggregations also compute and return the number of documents
+that "fell in" to each bucket.
+
+Bucket aggregations, as opposed to `metrics` aggregations, can hold sub-aggregations. These sub-aggregations will be
+aggregated for the buckets created by their "parent" bucket aggregation.
+
+There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some
+define a fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process.
+
+include::bucket/children-aggregation.asciidoc[]
+
+include::bucket/datehistogram-aggregation.asciidoc[]
+
+include::bucket/daterange-aggregation.asciidoc[]
+
+include::bucket/filter-aggregation.asciidoc[]
+
+include::bucket/filters-aggregation.asciidoc[]
+
+include::bucket/geodistance-aggregation.asciidoc[]
+
+include::bucket/geohashgrid-aggregation.asciidoc[]
+
+include::bucket/global-aggregation.asciidoc[]
+
+include::bucket/histogram-aggregation.asciidoc[]
+
+include::bucket/iprange-aggregation.asciidoc[]
+
+include::bucket/missing-aggregation.asciidoc[]
+
+include::bucket/nested-aggregation.asciidoc[]
+
+include::bucket/range-aggregation.asciidoc[]
+
+include::bucket/reverse-nested-aggregation.asciidoc[]
+
+include::bucket/sampler-aggregation.asciidoc[]
+
+include::bucket/significantterms-aggregation.asciidoc[]
+
+include::bucket/terms-aggregation.asciidoc[]
@@ -2,7 +2,7 @@
 === Date Range Aggregation
 
 A range aggregation that is dedicated for date values. The main difference between this aggregation and the normal <<search-aggregations-bucket-range-aggregation,range>> aggregation is that the `from` and `to` values can be expressed in <<date-math,Date Math>> expressions, and it is also possible to specify a date format by which the `from` and `to` response fields will be returned.
-Note that this aggregration includes the `from` value and excludes the `to` value for each range.
+Note that this aggregation includes the `from` value and excludes the `to` value for each range.
 
 Example:
 
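Aside (not part of the diff): the example body falls outside this hunk's context. A representative `date_range` request, assuming a hypothetical `date` field, might look like this, with `from` inclusive and `to` exclusive as the corrected sentence above states:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "range": {
            "date_range": {
                "field": "date",
                "format": "MM-yyyy",
                "ranges": [
                    { "to": "now-10M/M" },
                    { "from": "now-10M/M" }
                ]
            }
        }
    }
}
--------------------------------------------------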
@@ -119,7 +119,7 @@ size:: Optional. The maximum number of geohash buckets to return
 prioritised based on the volumes of documents they contain.
 A value of `0` will return all buckets that
 contain a hit, use with caution as this could use a lot of CPU
-and network bandwith if there are many buckets.
+and network bandwidth if there are many buckets.
 
 shard_size:: Optional. To allow for more accurate counting of the top cells
 returned in the final result the aggregation defaults to
@@ -30,7 +30,7 @@ Example:
 
 The above aggregation demonstrates how one would compute aggregations (`avg_price` in this example) on all the documents in the search context, regardless of the query (in our example, it will compute the average price over all products in our catalog, not just on the "shirts").
 
-The response for the above aggreation:
+The response for the above aggregation:
 
 [source,js]
 --------------------------------------------------
@@ -160,7 +160,7 @@ Example:
 
 ==== Order
 
-By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controled
+By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled
 using the `order` setting.
 
 Ordering the buckets by their key - descending:
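Aside (not part of the diff): the example itself falls outside the hunk. A sketch of key-descending ordering, assuming a hypothetical `price` field:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "prices": {
            "histogram": {
                "field": "price",
                "interval": 50,
                "order": { "_key": "desc" }
            }
        }
    }
}
--------------------------------------------------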
@@ -72,7 +72,7 @@ Response:
 The `shard_size` parameter limits how many top-scoring documents are collected in the sample processed on each shard.
 The default value is 100.
 
-=== Controlling diversity
+==== Controlling diversity
 Optionally, you can use the `field` or `script` and `max_docs_per_value` settings to control the maximum number of documents collected on any one shard which share a common value.
 The choice of value (e.g. `author`) is loaded from a regular `field` or derived dynamically by a `script`.
 
@@ -139,16 +139,16 @@ The default setting is to use `global_ordinals` if this information is available
 The `bytes_hash` setting may prove faster in some cases but introduces the possibility of false positives in de-duplication logic due to the possibility of hash collisions.
 Please note that Elasticsearch will ignore the choice of execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints.
 
-=== Limitations
+==== Limitations
 
-==== Cannot be nested under `breadth_first` aggregations
+===== Cannot be nested under `breadth_first` aggregations
 Being a quality-based filter the sampler aggregation needs access to the relevance score produced for each document.
 It therefore cannot be nested under a `terms` aggregation which has the `collect_mode` switched from the default `depth_first` mode to `breadth_first` as this discards scores.
 In this situation an error will be thrown.
 
-==== Limited de-dup logic.
+===== Limited de-dup logic.
 The de-duplication logic in the diversify settings applies only at a shard level so will not apply across shards.
 
-==== No specialized syntax for geo/date fields
+===== No specialized syntax for geo/date fields
 Currently the syntax for defining the diversifying values is defined by a choice of `field` or `script` - there is no added syntactical sugar for expressing geo or date units such as "1w" (1 week).
 This support may be added in a later release and users will currently have to create these sorts of values using a script.
@@ -0,0 +1,48 @@
+[[search-aggregations-metrics]]
+== Metrics Aggregations
+
+The aggregations in this family compute metrics based on values extracted in one way or another from the documents that
+are being aggregated. The values are typically extracted from the fields of the document (using the field data), but
+can also be generated using scripts.
+
+Numeric metrics aggregations are a special type of metrics aggregation which output numeric values. Some aggregations output
+a single numeric metric (e.g. `avg`) and are called `single-value numeric metrics aggregation`, others generate multiple
+metrics (e.g. `stats`) and are called `multi-value numeric metrics aggregation`. The distinction between single-value and
+multi-value numeric metrics aggregations plays a role when these aggregations serve as direct sub-aggregations of some
+bucket aggregations (some bucket aggregations enable you to sort the returned buckets based on the numeric metrics in each bucket).
+
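Aside (not part of the diff): a sketch of why the distinction matters, assuming hypothetical `gender` and `height` fields. The `terms` buckets are ordered by one value (`avg`) picked out of a multi-value `stats` sub-aggregation:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "genders": {
            "terms": {
                "field": "gender",
                "order": { "height_stats.avg": "desc" }
            },
            "aggs": {
                "height_stats": { "stats": { "field": "height" } }
            }
        }
    }
}
--------------------------------------------------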
+include::metrics/avg-aggregation.asciidoc[]
+
+include::metrics/cardinality-aggregation.asciidoc[]
+
+include::metrics/extendedstats-aggregation.asciidoc[]
+
+include::metrics/geobounds-aggregation.asciidoc[]
+
+include::metrics/max-aggregation.asciidoc[]
+
+include::metrics/min-aggregation.asciidoc[]
+
+include::metrics/percentile-aggregation.asciidoc[]
+
+include::metrics/percentile-rank-aggregation.asciidoc[]
+
+include::metrics/scripted-metric-aggregation.asciidoc[]
+
+include::metrics/stats-aggregation.asciidoc[]
+
+include::metrics/sum-aggregation.asciidoc[]
+
+include::metrics/tophits-aggregation.asciidoc[]
+
+include::metrics/valuecount-aggregation.asciidoc[]
@@ -0,0 +1,76 @@
+[[caching-heavy-aggregations]]
+== Caching heavy aggregations
+
+Frequently used aggregations (e.g. for display on the home page of a website)
+can be cached for faster responses. These cached results are the same results
+that would be returned by an uncached aggregation -- you will never get stale
+results.
+
+See <<index-modules-shard-query-cache>> for more details.
+
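Aside (not part of the diff): a sketch of opting a request into the shard query cache; the `search_type=count` and `query_cache` flags are assumptions about this era's API, so treat the linked section as authoritative:

[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/my_index/_search?search_type=count&query_cache=true' -d '{
    "aggregations": {
        "popular_colors": {
            "terms": { "field": "color" }
        }
    }
}
'
--------------------------------------------------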
+[[returning-only-agg-results]]
+== Returning only aggregation results
+
+There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by
+setting `size=0`. For example:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{
+    "size": 0,
+    "aggregations": {
+        "my_agg": {
+            "terms": {
+                "field": "text"
+            }
+        }
+    }
+}
+'
+--------------------------------------------------
+
+Setting `size` to `0` avoids executing the fetch phase of the search making the request more efficient.
+
+[[agg-metadata]]
+== Aggregation Metadata
+
+You can associate a piece of metadata with individual aggregations at request time that will be returned in place
+at response time.
+
+Consider this example where we want to associate the color blue with our `terms` aggregation.
+
+[source,js]
+--------------------------------------------------
+{
+    ...
+    "aggs": {
+        "titles": {
+            "terms": {
+                "field": "title"
+            },
+            "meta": {
+                "color": "blue"
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+Then that piece of metadata will be returned in place for our `titles` terms aggregation:
+
+[source,js]
+--------------------------------------------------
+{
+    ...
+    "aggregations": {
+        "titles": {
+            "meta": {
+                "color" : "blue"
+            },
+            "buckets": [
+            ]
+        }
+    }
+}
+--------------------------------------------------
@@ -0,0 +1,160 @@
+[[search-aggregations-reducer]]
+
+== Reducer Aggregations
+
+coming[2.0.0]
+
+experimental[]
+
+Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding
+information to the output tree. There are many different types of reducer, each computing different information from
+other aggregations, but these types can be broken down into two families:
+
+_Parent_::
+A family of reducer aggregations that is provided with the output of its parent aggregation and is able
+to compute new buckets or new aggregations to add to existing buckets.
+
+_Sibling_::
+Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a
+new aggregation which will be at the same level as the sibling aggregation.
+
+Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_path`
+parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the
+<<bucket-path-syntax, `buckets_path` Syntax>> section below.
+
+Reducer aggregations cannot have sub-aggregations, but depending on the type they can reference another reducer in the `buckets_path`,
+allowing reducers to be chained. For example, you can chain together two derivatives to calculate the second derivative
+(e.g. a derivative of a derivative).
+
+NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be
+included in the final output.
+
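Aside (not part of the diff): a sketch of such chaining, reusing the monthly `sales` sum that appears in the derivative examples later in this commit. The second `derivative` simply names the first in its `buckets_path`:

[source,js]
--------------------------------------------------
{
    "aggs": {
        "sales_per_month": {
            "date_histogram": { "field": "date", "interval": "month" },
            "aggs": {
                "sales": { "sum": { "field": "price" } },
                "sales_deriv": {
                    "derivative": { "buckets_path": "sales" }
                },
                "sales_2nd_deriv": {
                    "derivative": { "buckets_path": "sales_deriv" }
                }
            }
        }
    }
}
--------------------------------------------------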
+[[bucket-path-syntax]]
+[float]
+=== `buckets_path` Syntax
+
+Most reducers require another aggregation as their input. The input aggregation is defined via the `buckets_path`
+parameter, which follows a specific format:
+
+--------------------------------------------------
+AGG_SEPARATOR := '>'
+METRIC_SEPARATOR := '.'
+AGG_NAME := <the name of the aggregation>
+METRIC := <the name of the metric (in case of multi-value metrics aggregation)>
+PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>]
+--------------------------------------------------
+
+For example, the path `"my_bucket>my_stats.avg"` will path to the `avg` value in the `"my_stats"` metric, which is
+contained in the `"my_bucket"` bucket aggregation.
+
+Paths are relative from the position of the reducer; they are not absolute paths, and the path cannot go back "up" the
+aggregation tree. For example, this moving average is embedded inside a date_histogram and refers to a "sibling"
+metric `"the_sum"`:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_date_histo":{
+        "date_histogram":{
+            "field":"timestamp",
+            "interval":"day"
+        },
+        "aggs":{
+            "the_sum":{
+                "sum":{ "field": "lemmings" } <1>
+            },
+            "the_movavg":{
+                "moving_avg":{ "buckets_path": "the_sum" } <2>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> The metric is called `"the_sum"`
+<2> The `buckets_path` refers to the metric via a relative path `"the_sum"`
+
+`buckets_path` is also used for Sibling reducer aggregations, where the aggregation is "next" to a series of buckets
+instead of embedded "inside" them. For example, the `max_bucket` aggregation uses the `buckets_path` to specify
+a metric embedded inside a sibling aggregation:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                }
+            }
+        },
+        "max_monthly_sales": {
+            "max_bucket": {
+                "buckets_path": "sales_per_month>sales" <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the
+`sales_per_month` date histogram.
+
+[float]
+==== Special Paths
+
+Instead of pathing to a metric, `buckets_path` can use a special `"_count"` path. This instructs
+the reducer to use the document count as its input. For example, a moving average can be calculated on the document
+count of each bucket, instead of a specific metric:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_date_histo":{
+        "date_histogram":{
+            "field":"timestamp",
+            "interval":"day"
+        },
+        "aggs":{
+            "the_movavg":{
+                "moving_avg":{ "buckets_path": "_count" } <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram
+
+[float]
+=== Dealing with gaps in the data
+
+There are a couple of reasons why the data output by the enclosing histogram may have gaps:
+
+* There are no documents matching the query for some buckets
+* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval
+on the enclosing histogram or with a query matching only a small number of documents)
+
+Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both
+the current bucket and the next bucket. The derivative reducer aggregation has a `gap_policy` parameter to define what the behavior
+should be when a gap in the data is found. There are currently two options for controlling the gap policy:
+
+_ignore_::
+This option will not produce a derivative value for any buckets where the value in the current or previous bucket is
+missing
+
+_insert_zeros_::
+This option will assume the missing value is `0` and calculate the derivative with the value `0`.
+
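Aside (not part of the diff): a reducer selects between these options with the `gap_policy` parameter that appears in the `moving_avg` parameter table elsewhere in this commit; a sketch:

[source,js]
--------------------------------------------------
{
    "the_movavg": {
        "moving_avg": {
            "buckets_path": "the_sum",
            "gap_policy": "insert_zeros"
        }
    }
}
--------------------------------------------------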
+
+include::reducer/derivative-aggregation.asciidoc[]
+include::reducer/max-bucket-aggregation.asciidoc[]
+include::reducer/min-bucket-aggregation.asciidoc[]
+include::reducer/movavg-aggregation.asciidoc[]
@@ -5,6 +5,28 @@ A parent reducer aggregation which calculates the derivative of a specified metr
 aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0` (default
 for `histogram` aggregations).
 
+==== Syntax
+
+A `derivative` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "derivative": {
+        "buckets_path": "the_sum"
+    }
+}
+--------------------------------------------------
+
+.`derivative` Parameters
+|===
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |Path to the metric of interest (see <<bucket-path-syntax, `buckets_path` Syntax>> for more details) |Required |
+|===
+
+
+==== First Order Derivative
+
 The following snippet calculates the derivative of the total monthly `sales`:
 
 [source,js]
@@ -82,7 +104,7 @@ And the following may be the response:
 <1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative
 <2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units
 would be $/month assuming the `price` field has units of $.
-<3> The number of documents in the bucket are represented by the `doc_count` value
+<3> The number of documents in the bucket are represented by the `doc_count` field
 
 ==== Second Order Derivative
 
@@ -172,23 +194,3 @@ And the following may be the response:
 <1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the
 second derivative
 
-==== Dealing with gaps in the data
-
-There are a couple of reasons why the data output by the enclosing histogram may have gaps:
-
-* There are no documents matching the query for some buckets
-* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval
-on the enclosing histogram or with a query matching only a small number of documents)
-
-Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both
-the current bucket and the next bucket. In the derivative reducer aggregation has a `gap_policy` parameter to define what the behavior
-should be when a gap in the data is found. There are currently two options for controlling the gap policy:
-
-_ignore_::
-This option will not produce a derivative value for any buckets where the value in the current or previous bucket is
-missing
-
-_insert_zeros_::
-This option will assume the missing value is `0` and calculate the derivative with the value `0`.
-
@@ -1,10 +1,30 @@
 [[search-aggregations-reducer-max-bucket-aggregation]]
 === Max Bucket Aggregation
 
-A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibing aggregation
+A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation
 and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must
 be a multi-bucket aggregation.
 
+==== Syntax
+
+A `max_bucket` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "max_bucket": {
+        "buckets_path": "the_sum"
+    }
+}
+--------------------------------------------------
+
+.`max_bucket` Parameters
+|===
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |The path to the buckets we wish to find the maximum for (see <<bucket-path-syntax>> for more
+details) |Required |
+|===
+
 The following snippet calculates the maximum of the total monthly `sales`:
 
 [source,js]
@@ -32,7 +52,6 @@ The following snippet calculates the maximum of the total monthly `sales`:
 }
 }
 --------------------------------------------------
-
 <1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the
 `sales_per_month` date histogram.
 
@@ -5,6 +5,26 @@ A sibling reducer aggregation which identifies the bucket(s) with the minimum va
 and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must
 be a multi-bucket aggregation.
 
+==== Syntax
+
+A `min_bucket` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "min_bucket": {
+        "buckets_path": "the_sum"
+    }
+}
+--------------------------------------------------
+
+.`min_bucket` Parameters
+|===
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |Path to the metric of interest (see <<bucket-path-syntax, `buckets_path` Syntax>> for more details) |Required |
+|===
+
+
 The following snippet calculates the minimum of the total monthly `sales`:
 
 [source,js]
@@ -35,16 +35,14 @@ A `moving_avg` aggregation looks like this in isolation:
 
 .`moving_avg` Parameters
 |===
-|Parameter Name |Description |Required |Default
-|`buckets_path` |The path to the metric that we wish to calculate a moving average for |Required |
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |Path to the metric of interest (see <<bucket-path-syntax, `buckets_path` Syntax>> for more details) |Required |
 |`model` |The moving average weighting model that we wish to use |Optional |`simple`
 |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero`
 |`window` |The size of window to "slide" across the histogram. |Optional |`5`
 |`settings` |Model-specific settings, contents which differ depending on the model specified. |Optional |
 |===
 
 
 `moving_avg` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be
 embedded like any other metric aggregation:
 
@@ -73,27 +71,9 @@ embedded like any other metric aggregation:
 
 Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally
 add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram.
-The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram.
+The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see
+<<bucket-path-syntax>> for a description of the syntax for `buckets_path`).
 
-A moving average can also be calculated on the document count of each bucket, instead of a metric:
-
-[source,js]
---------------------------------------------------
-{
-    "my_date_histo":{
-        "date_histogram":{
-            "field":"timestamp",
-            "interval":"day"
-        },
-        "aggs":{
-            "the_movavg":{
-                "moving_avg":{ "buckets_path": "_count" } <1>
-            }
-        }
-    }
-}
---------------------------------------------------
-<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram
-
 ==== Models
 
@@ -250,7 +230,7 @@ image::images/reducers_movavg/double_0.2beta.png[]
 .Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.7
 image::images/reducers_movavg/double_0.7beta.png[]
 
-=== Prediction
+==== Prediction
 
 All the moving average models support a "prediction" mode, which will attempt to extrapolate into the future given the
 current smoothed, moving average. Depending on the model and parameter, these predictions may or may not be accurate.
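Aside (not part of the diff): a sketch of requesting predictions on the aggregation itself; the `predict` parameter name is an assumption about this era's API:

[source,js]
--------------------------------------------------
{
    "the_movavg": {
        "moving_avg": {
            "buckets_path": "the_sum",
            "model": "simple",
            "window": 30,
            "predict": 10 <1>
        }
    }
}
--------------------------------------------------
<1> Assumed parameter: the number of buckets to extrapolate beyond the end of the series.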
@@ -18,6 +18,9 @@ filters.
 
 |`char_filter` |An optional list of logical / registered name of char
 filters.
+
+|`position_offset_gap` |An optional number of positions to increment
+between each field value of a field using this analyzer.
 |=======================================================================
 
 Here is an example:
@@ -32,6 +35,7 @@ index :
         tokenizer : myTokenizer1
         filter : [myTokenFilter1, myTokenFilter2]
         char_filter : [my_html]
+        position_offset_gap: 256
     tokenizer :
         myTokenizer1 :
             type : standard
@@ -3,7 +3,7 @@
 
 Basic support for hunspell stemming. Hunspell dictionaries will be
 picked up from a dedicated hunspell directory on the filesystem
-(defaults to `<path.conf>/hunspell`). Each dictionary is expected to
+(`<path.conf>/hunspell`). Each dictionary is expected to
 have its own directory named after its associated locale (language).
 This dictionary directory is expected to hold a single `*.aff` and
 one or more `*.dic` files (all of which will automatically be picked up).
@@ -19,10 +19,6 @@ following directory layout will define the `en_US` dictionary:
 | | |-- en_US.aff
 --------------------------------------------------
 
-The location of the hunspell directory can be configured using the
-`indices.analysis.hunspell.dictionary.location` settings in
-_elasticsearch.yml_.
-
 Each dictionary can be configured with one setting:
 
 `ignore_case`::
@@ -91,9 +87,9 @@ the stemming is determined by the quality of the dictionary.
 [float]
 ==== Dictionary loading
 
-By default, the configured (`indices.analysis.hunspell.dictionary.location`)
-or default Hunspell directory (`config/hunspell/`) is checked for dictionaries
-when the node starts up, and any dictionaries are automatically loaded.
+By default, the default Hunspell directory (`config/hunspell/`) is checked
+for dictionaries when the node starts up, and any dictionaries are
+automatically loaded.
 
 Dictionary loading can be deferred until they are actually used by setting
 `indices.analysis.hunspell.dictionary.lazy` to `true` in the config file.
@@ -43,7 +43,7 @@ to be allocated to a node. This is in contrast to `include` which will
 include a node if ANY rule matches.
 
 The `include`, `exclude` and `require` values can have generic simple
-matching wildcards, for example, `value1*`. Additonally, special attribute
+matching wildcards, for example, `value1*`. Additionally, special attribute
 names called `_ip`, `_name`, `_id` and `_host` can be used to match by node
 ip address, name, id or host name, respectively.
 
@@ -24,7 +24,7 @@ The period with no flush happening to force a flush. Defaults to `30m`.
 How often to check if a flush is needed, randomized
 between the interval value and 2x the interval value. Defaults to `5s`.
 
-`index.gateway.local.sync`::
+`index.translog.sync_interval`::
 
 How often the translog is ++fsync++ed to disk. Defaults to `5s`.
 
@@ -18,6 +18,8 @@ include::docs.asciidoc[]
 
 include::search.asciidoc[]
 
+include::aggregations.asciidoc[]
+
 include::indices.asciidoc[]
 
 include::cat.asciidoc[]
@@ -59,7 +59,7 @@ you now use:
 GET /_cluster/state/nodes
 ---------------
 
-Simliarly for the `nodes_stats` API, if you want the `transport` and `http`
+Similarly for the `nodes_stats` API, if you want the `transport` and `http`
 metrics only, instead of:
 
 [source,sh]
@@ -309,7 +309,7 @@ Fields of type `murmur3` can no longer change `doc_values` or `index` setting.
 They are always stored with doc values, and not indexed.
 
 ==== Source field configuration
-The `_source` field no longer supports `includes` and `excludes` paramters. When
+The `_source` field no longer supports `includes` and `excludes` parameters. When
 `_source` is enabled, the entire original source will be stored.
 
 ==== Config based mappings
@@ -458,3 +458,29 @@ there is not enough disk space to complete this migration, the upgrade will be
 cancelled and can only be resumed once enough disk space is made available.
 
 The `index.store.distributor` setting has also been removed.
 
+=== Hunspell dictionary configuration
+
+The parameter `indices.analysis.hunspell.dictionary.location` has been removed,
+and `<path.conf>/hunspell` is always used.
+
+=== Java API Transport API construction
+
+The `TransportClient` construction code has changed, it now uses the builder
+pattern. Instead of using:
+
+[source,java]
+--------------------------------------------------
+Settings settings = ImmutableSettings.settingsBuilder()
+        .put("cluster.name", "myClusterName").build();
+Client client = new TransportClient(settings);
+--------------------------------------------------
+
+Use:
+
+[source,java]
+--------------------------------------------------
+Settings settings = ImmutableSettings.settingsBuilder()
+        .put("cluster.name", "myClusterName").build();
+Client client = TransportClient.builder().settings(settings).build();
+--------------------------------------------------
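One practical follow-up to the builder migration (an addition here, not from the commit): the built client still owns thread pools and connections, so it should be closed when no longer needed. A minimal sketch:

[source,java]
--------------------------------------------------
// Hedged sketch: the migrated construction plus explicit shutdown.
Client client = TransportClient.builder().settings(settings).build();
try {
    // ... add transport addresses and issue requests ...
} finally {
    client.close(); // releases transport threads and open connections
}
--------------------------------------------------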
@@ -69,7 +69,7 @@ Field statistics can be accessed with a subscript operator like this:
 documents.
 
 
-Field statistics are computed per shard and therfore these numbers can vary
+Field statistics are computed per shard and therefore these numbers can vary
 depending on the shard the current document resides in.
 The number of terms in a field cannot be accessed using the `_index` variable. See <<mapping-core-types, word count mapping type>> on how to do that.
@@ -90,7 +90,7 @@ affect is your set the `index_options` to `docs` (see <<mapping-core-types, mapp
 
 `_index['FIELD']['TERM'].ttf()`::
 
-The sum of term frequencys of term `TERM` in field `FIELD` over all
+The sum of term frequencies of term `TERM` in field `FIELD` over all
 documents. Will be returned, even if the term is not present in the
 current document.
@@ -108,7 +108,7 @@ If you need information on the positions of terms in a field, call
 
 [horizontal]
 `_POSITIONS`:: if you need the positions of the term
-`_OFFSETS`:: if you need the offests of the term
+`_OFFSETS`:: if you need the offsets of the term
 `_PAYLOADS`:: if you need the payloads of the term
 `_CACHE`:: if you need to iterate over all positions several times
@@ -148,7 +148,7 @@ The following are options allowed on the filter:
 
 `distance_type`::
 
-How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slighly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles).
+How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slightly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles).
 
 `optimize_bbox`::
@@ -13,7 +13,7 @@ Filters documents that exists within a range from a specific point:
     "filter" : {
         "geo_distance_range" : {
             "from" : "200km",
-            "to" : "400km"
+            "to" : "400km",
             "pin.location" : {
                 "lat" : 40,
                 "lon" : -70
@@ -75,7 +75,7 @@ is specified.
 
 In order to support parent-child joins, all of the (string) parent IDs
 must be resident in memory (in the <<index-modules-fielddata,field data cache>>.
-Additionaly, every child document is mapped to its parent using a long
+Additionally, every child document is mapped to its parent using a long
 value (approximately). It is advisable to keep the string parent ID short
 in order to reduce memory usage.
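Since this memory note concerns parent/child joins in general, a minimal Java sketch of a corresponding `has_child` query may help (the type and field names are illustrative assumptions):

[source,java]
--------------------------------------------------
// Hedged sketch: a has_child query via the Java API. Every (string)
// parent ID of the child type is loaded into field data, so short
// parent IDs keep the memory footprint small.
QueryBuilder query = QueryBuilders.hasChildQuery(
        "blog_tag",
        QueryBuilders.termQuery("tag", "something"));
--------------------------------------------------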
@@ -50,7 +50,7 @@ The `has_parent` filter also accepts a filter instead of a query:
 
 In order to support parent-child joins, all of the (string) parent IDs
 must be resident in memory (in the <<index-modules-fielddata,field data cache>>.
-Additionaly, every child document is mapped to its parent using a long
+Additionally, every child document is mapped to its parent using a long
 value (approximately). It is advisable to keep the string parent ID short
 in order to reduce memory usage.
@@ -55,7 +55,7 @@ Terms are allocated to the high or low frequency groups based on the
 `cutoff_frequency`, which can be specified as an absolute frequency
 (`>=1`) or as a relative frequency (`0.0 .. 1.0`). (Remember that document
 frequencies are computed on a per shard level as explained in the blog post
-{defguide}/relevance-is-broken.html[Relevence is broken].)
+{defguide}/relevance-is-broken.html[Relevance is broken].)
 
 Perhaps the most interesting property of this query is that it adapts to
 domain specific stopwords automatically. For example, on a video hosting
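A minimal Java sketch of a common terms query using a relative `cutoff_frequency` (the field name and the `0.001` cutoff are illustrative assumptions):

[source,java]
--------------------------------------------------
// Hedged sketch: terms whose document frequency is above the cutoff
// fall into the less selective high-frequency group and are scored
// more cheaply, which is how the query adapts to domain stopwords.
QueryBuilder query = QueryBuilders.commonTermsQuery("body", "nelly the elephant")
        .cutoffFrequency(0.001f);
--------------------------------------------------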
@@ -86,7 +86,7 @@ the `score_mode` parameter.
 
 In order to support parent-child joins, all of the (string) parent IDs
 must be resident in memory (in the <<index-modules-fielddata,field data cache>>.
-Additionaly, every child document is mapped to its parent using a long
+Additionally, every child document is mapped to its parent using a long
 value (approximately). It is advisable to keep the string parent ID short
 in order to reduce memory usage.
@@ -54,7 +54,7 @@ matching parent document. The score type can be specified with the
 
 In order to support parent-child joins, all of the (string) parent IDs
 must be resident in memory (in the <<index-modules-fielddata,field data cache>>.
-Additionaly, every child document is mapped to its parent using a long
+Additionally, every child document is mapped to its parent using a long
 value (approximately). It is advisable to keep the string parent ID short
 in order to reduce memory usage.
@@ -30,7 +30,7 @@ The `include` and `exclude` clauses can be any span type query. The
 `exclude` clause is the span query whose matches must not overlap those
 returned.
 
-In the above example all documents with the term hoya are filtered except the ones that have 'la' preceeding them.
+In the above example all documents with the term hoya are filtered except the ones that have 'la' preceding them.
 
 Other top level options:
@@ -68,7 +68,7 @@ same scope name that will work against the child documents. For example:
 
 In order to support parent-child joins, all of the (string) parent IDs
 must be resident in memory (in the <<index-modules-fielddata,field data cache>>.
-Additionaly, every child document is mapped to its parent using a long
+Additionally, every child document is mapped to its parent using a long
 value (approximately). It is advisable to keep the string parent ID short
 in order to reduce memory usage.
@@ -85,8 +85,6 @@ include::search/search-template.asciidoc[]
 
 include::search/search-shards.asciidoc[]
 
-include::search/aggregations.asciidoc[]
-
 include::search/facets.asciidoc[]
 
 include::search/suggesters.asciidoc[]
@@ -1,33 +0,0 @@
-[[search-aggregations-bucket]]
-
-include::bucket/global-aggregation.asciidoc[]
-
-include::bucket/filter-aggregation.asciidoc[]
-
-include::bucket/filters-aggregation.asciidoc[]
-
-include::bucket/missing-aggregation.asciidoc[]
-
-include::bucket/nested-aggregation.asciidoc[]
-
-include::bucket/reverse-nested-aggregation.asciidoc[]
-
-include::bucket/children-aggregation.asciidoc[]
-
-include::bucket/terms-aggregation.asciidoc[]
-
-include::bucket/significantterms-aggregation.asciidoc[]
-
-include::bucket/range-aggregation.asciidoc[]
-
-include::bucket/daterange-aggregation.asciidoc[]
-
-include::bucket/iprange-aggregation.asciidoc[]
-
-include::bucket/histogram-aggregation.asciidoc[]
-
-include::bucket/datehistogram-aggregation.asciidoc[]
-
-include::bucket/geodistance-aggregation.asciidoc[]
-
-include::bucket/geohashgrid-aggregation.asciidoc[]
@@ -1,27 +0,0 @@
-[[search-aggregations-metrics]]
-
-include::metrics/min-aggregation.asciidoc[]
-
-include::metrics/max-aggregation.asciidoc[]
-
-include::metrics/sum-aggregation.asciidoc[]
-
-include::metrics/avg-aggregation.asciidoc[]
-
-include::metrics/stats-aggregation.asciidoc[]
-
-include::metrics/extendedstats-aggregation.asciidoc[]
-
-include::metrics/valuecount-aggregation.asciidoc[]
-
-include::metrics/percentile-aggregation.asciidoc[]
-
-include::metrics/percentile-rank-aggregation.asciidoc[]
-
-include::metrics/cardinality-aggregation.asciidoc[]
-
-include::metrics/geobounds-aggregation.asciidoc[]
-
-include::metrics/tophits-aggregation.asciidoc[]
-
-include::metrics/scripted-metric-aggregation.asciidoc[]
@@ -1,6 +0,0 @@
-[[search-aggregations-reducer]]
-
-include::reducer/derivative-aggregation.asciidoc[]
-include::reducer/max-bucket-aggregation.asciidoc[]
-include::reducer/min-bucket-aggregation.asciidoc[]
-include::reducer/movavg-aggregation.asciidoc[]
@@ -73,7 +73,7 @@ curl -s -XPOST 'localhost:9200/_search' -d '{
 '
 --------------------------------------------------
 
-The way the scores are combined can be controled with the `score_mode`:
+The way the scores are combined can be controlled with the `score_mode`:
 [cols="<,<",options="header",]
 |=======================================================================
 |Score Mode |Description
@@ -21,10 +21,10 @@ it does not take into account term frequencies and other search engine
 information from the other shards. If we want to support accurate
 ranking, we would need to first gather the term frequencies from all
 shards to calculate global term frequencies, then execute the query on
-each shard using these globale frequencies.
+each shard using these global frequencies.
 
 Also, because of the need to sort the results, getting back a large
-document set, or even scrolling it, while maintaing the correct sorting
+document set, or even scrolling it, while maintaining the correct sorting
 behavior can be a very expensive operation. For large result set
 scrolling without sorting, the `scan` search type (explained below) is
 also available.
@@ -192,7 +192,7 @@ Allow to sort by `_geo_distance`. Here is an example:
 
 `distance_type`::
 
-How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slighly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles).
+How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slightly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles).
 
 Note: the geo distance sorting supports `sort_mode` options: `min`,
 `max` and `avg`.
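A minimal Java sketch of a `_geo_distance` sort that sets the distance computation explicitly (the field name and coordinates are illustrative assumptions):

[source,java]
--------------------------------------------------
// Hedged sketch: sort hits by distance from a point, using the default
// sloppy_arc computation spelled out for clarity.
SortBuilder sort = SortBuilders.geoDistanceSort("pin.location")
        .point(40, -70)                      // sort by distance from this point
        .unit(DistanceUnit.KILOMETERS)       // report distances in km
        .geoDistance(GeoDistance.SLOPPY_ARC) // the `distance_type` default
        .order(SortOrder.ASC);
--------------------------------------------------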
@@ -125,7 +125,7 @@ can contain misspellings (See parameter descriptions below).
 query terms a number `>=1` as an absolute number of query terms. The
 default is set to `1.0` which corresponds to that only corrections with
 at most 1 misspelled term are returned. Note that setting this too high
-can negativly impact performance. Low values like `1` or `2` are recommended
+can negatively impact performance. Low values like `1` or `2` are recommended
 otherwise the time spend in suggest calls might exceed the time spend in
 query execution.
@@ -134,7 +134,7 @@ by running `ulimit -l unlimited` as `root` before starting Elasticsearch.
 
 Another possible reason why `mlockall` can fail is that the temporary directory
 (usually `/tmp`) is mounted with the `noexec` option. This can be solved by
-specfying a new temp directory, by starting Elasticsearch with:
+specifying a new temp directory, by starting Elasticsearch with:
 
 [source,sh]
 --------------
@@ -121,6 +121,20 @@ This syntax applies to Elasticsearch 1.0 and later:
 
 * Repeat this process for all remaining nodes.
 
+[IMPORTANT]
+====================================================
+During a rolling upgrade, primary shards assigned to a node with the higher
+version will never have their replicas assigned to a node with the lower
+version, because the newer version may have a different data format which is
+not understood by the older version.
+
+If it is not possible to assign the replica shards to another node with the
+higher version -- e.g. if there is only one node with the higher version in
+the cluster -- then the replica shards will remain unassigned, i.e. the
+cluster health will be status `yellow`. As soon as another node with the
+higher version joins the cluster, the replicas should be assigned and the
+cluster health will reach status `green`.
+====================================================
+
 It may be possible to perform the upgrade by installing the new software while the service is running. This would reduce downtime by ensuring the service was ready to run on the new version as soon as it is stopped on the node being upgraded. This can be done by installing the new version in its own directory and using the symbolic link method outlined above. It is important to test this procedure first to be sure that site-specific configuration data and production indices will not be overwritten during the upgrade process.
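A minimal Java sketch of the per-node health check implied here, waiting for status `green` before moving on to the next node (the timeout is an illustrative assumption):

[source,java]
--------------------------------------------------
// Hedged sketch: block until the cluster reports green, or time out.
ClusterHealthResponse health = client.admin().cluster().prepareHealth()
        .setWaitForGreenStatus()
        .setTimeout(TimeValue.timeValueMinutes(5))
        .get();
--------------------------------------------------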
@@ -217,7 +217,7 @@ starts. See {GIT}9899[#9899] (STATUS; DONE, fixed in v1.5.0)
 
 Setting `zen.discovery.minimum_master_nodes` to a value higher than the current node count
 effectively leaves the cluster without a master and unable to process requests. The only
-way to fix this is to add more master-eligibile nodes. {GIT}8321[#8321] adds a mechanism
+way to fix this is to add more master-eligible nodes. {GIT}8321[#8321] adds a mechanism
 to validate settings before applying them, and {GIT}9051[#9051] extends this validation
 support to settings applied during a cluster restore. (STATUS: DONE, Fixed in v1.5.0)
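A minimal Java sketch of the kind of dynamic update this validation guards; note the live setting key is `discovery.zen.minimum_master_nodes`, and the value here is an illustrative assumption:

[source,java]
--------------------------------------------------
// Hedged sketch: a persistent cluster-settings update. The value must
// not exceed the number of master-eligible nodes, or the cluster is
// left without a master -- exactly the failure mode described above.
client.admin().cluster().prepareUpdateSettings()
        .setPersistentSettings(ImmutableSettings.settingsBuilder()
                .put("discovery.zen.minimum_master_nodes", 2)
                .build())
        .get();
--------------------------------------------------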
@@ -142,7 +142,7 @@ class Article
 
   # Execute code after saving the model.
   #
-  after_save { puts "Successfuly saved: #{self}" }
+  after_save { puts "Successfully saved: #{self}" }
 end
 ------------------------------------
@@ -215,7 +215,7 @@ Any callbacks defined in the model will be triggered during the persistence oper
 [source,ruby]
 ------------------------------------
 article.save
-# Successfuly saved: #<Article {...}>
+# Successfully saved: #<Article {...}>
 ------------------------------------
 
 Please see the extensive documentation in the library
pom.xml
@@ -43,6 +43,7 @@
         <tests.locale>random</tests.locale>
         <tests.timezone>random</tests.timezone>
         <tests.slow>false</tests.slow>
+        <tests.security.manager>true</tests.security.manager>
         <es.logger.level>ERROR</es.logger.level>
         <tests.heap.size>512m</tests.heap.size>
         <tests.heapdump.path>${basedir}/logs/</tests.heapdump.path>
@@ -635,8 +636,9 @@
                     <tests.security.manager>${tests.security.manager}</tests.security.manager>
                     <tests.compatibility>${tests.compatibility}</tests.compatibility>
                     <java.awt.headless>true</java.awt.headless>
-                    <!-- security manager / test.policy -->
-                    <java.security.policy>${basedir}/src/main/resources/org/elasticsearch/bootstrap/security.policy</java.security.policy>
+                    <!-- true if we are running tests from maven (as opposed to IDE, etc).
+                         allows us to assert certain things work, like libsigar -->
+                    <tests.maven>true</tests.maven>
                 </systemProperties>
             </configuration>
         </execution>
@@ -24,7 +24,7 @@ import org.elasticsearch.client.ElasticsearchClient;
 /**
  * Base action. Supports building the <code>Request</code> through a <code>RequestBuilder</code>.
  */
-public abstract class Action<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder, Client>, Client extends ElasticsearchClient>
+public abstract class Action<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
         extends GenericAction<Request, Response> {
 
     protected Action(String name) {
@@ -34,5 +34,5 @@ public abstract class Action<Request extends ActionRequest, Response extends Act
     /**
      * Creates a new request builder given the client provided as argument
      */
-    public abstract RequestBuilder newRequestBuilder(Client client);
+    public abstract RequestBuilder newRequestBuilder(ElasticsearchClient client);
 }
@@ -30,8 +30,6 @@ import java.io.IOException;
  */
 public abstract class ActionRequest<T extends ActionRequest> extends TransportRequest {
 
-    private boolean listenerThreaded = false;
-
     protected ActionRequest() {
         super();
     }
@@ -43,25 +41,6 @@ public abstract class ActionRequest<T extends ActionRequest> extends TransportRe
         //this.listenerThreaded = request.listenerThreaded();
     }
 
-    /**
-     * Should the response listener be executed on a thread or not.
-     * <p/>
-     * <p>When not executing on a thread, it will either be executed on the calling thread, or
-     * on an expensive, IO based, thread.
-     */
-    public final boolean listenerThreaded() {
-        return this.listenerThreaded;
-    }
-
-    /**
-     * Sets if the response listener be executed on a thread or not.
-     */
-    @SuppressWarnings("unchecked")
-    public final T listenerThreaded(boolean listenerThreaded) {
-        this.listenerThreaded = listenerThreaded;
-        return (T) this;
-    }
-
     public abstract ActionRequestValidationException validate();
 
     @Override
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action;
 
+import com.google.common.base.Preconditions;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.PlainListenableActionFuture;
 import org.elasticsearch.client.Client;
@@ -26,18 +27,22 @@ import org.elasticsearch.client.ClusterAdminClient;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.client.IndicesAdminClient;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.aggregations.reducers.ReducerBuilder;
 import org.elasticsearch.threadpool.ThreadPool;
 
 /**
  *
  */
-public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder, Client extends ElasticsearchClient> {
+public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> {
 
+    protected final Action<Request, Response, RequestBuilder> action;
     protected final Request request;
     private final ThreadPool threadPool;
-    protected final Client client;
+    protected final ElasticsearchClient client;
 
-    protected ActionRequestBuilder(Client client, Request request) {
+    protected ActionRequestBuilder(ElasticsearchClient client, Action<Request, Response, RequestBuilder> action, Request request) {
+        Preconditions.checkNotNull(action, "action must not be null");
+        this.action = action;
         this.request = request;
         this.client = client;
         threadPool = client.threadPool();
@@ -48,12 +53,6 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
         return this.request;
     }
 
-    @SuppressWarnings("unchecked")
-    public final RequestBuilder setListenerThreaded(boolean listenerThreaded) {
-        request.listenerThreaded(listenerThreaded);
-        return (RequestBuilder) this;
-    }
-
     @SuppressWarnings("unchecked")
     public final RequestBuilder putHeader(String key, Object value) {
         request.putHeader(key, value);
@@ -61,7 +60,7 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
     }
 
     public ListenableActionFuture<Response> execute() {
-        PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(request.listenerThreaded(), threadPool);
+        PlainListenableActionFuture<Response> future = new PlainListenableActionFuture<>(threadPool);
         execute(future);
         return future;
     }
@@ -87,9 +86,14 @@ public abstract class ActionRequestBuilder<Request extends ActionRequest, Respon
         return execute().actionGet(timeout);
     }
 
-    public void execute(ActionListener<Response> listener) {
-        doExecute(listener);
+    public final void execute(ActionListener<Response> listener) {
+        client.execute(action, beforeExecute(request), listener);
     }
 
-    protected abstract void doExecute(ActionListener<Response> listener);
+    /**
+     * A callback to additionally process the request before its executed
+     */
+    protected Request beforeExecute(Request request) {
+        return request;
+    }
 }
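With this change, every builder funnels through the generic `client.execute(action, request, listener)` path, so caller-side usage is unchanged. A minimal sketch with a concrete builder (usage illustration, not part of the diff):

[source,java]
--------------------------------------------------
// Hedged sketch: the async and blocking forms both route through the
// now-final execute(); per-builder logic moves into beforeExecute().
ListenableActionFuture<ClusterHealthResponse> future =
        client.admin().cluster().prepareHealth().execute();
ClusterHealthResponse response = future.actionGet();
--------------------------------------------------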
@@ -24,20 +24,15 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.BaseTransportResponseHandler;
-import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportRequestOptions;
-import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.*;
 
 /**
  * A generic proxy that will execute the given action against a specific node.
  */
 public class TransportActionNodeProxy<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
 
-    protected final TransportService transportService;
+    private final TransportService transportService;
 
     private final GenericAction<Request, Response> action;
 
     private final TransportRequestOptions transportOptions;
 
     @Inject
@@ -48,36 +43,17 @@ public class TransportActionNodeProxy<Request extends ActionRequest, Response ex
         this.transportOptions = action.transportOptions(settings);
     }
 
-    public void execute(DiscoveryNode node, final Request request, final ActionListener<Response> listener) {
+    public void execute(final DiscoveryNode node, final Request request, final ActionListener<Response> listener) {
         ActionRequestValidationException validationException = request.validate();
         if (validationException != null) {
             listener.onFailure(validationException);
             return;
         }
-        transportService.sendRequest(node, action.name(), request, transportOptions, new BaseTransportResponseHandler<Response>() {
+        transportService.sendRequest(node, action.name(), request, transportOptions, new ActionListenerResponseHandler<Response>(listener) {
             @Override
             public Response newInstance() {
                 return action.newResponse();
             }
-
-            @Override
-            public String executor() {
-                if (request.listenerThreaded()) {
-                    return ThreadPool.Names.LISTENER;
-                }
-                return ThreadPool.Names.SAME;
-            }
-
-            @Override
-            public void handleResponse(Response response) {
-                listener.onResponse(response);
-            }
-
-            @Override
-            public void handleException(TransportException exp) {
-                listener.onFailure(exp);
-            }
         });
     }
 
 }
@@ -1,37 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.cluster;
-
-import org.elasticsearch.action.Action;
-import org.elasticsearch.action.ActionRequest;
-import org.elasticsearch.action.ActionRequestBuilder;
-import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.client.ClusterAdminClient;
-
-/**
- * Cluster action (used with {@link ClusterAdminClient} API).
- */
-public abstract class ClusterAction<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder, ClusterAdminClient>>
-        extends Action<Request, Response, RequestBuilder, ClusterAdminClient> {
-
-    protected ClusterAction(String name) {
-        super(name);
-    }
-}
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class ClusterHealthAction extends ClusterAction<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
+public class ClusterHealthAction extends Action<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
 
     public static final ClusterHealthAction INSTANCE = new ClusterHealthAction();
     public static final String NAME = "cluster:monitor/health";
@@ -39,7 +39,7 @@ public class ClusterHealthAction extends ClusterAction<ClusterHealthRequest, Clu
     }
 
     @Override
-    public ClusterHealthRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new ClusterHealthRequestBuilder(client);
+    public ClusterHealthRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new ClusterHealthRequestBuilder(client, this);
     }
 }
@@ -19,19 +19,18 @@
 
 package org.elasticsearch.action.admin.cluster.health;
 
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.unit.TimeValue;
 
 /**
  *
  */
-public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder, ClusterAdminClient> {
+public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
 
-    public ClusterHealthRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new ClusterHealthRequest());
+    public ClusterHealthRequestBuilder(ElasticsearchClient client, ClusterHealthAction action) {
+        super(client, action, new ClusterHealthRequest());
     }
 
     public ClusterHealthRequestBuilder setIndices(String... indices) {
@@ -86,9 +85,4 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB
         request.waitForEvents(waitForEvents);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<ClusterHealthResponse> listener) {
-        client.health(request, listener);
-    }
 }
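A minimal sketch of the new construction path that replaces the removed `doExecute` (a usage illustration, not part of the diff):

[source,java]
--------------------------------------------------
// Hedged sketch: builders are now created with the client plus the
// Action singleton, and execution goes through the generic
// client.execute(...) instead of a per-builder doExecute().
ClusterHealthResponse health =
        new ClusterHealthRequestBuilder(client, ClusterHealthAction.INSTANCE)
                .setWaitForYellowStatus()
                .get();
--------------------------------------------------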
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.node.hotthreads;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class NodesHotThreadsAction extends ClusterAction<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
+public class NodesHotThreadsAction extends Action<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
 
     public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction();
     public static final String NAME = "cluster:monitor/nodes/hot_threads";
@@ -39,7 +39,7 @@ public class NodesHotThreadsAction extends ClusterAction<NodesHotThreadsRequest,
     }
 
     @Override
-    public NodesHotThreadsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new NodesHotThreadsRequestBuilder(client);
+    public NodesHotThreadsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new NodesHotThreadsRequestBuilder(client, this);
     }
 }
@@ -22,14 +22,15 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
 import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.unit.TimeValue;
 
 /**
  */
 public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
 
-    public NodesHotThreadsRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new NodesHotThreadsRequest());
+    public NodesHotThreadsRequestBuilder(ElasticsearchClient client, NodesHotThreadsAction action) {
+        super(client, action, new NodesHotThreadsRequest());
     }
 
     public NodesHotThreadsRequestBuilder setThreads(int threads) {
@@ -51,9 +52,4 @@ public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder<
         request.interval(interval);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<NodesHotThreadsResponse> listener) {
-        client.nodesHotThreads(request, listener);
-    }
 }
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.node.info;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class NodesInfoAction extends ClusterAction<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
+public class NodesInfoAction extends Action<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
 
     public static final NodesInfoAction INSTANCE = new NodesInfoAction();
     public static final String NAME = "cluster:monitor/nodes/info";
@@ -39,7 +39,7 @@ public class NodesInfoAction extends ClusterAction<NodesInfoRequest, NodesInfoRe
     }
 
     @Override
-    public NodesInfoRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new NodesInfoRequestBuilder(client);
+    public NodesInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new NodesInfoRequestBuilder(client, this);
     }
 }
@@ -19,17 +19,16 @@
 
 package org.elasticsearch.action.admin.cluster.node.info;
 
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  *
  */
 public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
 
-    public NodesInfoRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new NodesInfoRequest());
+    public NodesInfoRequestBuilder(ElasticsearchClient client, NodesInfoAction action) {
+        super(client, action, new NodesInfoRequest());
     }
 
     /**
@@ -119,9 +118,4 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
         request().plugins(plugins);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<NodesInfoResponse> listener) {
-        client.nodesInfo(request, listener);
-    }
 }
@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.node.stats;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class NodesStatsAction extends ClusterAction<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
+public class NodesStatsAction extends Action<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
 
     public static final NodesStatsAction INSTANCE = new NodesStatsAction();
     public static final String NAME = "cluster:monitor/nodes/stats";
@@ -39,7 +39,7 @@ public class NodesStatsAction extends ClusterAction<NodesStatsRequest, NodesStat
     }
 
     @Override
-    public NodesStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new NodesStatsRequestBuilder(client);
+    public NodesStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new NodesStatsRequestBuilder(client, this);
     }
 }
@@ -22,15 +22,15 @@ package org.elasticsearch.action.admin.cluster.node.stats;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  *
  */
 public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
 
-    public NodesStatsRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new NodesStatsRequest());
+    public NodesStatsRequestBuilder(ElasticsearchClient client, NodesStatsAction action) {
+        super(client, action, new NodesStatsRequest());
     }
 
     /**
@@ -133,9 +133,4 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder<Nodes
         request.http(http);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<NodesStatsResponse> listener) {
-        client.nodesStats(request, listener);
-    }
 }
|
||||||
|
|
||||||
package org.elasticsearch.action.admin.cluster.repositories.delete;
|
package org.elasticsearch.action.admin.cluster.repositories.delete;
|
||||||
|
|
||||||
import org.elasticsearch.action.admin.cluster.ClusterAction;
|
import org.elasticsearch.action.Action;
|
||||||
import org.elasticsearch.client.ClusterAdminClient;
|
import org.elasticsearch.client.ElasticsearchClient;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Unregister repository action
|
* Unregister repository action
|
||||||
*/
|
*/
|
||||||
public class DeleteRepositoryAction extends ClusterAction<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
|
public class DeleteRepositoryAction extends Action<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
|
||||||
|
|
||||||
public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction();
|
public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction();
|
||||||
public static final String NAME = "cluster:admin/repository/delete";
|
public static final String NAME = "cluster:admin/repository/delete";
|
||||||
|
@ -40,8 +40,8 @@ public class DeleteRepositoryAction extends ClusterAction<DeleteRepositoryReques
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public DeleteRepositoryRequestBuilder newRequestBuilder(ClusterAdminClient client) {
|
public DeleteRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||||
return new DeleteRepositoryRequestBuilder(client);
|
return new DeleteRepositoryRequestBuilder(client, this);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -19,31 +19,26 @@
|
||||||
|
|
||||||
package org.elasticsearch.action.admin.cluster.repositories.delete;
|
package org.elasticsearch.action.admin.cluster.repositories.delete;
|
||||||
|
|
||||||
import org.elasticsearch.action.ActionListener;
|
|
||||||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||||
import org.elasticsearch.client.ClusterAdminClient;
|
import org.elasticsearch.client.ElasticsearchClient;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Builder for unregister repository request
|
* Builder for unregister repository request
|
||||||
*/
|
*/
|
||||||
public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder, ClusterAdminClient> {
|
public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructs unregister repository request builder
|
* Constructs unregister repository request builder
|
||||||
*
|
|
||||||
* @param clusterAdminClient cluster admin client
|
|
||||||
*/
|
*/
|
||||||
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
|
public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action) {
|
||||||
super(clusterAdminClient, new DeleteRepositoryRequest());
|
super(client, action, new DeleteRepositoryRequest());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructs unregister repository request builder with specified repository name
|
* Constructs unregister repository request builder with specified repository name
|
||||||
*
|
|
||||||
* @param clusterAdminClient cluster adming client
|
|
||||||
*/
|
*/
|
||||||
public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
|
public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action, String name) {
|
||||||
super(clusterAdminClient, new DeleteRepositoryRequest(name));
|
super(client, action, new DeleteRepositoryRequest(name));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -55,9 +50,4 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder<D
         request.name(name);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<DeleteRepositoryResponse> listener) {
-        client.deleteRepository(request, listener);
-    }
 }
@@ -19,13 +19,13 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.get;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Get repositories action
  */
-public class GetRepositoriesAction extends ClusterAction<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
+public class GetRepositoriesAction extends Action<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
 
     public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction();
     public static final String NAME = "cluster:admin/repository/get";
@@ -40,8 +40,8 @@ public class GetRepositoriesAction extends ClusterAction<GetRepositoriesRequest,
     }
 
     @Override
-    public GetRepositoriesRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new GetRepositoriesRequestBuilder(client);
+    public GetRepositoriesRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new GetRepositoriesRequestBuilder(client, this);
     }
 }
@@ -20,32 +20,26 @@
 package org.elasticsearch.action.admin.cluster.repositories.get;
 
 import com.google.common.collect.ObjectArrays;
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Get repository request builder
  */
-public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder, ClusterAdminClient> {
+public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
 
     /**
      * Creates new get repository request builder
-     *
-     * @param clusterAdminClient cluster admin client
     */
-    public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient) {
-        super(clusterAdminClient, new GetRepositoriesRequest());
+    public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action) {
+        super(client, action, new GetRepositoriesRequest());
     }
 
     /**
     * Creates new get repository request builder
-     *
-     * @param clusterAdminClient cluster admin client
-     * @param repositories list of repositories to get
     */
-    public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient, String... repositories) {
-        super(clusterAdminClient, new GetRepositoriesRequest(repositories));
+    public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action, String... repositories) {
+        super(client, action, new GetRepositoriesRequest(repositories));
    }
 
     /**
@ -69,9 +63,4 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques
|
||||||
request.repositories(ObjectArrays.concat(request.repositories(), repositories, String.class));
|
request.repositories(ObjectArrays.concat(request.repositories(), repositories, String.class));
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
protected void doExecute(ActionListener<GetRepositoriesResponse> listener) {
|
|
||||||
client.getRepositories(request, listener);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
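The varargs constructor keeps its old shape apart from the new (client, action) prefix. A sketch under the signature confirmed above; the repository names are illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class GetRepositoriesExample {
    static GetRepositoriesResponse lookup(ElasticsearchClient client) {
        // Seed the request with names up front; the ObjectArrays.concat call seen
        // in the -69 hunk appends further names before execution.
        return new GetRepositoriesRequestBuilder(client, GetRepositoriesAction.INSTANCE, "backup_a", "backup_b")
                .get();
    }
}
--------------------------------------------------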
org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java

@@ -19,13 +19,13 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.put;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Register repository action
  */
-public class PutRepositoryAction extends ClusterAction<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
+public class PutRepositoryAction extends Action<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
 
     public static final PutRepositoryAction INSTANCE = new PutRepositoryAction();
     public static final String NAME = "cluster:admin/repository/put";
@@ -40,8 +40,8 @@ public class PutRepositoryAction extends ClusterAction<PutRepositoryRequest, Put
     }
 
     @Override
-    public PutRepositoryRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new PutRepositoryRequestBuilder(client);
+    public PutRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new PutRepositoryRequestBuilder(client, this);
     }
 }
org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java

@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.repositories.put;
 
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.Map;
@@ -29,25 +29,20 @@ import java.util.Map;
 /**
  * Register repository request builder
  */
-public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder, ClusterAdminClient> {
+public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
 
     /**
      * Constructs register repository request
-     *
-     * @param clusterAdminClient cluster admin client
      */
-    public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
-        super(clusterAdminClient, new PutRepositoryRequest());
+    public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action) {
+        super(client, action, new PutRepositoryRequest());
     }
 
     /**
      * Constructs register repository request for the repository with a given name
-     *
-     * @param clusterAdminClient cluster admin client
-     * @param name repository name
      */
-    public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
-        super(clusterAdminClient, new PutRepositoryRequest(name));
+    public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action, String name) {
+        super(client, action, new PutRepositoryRequest(name));
     }
 
     /**
@@ -126,9 +121,4 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder<PutR
         request.verify(verify);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<PutRepositoryResponse> listener) {
-        client.putRepository(request, listener);
-    }
 }
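Same two-step rewiring here. A hedged sketch of registering a filesystem repository under the new constructor; the setType/setSettings setters are pre-existing builder methods assumed unchanged by this diff, and the name, path, and ImmutableSettings usage (as elsewhere in these docs) are illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.ImmutableSettings;

public class PutRepositoryExample {
    static PutRepositoryResponse register(ElasticsearchClient client) {
        return new PutRepositoryRequestBuilder(client, PutRepositoryAction.INSTANCE, "my_backup")
                .setType("fs") // built-in filesystem repository type
                .setSettings(ImmutableSettings.settingsBuilder()
                        .put("location", "/mnt/backups/my_backup") // illustrative path
                        .build())
                .get();
    }
}
--------------------------------------------------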
org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java

@@ -19,13 +19,13 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.verify;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Unregister repository action
  */
-public class VerifyRepositoryAction extends ClusterAction<VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> {
+public class VerifyRepositoryAction extends Action<VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> {
 
     public static final VerifyRepositoryAction INSTANCE = new VerifyRepositoryAction();
     public static final String NAME = "cluster:admin/repository/verify";
@@ -40,8 +40,8 @@ public class VerifyRepositoryAction extends ClusterAction<VerifyRepositoryReques
     }
 
     @Override
-    public VerifyRepositoryRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new VerifyRepositoryRequestBuilder(client);
+    public VerifyRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new VerifyRepositoryRequestBuilder(client, this);
    }
 }
org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java

@@ -19,32 +19,26 @@
 
 package org.elasticsearch.action.admin.cluster.repositories.verify;
 
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  * Builder for unregister repository request
  */
-public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder<VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder, ClusterAdminClient> {
+public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder<VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> {
 
     /**
      * Constructs unregister repository request builder
-     *
-     * @param clusterAdminClient cluster admin client
     */
-    public VerifyRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) {
-        super(clusterAdminClient, new VerifyRepositoryRequest());
+    public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action) {
+        super(client, action, new VerifyRepositoryRequest());
    }
 
     /**
      * Constructs unregister repository request builder with specified repository name
-     *
-     * @param clusterAdminClient cluster adming client
     */
-    public VerifyRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) {
-        super(clusterAdminClient, new VerifyRepositoryRequest(name));
+    public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action, String name) {
+        super(client, action, new VerifyRepositoryRequest(name));
    }
 
     /**
@@ -56,9 +50,4 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu
         request.name(name);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<VerifyRepositoryResponse> listener) {
-        client.verifyRepository(request, listener);
-    }
 }
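The deleted doExecute overrides are the point of the whole diff: instead of each builder dispatching to a type-specific ClusterAdminClient method (client.verifyRepository(...) here), execution is funneled through the generic execute path on ElasticsearchClient. A sketch of the equivalent direct call, assuming the three-argument execute(action, request, listener) on the rewired client interface; the repository name is illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class VerifyRepositoryExample {
    static void verify(ElasticsearchClient client, ActionListener<VerifyRepositoryResponse> listener) {
        // What the removed doExecute body did, expressed via the shared entry point.
        client.execute(VerifyRepositoryAction.INSTANCE, new VerifyRepositoryRequest("my_backup"), listener);
    }
}
--------------------------------------------------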
org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java

@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.reroute;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class ClusterRerouteAction extends ClusterAction<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
+public class ClusterRerouteAction extends Action<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
 
     public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction();
     public static final String NAME = "cluster:admin/reroute";
@@ -39,7 +39,7 @@ public class ClusterRerouteAction extends ClusterAction<ClusterRerouteRequest, C
     }
 
     @Override
-    public ClusterRerouteRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new ClusterRerouteRequestBuilder(client);
+    public ClusterRerouteRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new ClusterRerouteRequestBuilder(client, this);
    }
 }
org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java

@@ -19,19 +19,18 @@
 
 package org.elasticsearch.action.admin.cluster.reroute;
 
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
 import org.elasticsearch.common.bytes.BytesReference;
 
 /**
  * Builder for a cluster reroute request
  */
-public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder, ClusterAdminClient> {
+public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
 
-    public ClusterRerouteRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new ClusterRerouteRequest());
+    public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) {
+        super(client, action, new ClusterRerouteRequest());
    }
 
     /**
@@ -68,9 +67,4 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<Clu
         request.source(source);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<ClusterRerouteResponse> listener) {
-        client.reroute(request, listener);
-    }
 }
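Builders without a convenience argument follow the same shape, with only the (client, action) prefix changing. A sketch of a dry-run reroute under the new constructor; setDryRun is the pre-existing builder method for computing an allocation without applying it, assumed untouched by this diff:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class RerouteExample {
    static ClusterRerouteResponse dryRun(ElasticsearchClient client) {
        return new ClusterRerouteRequestBuilder(client, ClusterRerouteAction.INSTANCE)
                .setDryRun(true) // validate the resulting allocation without applying it
                .get();
    }
}
--------------------------------------------------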
org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java

@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.settings;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class ClusterUpdateSettingsAction extends ClusterAction<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
+public class ClusterUpdateSettingsAction extends Action<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
 
     public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction();
     public static final String NAME = "cluster:admin/settings/update";
@@ -39,7 +39,7 @@ public class ClusterUpdateSettingsAction extends ClusterAction<ClusterUpdateSett
     }
 
     @Override
-    public ClusterUpdateSettingsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new ClusterUpdateSettingsRequestBuilder(client);
+    public ClusterUpdateSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new ClusterUpdateSettingsRequestBuilder(client, this);
    }
 }
org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java

@@ -19,9 +19,8 @@
 
 package org.elasticsearch.action.admin.cluster.settings;
 
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.Map;
@@ -29,10 +28,10 @@ import java.util.Map;
 /**
  * Builder for a cluster update settings request
  */
-public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder, ClusterAdminClient> {
+public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
 
-    public ClusterUpdateSettingsRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new ClusterUpdateSettingsRequest());
+    public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, ClusterUpdateSettingsAction action) {
+        super(client, action, new ClusterUpdateSettingsRequest());
    }
 
     /**
@@ -98,9 +97,4 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil
         request.persistentSettings(settings);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<ClusterUpdateSettingsResponse> listener) {
-        client.updateSettings(request, listener);
-    }
 }
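The request.persistentSettings(settings) call above implies the usual transient/persistent setters survive the rewiring. A hedged sketch; setTransientSettings is the pre-existing builder method, the allocation setting is illustrative, and ImmutableSettings follows the convention used elsewhere in these docs:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.ImmutableSettings;

public class ClusterSettingsExample {
    static ClusterUpdateSettingsResponse disableAllocation(ElasticsearchClient client) {
        return new ClusterUpdateSettingsRequestBuilder(client, ClusterUpdateSettingsAction.INSTANCE)
                .setTransientSettings(ImmutableSettings.settingsBuilder()
                        .put("cluster.routing.allocation.enable", "none") // illustrative setting
                        .build())
                .get();
    }
}
--------------------------------------------------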
org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java

@@ -19,12 +19,12 @@
 
 package org.elasticsearch.action.admin.cluster.shards;
 
-import org.elasticsearch.action.admin.cluster.ClusterAction;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class ClusterSearchShardsAction extends ClusterAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
+public class ClusterSearchShardsAction extends Action<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
 
     public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction();
     public static final String NAME = "indices:admin/shards/search_shards";
@@ -39,7 +39,7 @@ public class ClusterSearchShardsAction extends ClusterAction<ClusterSearchShards
     }
 
     @Override
-    public ClusterSearchShardsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
-        return new ClusterSearchShardsRequestBuilder(client);
+    public ClusterSearchShardsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new ClusterSearchShardsRequestBuilder(client, this);
    }
 }
org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java

@@ -19,17 +19,16 @@
 
 package org.elasticsearch.action.admin.cluster.shards;
 
-import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
-import org.elasticsearch.client.ClusterAdminClient;
+import org.elasticsearch.client.ElasticsearchClient;
 
 /**
  */
-public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder, ClusterAdminClient> {
+public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
 
-    public ClusterSearchShardsRequestBuilder(ClusterAdminClient clusterClient) {
-        super(clusterClient, new ClusterSearchShardsRequest());
+    public ClusterSearchShardsRequestBuilder(ElasticsearchClient client, ClusterSearchShardsAction action) {
+        super(client, action, new ClusterSearchShardsRequest());
    }
 
     /**
@@ -83,10 +82,4 @@ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRe
         request().indicesOptions(indicesOptions);
         return this;
     }
-
-    @Override
-    protected void doExecute(ActionListener<ClusterSearchShardsResponse> listener) {
-        client.searchShards(request, listener);
-    }
-
 }
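Last file in the visible portion of the diff, with the same rewiring. A sketch under the constructor confirmed above; setIndices is the pre-existing builder method assumed unchanged here, and the index name is illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class SearchShardsExample {
    static ClusterSearchShardsResponse shardsFor(ElasticsearchClient client) {
        return new ClusterSearchShardsRequestBuilder(client, ClusterSearchShardsAction.INSTANCE)
                .setIndices("logs-2015.05.01") // illustrative index name
                .get();
    }
}
--------------------------------------------------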
Some files were not shown because too many files have changed in this diff.