diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc index e0205816ca0..c92534eca7d 100644 --- a/docs/community/clients.asciidoc +++ b/docs/community/clients.asciidoc @@ -13,7 +13,7 @@ See the {client}/perl-api/current/index.html[official Elasticsearch Perl client] See the {client}/python-api/current/index.html[official Elasticsearch Python client]. * http://github.com/elasticsearch/elasticsearch-dsl-py[elasticsearch-dsl-py] - chainable query and filter construction built on top of offical client. + chainable query and filter construction built on top of official client. * http://github.com/rhec/pyelasticsearch[pyelasticsearch]: Python client. diff --git a/docs/community/integrations.asciidoc b/docs/community/integrations.asciidoc index d488e324313..bb2ff6850a0 100644 --- a/docs/community/integrations.asciidoc +++ b/docs/community/integrations.asciidoc @@ -59,6 +59,9 @@ * http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]: Elasticsearch WordPress Plugin + +* https://github.com/wallmanderco/elasticsearch-indexer[Elasticsearch Indexer]: + Elasticsearch WordPress Plugin * https://github.com/OlegKunitsyn/eslogd[eslogd]: Linux daemon that replicates events to a central Elasticsearch server in real-time diff --git a/docs/java-api/client.asciidoc b/docs/java-api/client.asciidoc index be946673491..0dccd485c2a 100644 --- a/docs/java-api/client.asciidoc +++ b/docs/java-api/client.asciidoc @@ -134,7 +134,7 @@ be "two hop" operations). 
-------------------------------------------------- // on startup -Client client = new TransportClient() +Client client = TransportClient.builder().build() .addTransportAddress(new InetSocketTransportAddress("host1", 9300)) .addTransportAddress(new InetSocketTransportAddress("host2", 9300)); @@ -150,7 +150,7 @@ Note that you have to set the cluster name if you use one different than -------------------------------------------------- Settings settings = ImmutableSettings.settingsBuilder() .put("cluster.name", "myClusterName").build(); -Client client = new TransportClient(settings); +Client client = TransportClient.builder().settings(settings).build(); //Add transport addresses and do something with the client... -------------------------------------------------- @@ -166,7 +166,7 @@ used will be the ones that the other nodes were started with (the -------------------------------------------------- Settings settings = ImmutableSettings.settingsBuilder() .put("client.transport.sniff", true).build(); -TransportClient client = new TransportClient(settings); +TransportClient client = TransportClient.builder().settings(settings).build(); -------------------------------------------------- Other transport client level settings include: diff --git a/docs/java-api/query-dsl-filters.asciidoc b/docs/java-api/query-dsl-filters.asciidoc index 7070d0499c7..2240a3b93fc 100644 --- a/docs/java-api/query-dsl-filters.asciidoc +++ b/docs/java-api/query-dsl-filters.asciidoc @@ -150,7 +150,7 @@ FilterBuilder filter = geoDistanceFilter("pin.location") <1> <2> center point <3> distance from center point <4> optimize bounding box: `memory`, `indexed` or `none` -<5> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slighly more precise but +<5> distance computation mode: `GeoDistance.SLOPPY_ARC` (default), `GeoDistance.ARC` (slightly more precise but significantly slower) or `GeoDistance.PLANE` (faster, but inaccurate on long distances and close to the poles) 
Note that you can cache the result using diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/aggregations.asciidoc similarity index 51% rename from docs/reference/search/aggregations.asciidoc rename to docs/reference/aggregations.asciidoc index cf4b4348eda..c6fb674834e 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -1,6 +1,8 @@ [[search-aggregations]] -== Aggregations += Aggregations +[partintro] +-- The aggregations framework helps provide aggregated data based on a search query. It is based on simple building blocks called aggregations, that can be composed in order to build complex summaries of the data. @@ -11,16 +13,19 @@ query/filters of the search request). There are many different types of aggregations, each with its own purpose and output. To better understand these types, it is often easier to break them into two main families: -_Bucketing_:: +<>:: A family of aggregations that build buckets, where each bucket is associated with a _key_ and a document criterion. When the aggregation is executed, all the buckets criteria are evaluated on every document in the context and when a criterion matches, the document is considered to "fall in" the relevant bucket. By the end of the aggregation process, we'll end up with a list of buckets - each one with a set of documents that "belong" to it. -_Metric_:: +<>:: Aggregations that keep track and compute metrics over a set of documents. +<>:: + Aggregations that aggregate the output of other aggregations and their associated metrics + The interesting part comes next. Since each bucket effectively defines a document set (all documents belonging to the bucket), one can potentially associate aggregations on the bucket level, and those will execute within the context of that bucket. 
This is where the real power of aggregations kicks in: *aggregations can be nested!* @@ -31,7 +36,7 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th another higher-level aggregation). [float] -=== Structuring Aggregations +== Structuring Aggregations The following snippet captures the basic structure of aggregations: @@ -62,7 +67,7 @@ bucketing aggregation. For example, if you define a set of aggregations under th sub-aggregations will be computed for the range buckets that are defined. [float] -==== Values Source +=== Values Source Some aggregations work on values extracted from the aggregated documents. Typically, the values will be extracted from a specific document field which is set using the `field` key for the aggregations. It is also possible to define a @@ -89,142 +94,7 @@ perform optimizations when dealing with sorted values (for example, with the `mi sorted, Elasticsearch will skip the iterations over all the values and rely on the first value in the list to be the minimum value among all other values associated with the same document). -[float] -=== Metrics Aggregations - -The aggregations in this family compute metrics based on values extracted in one way or another from the documents that -are being aggregated. The values are typically extracted from the fields of the document (using the field data), but -can also be generated using scripts. - -Numeric metrics aggregations are a special type of metrics aggregation which output numeric values. Some aggregations output -a single numeric metric (e.g. `avg`) and are called `single-value numeric metrics aggregation`, others generate multiple -metrics (e.g. `stats`) and are called `multi-value numeric metrics aggregation`. 
The distinction between single-value and -multi-value numeric metrics aggregations plays a role when these aggregations serve as direct sub-aggregations of some -bucket aggregations (some bucket aggregations enable you to sort the returned buckets based on the numeric metrics in each bucket). - - -[float] -=== Bucket Aggregations - -Bucket aggregations don't calculate metrics over fields like the metrics aggregations do, but instead, they create -buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which determines -whether or not a document in the current context "falls" into it. In other words, the buckets effectively define document -sets. In addition to the buckets themselves, the `bucket` aggregations also compute and return the number of documents -that "fell in" to each bucket. - -Bucket aggregations, as opposed to `metrics` aggregations, can hold sub-aggregations. These sub-aggregations will be -aggregated for the buckets created by their "parent" bucket aggregation. - -There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some -define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process. - -[float] -=== Reducer Aggregations - -coming[2.0.0] - -experimental[] - -Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding -information to the output tree. There are many different types of reducer, each computing different information from -other aggregations, but these types can broken down into two families: - -_Parent_:: - A family of reducer aggregations that is provided with the output of its parent aggregation and is able - to compute new buckets or new aggregations to add to existing buckets. 
- -_Sibling_:: - Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a - new aggregation which will be at the same level as the sibling aggregation. - -Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths` -parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the -<> section. - -?????? SHOULD THE SECTION ABOUT DEFINING AGGREGATION PATHS -BE IN THIS PAGE AND REFERENCED FROM THE TERMS AGGREGATION DOCUMENTATION ??????? - -Reducer aggregations cannot have sub-aggregations but depending on the type it can reference another reducer in the `buckets_path` -allowing reducers to be chained. - -NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be -included in the final output. - -[float] -=== Caching heavy aggregations - -Frequently used aggregations (e.g. for display on the home page of a website) -can be cached for faster responses. These cached results are the same results -that would be returned by an uncached aggregation -- you will never get stale -results. - -See <> for more details. - -[float] -=== Returning only aggregation results - -There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by -setting `size=0`. For example: - -[source,js] --------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{ - "size": 0, - "aggregations": { - "my_agg": { - "terms": { - "field": "text" - } - } - } -} -' --------------------------------------------------- - -Setting `size` to `0` avoids executing the fetch phase of the search making the request more efficient. 
- -[float] -=== Metadata - -You can associate a piece of metadata with individual aggregations at request time that will be returned in place -at response time. - -Consider this example where we want to associate the color blue with our `terms` aggregation. - -[source,js] --------------------------------------------------- -{ - ... - aggs": { - "titles": { - "terms": { - "field": "title" - }, - "meta": { - "color": "blue" - }, - } - } -} --------------------------------------------------- - -Then that piece of metadata will be returned in place for our `titles` terms aggregation - -[source,js] --------------------------------------------------- -{ - ... - "aggregations": { - "titles": { - "meta": { - "color" : "blue" - }, - "buckets": [ - ] - } - } -} --------------------------------------------------- +-- include::aggregations/metrics.asciidoc[] @@ -232,3 +102,4 @@ include::aggregations/bucket.asciidoc[] include::aggregations/reducer.asciidoc[] +include::aggregations/misc.asciidoc[] diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc new file mode 100644 index 00000000000..2d185dd49a0 --- /dev/null +++ b/docs/reference/aggregations/bucket.asciidoc @@ -0,0 +1,49 @@ +[[search-aggregations-bucket]] +== Bucket Aggregations + +Bucket aggregations don't calculate metrics over fields like the metrics aggregations do, but instead, they create +buckets of documents. Each bucket is associated with a criterion (depending on the aggregation type) which determines +whether or not a document in the current context "falls" into it. In other words, the buckets effectively define document +sets. In addition to the buckets themselves, the `bucket` aggregations also compute and return the number of documents +that "fell in" to each bucket. + +Bucket aggregations, as opposed to `metrics` aggregations, can hold sub-aggregations. These sub-aggregations will be +aggregated for the buckets created by their "parent" bucket aggregation. 
+ +There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some +define a fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process. + +include::bucket/children-aggregation.asciidoc[] + +include::bucket/datehistogram-aggregation.asciidoc[] + +include::bucket/daterange-aggregation.asciidoc[] + +include::bucket/filter-aggregation.asciidoc[] + +include::bucket/filters-aggregation.asciidoc[] + +include::bucket/geodistance-aggregation.asciidoc[] + +include::bucket/geohashgrid-aggregation.asciidoc[] + +include::bucket/global-aggregation.asciidoc[] + +include::bucket/histogram-aggregation.asciidoc[] + +include::bucket/iprange-aggregation.asciidoc[] + +include::bucket/missing-aggregation.asciidoc[] + +include::bucket/nested-aggregation.asciidoc[] + +include::bucket/range-aggregation.asciidoc[] + +include::bucket/reverse-nested-aggregation.asciidoc[] + +include::bucket/sampler-aggregation.asciidoc[] + +include::bucket/significantterms-aggregation.asciidoc[] + +include::bucket/terms-aggregation.asciidoc[] + diff --git a/docs/reference/search/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/children-aggregation.asciidoc rename to docs/reference/aggregations/bucket/children-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc rename to docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc similarity index 97% rename from 
docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc rename to docs/reference/aggregations/bucket/daterange-aggregation.asciidoc index 7c5d6cc86fc..a965716269e 100644 --- a/docs/reference/search/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc @@ -2,7 +2,7 @@ === Date Range Aggregation A range aggregation that is dedicated for date values. The main difference between this aggregation and the normal <> aggregation is that the `from` and `to` values can be expressed in <> expressions, and it is also possible to specify a date format by which the `from` and `to` response fields will be returned. -Note that this aggregration includes the `from` value and excludes the `to` value for each range. +Note that this aggregation includes the `from` value and excludes the `to` value for each range. Example: diff --git a/docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc b/docs/reference/aggregations/bucket/filter-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/filter-aggregation.asciidoc rename to docs/reference/aggregations/bucket/filter-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/filters-aggregation.asciidoc rename to docs/reference/aggregations/bucket/filters-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc rename to docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc 
b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc similarity index 98% rename from docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc rename to docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc index e74e3e96d1b..e90e1c95deb 100644 --- a/docs/reference/search/aggregations/bucket/geohashgrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc @@ -119,7 +119,7 @@ size:: Optional. The maximum number of geohash buckets to return prioritised based on the volumes of documents they contain. A value of `0` will return all buckets that contain a hit, use with caution as this could use a lot of CPU - and network bandwith if there are many buckets. + and network bandwidth if there are many buckets. shard_size:: Optional. To allow for more accurate counting of the top cells returned in the final result the aggregation defaults to diff --git a/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc b/docs/reference/aggregations/bucket/global-aggregation.asciidoc similarity index 97% rename from docs/reference/search/aggregations/bucket/global-aggregation.asciidoc rename to docs/reference/aggregations/bucket/global-aggregation.asciidoc index fa500e1ff85..4e5addb46c1 100644 --- a/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/global-aggregation.asciidoc @@ -30,7 +30,7 @@ Example: The above aggregation demonstrates how one would compute aggregations (`avg_price` in this example) on all the documents in the search context, regardless of the query (in our example, it will compute the average price over all products in our catalog, not just on the "shirts"). 
-The response for the above aggreation: +The response for the above aggregation: [source,js] -------------------------------------------------- diff --git a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc similarity index 99% rename from docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc rename to docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index cd1fd06ddaf..129732c0410 100644 --- a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -160,7 +160,7 @@ Example: ==== Order -By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controled +By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using the `order` setting. Ordering the buckets by their key - descending: diff --git a/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc b/docs/reference/aggregations/bucket/iprange-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc rename to docs/reference/aggregations/bucket/iprange-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc b/docs/reference/aggregations/bucket/missing-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/missing-aggregation.asciidoc rename to docs/reference/aggregations/bucket/missing-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/nested-aggregation.asciidoc rename to docs/reference/aggregations/bucket/nested-aggregation.asciidoc 
diff --git a/docs/reference/search/aggregations/bucket/range-aggregation.asciidoc b/docs/reference/aggregations/bucket/range-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/range-aggregation.asciidoc rename to docs/reference/aggregations/bucket/range-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/reverse-nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/reverse-nested-aggregation.asciidoc rename to docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc similarity index 97% rename from docs/reference/search/aggregations/bucket/sampler-aggregation.asciidoc rename to docs/reference/aggregations/bucket/sampler-aggregation.asciidoc index 5ad9dbc0194..29742709ea0 100644 --- a/docs/reference/search/aggregations/bucket/sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/sampler-aggregation.asciidoc @@ -72,7 +72,7 @@ Response: The `shard_size` parameter limits how many top-scoring documents are collected in the sample processed on each shard. The default value is 100. -=== Controlling diversity +==== Controlling diversity Optionally, you can use the `field` or `script` and `max_docs_per_value` settings to control the maximum number of documents collected on any one shard which share a common value. The choice of value (e.g. `author`) is loaded from a regular `field` or derived dynamically by a `script`. @@ -139,16 +139,16 @@ The default setting is to use `global_ordinals` if this information is available The `bytes_hash` setting may prove faster in some cases but introduces the possibility of false positives in de-duplication logic due to the possibility of hash collisions. 
Please note that Elasticsearch will ignore the choice of execution hint if it is not applicable and that there is no backward compatibility guarantee on these hints. -=== Limitations +==== Limitations -==== Cannot be nested under `breadth_first` aggregations +===== Cannot be nested under `breadth_first` aggregations Being a quality-based filter the sampler aggregation needs access to the relevance score produced for each document. It therefore cannot be nested under a `terms` aggregation which has the `collect_mode` switched from the default `depth_first` mode to `breadth_first` as this discards scores. In this situation an error will be thrown. -==== Limited de-dup logic. +===== Limited de-dup logic. The de-duplication logic in the diversify settings applies only at a shard level so will not apply across shards. -==== No specialized syntax for geo/date fields +===== No specialized syntax for geo/date fields Currently the syntax for defining the diversifying values is defined by a choice of `field` or `script` - there is no added syntactical sugar for expressing geo or date units such as "1w" (1 week). This support may be added in a later release and users will currently have to create these sorts of values using a script. 
\ No newline at end of file diff --git a/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc rename to docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc rename to docs/reference/aggregations/bucket/terms-aggregation.asciidoc diff --git a/docs/reference/aggregations/metrics.asciidoc b/docs/reference/aggregations/metrics.asciidoc new file mode 100644 index 00000000000..f80c36f2ebe --- /dev/null +++ b/docs/reference/aggregations/metrics.asciidoc @@ -0,0 +1,48 @@ +[[search-aggregations-metrics]] +== Metrics Aggregations + +The aggregations in this family compute metrics based on values extracted in one way or another from the documents that +are being aggregated. The values are typically extracted from the fields of the document (using the field data), but +can also be generated using scripts. + +Numeric metrics aggregations are a special type of metrics aggregation which output numeric values. Some aggregations output +a single numeric metric (e.g. `avg`) and are called `single-value numeric metrics aggregation`, others generate multiple +metrics (e.g. `stats`) and are called `multi-value numeric metrics aggregation`. The distinction between single-value and +multi-value numeric metrics aggregations plays a role when these aggregations serve as direct sub-aggregations of some +bucket aggregations (some bucket aggregations enable you to sort the returned buckets based on the numeric metrics in each bucket). 
+ +include::metrics/avg-aggregation.asciidoc[] + +include::metrics/cardinality-aggregation.asciidoc[] + +include::metrics/extendedstats-aggregation.asciidoc[] + +include::metrics/geobounds-aggregation.asciidoc[] + +include::metrics/max-aggregation.asciidoc[] + +include::metrics/min-aggregation.asciidoc[] + +include::metrics/percentile-aggregation.asciidoc[] + +include::metrics/percentile-rank-aggregation.asciidoc[] + +include::metrics/scripted-metric-aggregation.asciidoc[] + +include::metrics/stats-aggregation.asciidoc[] + +include::metrics/sum-aggregation.asciidoc[] + +include::metrics/tophits-aggregation.asciidoc[] + +include::metrics/valuecount-aggregation.asciidoc[] + + + + + + + + + + diff --git a/docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc b/docs/reference/aggregations/metrics/avg-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/avg-aggregation.asciidoc rename to docs/reference/aggregations/metrics/avg-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/cardinality-aggregation.asciidoc rename to docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc rename to docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc rename to 
docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc b/docs/reference/aggregations/metrics/max-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/max-aggregation.asciidoc rename to docs/reference/aggregations/metrics/max-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/aggregations/metrics/min-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/min-aggregation.asciidoc rename to docs/reference/aggregations/metrics/min-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc rename to docs/reference/aggregations/metrics/percentile-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc rename to docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc rename to docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc b/docs/reference/aggregations/metrics/stats-aggregation.asciidoc similarity index 100% rename from 
docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc rename to docs/reference/aggregations/metrics/stats-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/aggregations/metrics/sum-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc rename to docs/reference/aggregations/metrics/sum-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/tophits-aggregation.asciidoc rename to docs/reference/aggregations/metrics/tophits-aggregation.asciidoc diff --git a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc similarity index 100% rename from docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc rename to docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc diff --git a/docs/reference/aggregations/misc.asciidoc b/docs/reference/aggregations/misc.asciidoc new file mode 100644 index 00000000000..f494d5291c0 --- /dev/null +++ b/docs/reference/aggregations/misc.asciidoc @@ -0,0 +1,76 @@ + +[[caching-heavy-aggregations]] +== Caching heavy aggregations + +Frequently used aggregations (e.g. for display on the home page of a website) +can be cached for faster responses. These cached results are the same results +that would be returned by an uncached aggregation -- you will never get stale +results. + +See <> for more details. + +[[returning-only-agg-results]] +== Returning only aggregation results + +There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by +setting `size=0`. 
For example: + +[source,js] +-------------------------------------------------- +$ curl -XGET 'http://localhost:9200/twitter/tweet/_search' -d '{ + "size": 0, + "aggregations": { + "my_agg": { + "terms": { + "field": "text" + } + } + } +} +' +-------------------------------------------------- + +Setting `size` to `0` avoids executing the fetch phase of the search making the request more efficient. + +[[agg-metadata]] +== Aggregation Metadata + +You can associate a piece of metadata with individual aggregations at request time that will be returned in place +at response time. + +Consider this example where we want to associate the color blue with our `terms` aggregation. + +[source,js] +-------------------------------------------------- +{ + ... + "aggs": { + "titles": { + "terms": { + "field": "title" + }, + "meta": { + "color": "blue" + } + } + } +} +-------------------------------------------------- + +Then that piece of metadata will be returned in place for our `titles` terms aggregation + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations": { + "titles": { + "meta": { + "color" : "blue" + }, + "buckets": [ + ] + } + } +} +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/aggregations/reducer.asciidoc b/docs/reference/aggregations/reducer.asciidoc new file mode 100644 index 00000000000..2ce379cd583 --- /dev/null +++ b/docs/reference/aggregations/reducer.asciidoc @@ -0,0 +1,160 @@ +[[search-aggregations-reducer]] + +== Reducer Aggregations + +coming[2.0.0] + +experimental[] + +Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding +information to the output tree. 
There are many different types of reducer, each computing different information from +other aggregations, but these types can be broken down into two families: + +_Parent_:: + A family of reducer aggregations that is provided with the output of its parent aggregation and is able + to compute new buckets or new aggregations to add to existing buckets. + +_Sibling_:: + Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a + new aggregation which will be at the same level as the sibling aggregation. + +Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths` +parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the +<> section below. + +Reducer aggregations cannot have sub-aggregations but depending on the type it can reference another reducer in the `buckets_path` +allowing reducers to be chained. For example, you can chain together two derivatives to calculate the second derivative +(e.g. a derivative of a derivative). + +NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be +included in the final output. + +[[bucket-path-syntax]] +[float] +=== `buckets_path` Syntax + +Most reducers require another aggregation as their input. The input aggregation is defined via the `buckets_path` +parameter, which follows a specific format: + +-------------------------------------------------- +AGG_SEPARATOR := '>' +METRIC_SEPARATOR := '.' +AGG_NAME := <the name of the aggregation> +METRIC := <the name of the metric (in case of multi-value metrics aggregation)> +PATH := <AGG_NAME>[<AGG_SEPARATOR><AGG_NAME>]*[<METRIC_SEPARATOR><METRIC>] +-------------------------------------------------- + +For example, the path `"my_bucket>my_stats.avg"` will path to the `avg` value in the `"my_stats"` metric, which is +contained in the `"my_bucket"` bucket aggregation. + +Paths are relative from the position of the reducer; they are not absolute paths, and the path cannot go back "up" the +aggregation tree.
For example, this moving average is embedded inside a date_histogram and refers to a "sibling" +metric `"the_sum"`: + +[source,js] +-------------------------------------------------- +{ + "my_date_histo":{ + "date_histogram":{ + "field":"timestamp", + "interval":"day" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "lemmings" } <1> + }, + "the_movavg":{ + "moving_avg":{ "buckets_path": "the_sum" } <2> + } + } + } +} +-------------------------------------------------- +<1> The metric is called `"the_sum"` +<2> The `buckets_path` refers to the metric via a relative path `"the_sum"` + +`buckets_path` is also used for Sibling reducer aggregations, where the aggregation is "next" to a series of buckets +instead of embedded "inside" them. For example, the `max_bucket` aggregation uses the `buckets_path` to specify +a metric embedded inside a sibling aggregation: + +[source,js] +-------------------------------------------------- +{ + "aggs" : { + "sales_per_month" : { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggs": { + "sales": { + "sum": { + "field": "price" + } + } + } + }, + "max_monthly_sales": { + "max_bucket": { + "buckets_paths": "sales_per_month>sales" <1> + } + } + } +} +-------------------------------------------------- +<1> `buckets_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +`sales_per_month` date histogram. + +[float] +==== Special Paths + +Instead of pathing to a metric, `buckets_path` can use a special `"_count"` path. This instructs +the reducer to use the document count as its input.
For example, a moving average can be calculated on the document +count of each bucket, instead of a specific metric: + +[source,js] +-------------------------------------------------- +{ + "my_date_histo":{ + "date_histogram":{ + "field":"timestamp", + "interval":"day" + }, + "aggs":{ + "the_movavg":{ + "moving_avg":{ "buckets_path": "_count" } <1> + } + } + } +} +-------------------------------------------------- +<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram + + +[float] +=== Dealing with gaps in the data + +There are a couple of reasons why the data output by the enclosing histogram may have gaps: + +* There are no documents matching the query for some buckets +* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval +on the enclosing histogram or with a query matching only a small number of documents) + +Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both +the current bucket and the next bucket. The derivative reducer aggregation has a `gap_policy` parameter to define what the behavior +should be when a gap in the data is found. There are currently two options for controlling the gap policy: + +_ignore_:: + This option will not produce a derivative value for any buckets where the value in the current or previous bucket is + missing + +_insert_zeros_:: + This option will assume the missing value is `0` and calculate the derivative with the value `0`.
+ + + + +include::reducer/derivative-aggregation.asciidoc[] +include::reducer/max-bucket-aggregation.asciidoc[] +include::reducer/min-bucket-aggregation.asciidoc[] +include::reducer/movavg-aggregation.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc b/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc similarity index 83% rename from docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc rename to docs/reference/aggregations/reducer/derivative-aggregation.asciidoc index be644091b51..17801055418 100644 --- a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc @@ -5,6 +5,28 @@ A parent reducer aggregation which calculates the derivative of a specified metr aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0` (default for `histogram` aggregations). +==== Syntax + +A `derivative` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "derivative": { + "buckets_path": "the_sum" + } +} +-------------------------------------------------- + +.`derivative` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details |Required | +|=== + + +==== First Order Derivative + The following snippet calculates the derivative of the total monthly `sales`: [source,js] @@ -82,7 +104,7 @@ And the following may be the response: <1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative <2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units would be $/month assuming the `price` field has units of $. 
-<3> The number of documents in the bucket are represented by the `doc_count` value +<3> The number of documents in the bucket are represented by the `doc_count` f ==== Second Order Derivative @@ -172,23 +194,3 @@ And the following may be the response: <1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the second derivative -==== Dealing with gaps in the data - -There are a couple of reasons why the data output by the enclosing histogram may have gaps: - -* There are no documents matching the query for some buckets -* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval -on the enclosing histogram or with a query matching only a small number of documents) - -Where there is no data available in a bucket for a given metric it presents a problem for calculating the derivative value for both -the current bucket and the next bucket. In the derivative reducer aggregation has a `gap_policy` parameter to define what the behavior -should be when a gap in the data is found. There are currently two options for controlling the gap policy: - -_ignore_:: - This option will not produce a derivative value for any buckets where the value in the current or previous bucket is - missing - -_insert_zeros_:: - This option will assume the missing value is `0` and calculate the derivative with the value `0`. 
- - diff --git a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc similarity index 81% rename from docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc rename to docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc index a93c7ed8036..939140b4a26 100644 --- a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc @@ -1,10 +1,30 @@ [[search-aggregations-reducer-max-bucket-aggregation]] === Max Bucket Aggregation -A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibing aggregation +A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. +==== Syntax + +A `max_bucket` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "max_bucket": { + "buckets_path": "the_sum" + } +} +-------------------------------------------------- + +.`max_bucket` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |The path to the buckets we wish to find the maximum for (see <> for more + details |Required | +|=== + The following snippet calculates the maximum of the total monthly `sales`: [source,js] @@ -32,7 +52,6 @@ The following snippet calculates the maximum of the total monthly `sales`: } } -------------------------------------------------- - <1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the `sales_per_month` date histogram. 
diff --git a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc similarity index 83% rename from docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc rename to docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc index 558d0c19983..1ea26c17a2e 100644 --- a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc @@ -5,6 +5,26 @@ A sibling reducer aggregation which identifies the bucket(s) with the minimum va and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. +==== Syntax + +A `min_bucket` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "min_bucket": { + "buckets_path": "the_sum" + } +} +-------------------------------------------------- + +.`min_bucket` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details) |Required | +|=== + + The following snippet calculates the minimum of the total monthly `sales`: [source,js] diff --git a/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc b/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc similarity index 93% rename from docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc rename to docs/reference/aggregations/reducer/movavg-aggregation.asciidoc index 03f6b7e9fa1..18cf98d263d 100644 --- a/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc @@ -35,16 +35,14 @@ A `moving_avg` aggregation looks like this in isolation: .`moving_avg` Parameters |=== -|Parameter Name |Description |Required |Default - 
-|`buckets_path` |The path to the metric that we wish to calculate a moving average for |Required | +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details) |Required | |`model` |The moving average weighting model that we wish to use |Optional |`simple` |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` |`window` |The size of window to "slide" across the histogram. |Optional |`5` |`settings` |Model-specific settings, contents which differ depending on the model specified. |Optional | |=== - `moving_avg` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be embedded like any other metric aggregation: @@ -73,27 +71,9 @@ embedded like any other metric aggregation: Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. -The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram. +The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see +<> for a description of the syntax for `buckets_path`).
-A moving average can also be calculated on the document count of each bucket, instead of a metric: - -[source,js] --------------------------------------------------- -{ - "my_date_histo":{ - "date_histogram":{ - "field":"timestamp", - "interval":"day" - }, - "aggs":{ - "the_movavg":{ - "moving_avg":{ "buckets_path": "_count" } <1> - } - } - } -} --------------------------------------------------- -<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram ==== Models @@ -250,7 +230,7 @@ image::images/reducers_movavg/double_0.2beta.png[] .Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.7 image::images/reducers_movavg/double_0.7beta.png[] -=== Prediction +==== Prediction All the moving average model support a "prediction" mode, which will attempt to extrapolate into the future given the current smoothed, moving average. Depending on the model and parameter, these predictions may or may not be accurate. diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc index 5c778a6c83d..a7cf7136a83 100644 --- a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc @@ -18,6 +18,9 @@ filters. |`char_filter` |An optional list of logical / registered name of char filters. + +|`position_offset_gap` |An optional number of positions to increment +between each field value of a field using this analyzer. 
|======================================================================= Here is an example: @@ -32,6 +35,7 @@ index : tokenizer : myTokenizer1 filter : [myTokenFilter1, myTokenFilter2] char_filter : [my_html] + position_offset_gap: 256 tokenizer : myTokenizer1 : type : standard diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc index 8108614ab74..47b247e4bd5 100644 --- a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc @@ -3,7 +3,7 @@ Basic support for hunspell stemming. Hunspell dictionaries will be picked up from a dedicated hunspell directory on the filesystem -(defaults to `/hunspell`). Each dictionary is expected to +(`/hunspell`). Each dictionary is expected to have its own directory named after its associated locale (language). This dictionary directory is expected to hold a single `*.aff` and one or more `*.dic` files (all of which will automatically be picked up). @@ -19,10 +19,6 @@ following directory layout will define the `en_US` dictionary: | | |-- en_US.aff -------------------------------------------------- -The location of the hunspell directory can be configured using the -`indices.analysis.hunspell.dictionary.location` settings in -_elasticsearch.yml_. - Each dictionary can be configured with one setting: `ignore_case`:: @@ -91,9 +87,9 @@ the stemming is determined by the quality of the dictionary. [float] ==== Dictionary loading -By default, the configured (`indices.analysis.hunspell.dictionary.location`) -or default Hunspell directory (`config/hunspell/`) is checked for dictionaries -when the node starts up, and any dictionaries are automatically loaded. +By default, the default Hunspell directory (`config/hunspell/`) is checked +for dictionaries when the node starts up, and any dictionaries are +automatically loaded. 
Dictionary loading can be deferred until they are actually used by setting `indices.analysis.hunspell.dictionary.lazy` to `true`in the config file. diff --git a/docs/reference/index-modules/allocation.asciidoc b/docs/reference/index-modules/allocation.asciidoc index 910858f7fcd..800e4d5de5e 100644 --- a/docs/reference/index-modules/allocation.asciidoc +++ b/docs/reference/index-modules/allocation.asciidoc @@ -43,7 +43,7 @@ to be allocated to a node. This is in contrast to `include` which will include a node if ANY rule matches. The `include`, `exclude` and `require` values can have generic simple -matching wildcards, for example, `value1*`. Additonally, special attribute +matching wildcards, for example, `value1*`. Additionally, special attribute names called `_ip`, `_name`, `_id` and `_host` can be used to match by node ip address, name, id or host name, respectively. diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index b7a9969ede5..87b7e2c4f65 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -24,7 +24,7 @@ The period with no flush happening to force a flush. Defaults to `30m`. How often to check if a flush is needed, randomized between the interval value and 2x the interval value. Defaults to `5s`. -`index.gateway.local.sync`:: +`index.translog.sync_interval`:: How often the translog is ++fsync++ed to disk. Defaults to `5s`. 
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 1e63d18a4d2..696fbaa3bca 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -18,6 +18,8 @@ include::docs.asciidoc[] include::search.asciidoc[] +include::aggregations.asciidoc[] + include::indices.asciidoc[] include::cat.asciidoc[] diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index f6cfd4f92a9..aa156676e85 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -59,7 +59,7 @@ you now use: GET /_cluster/state/nodes --------------- -Simliarly for the `nodes_stats` API, if you want the `transport` and `http` +Similarly for the `nodes_stats` API, if you want the `transport` and `http` metrics only, instead of: [source,sh] diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index 292bb633a29..b9e84b85839 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -309,7 +309,7 @@ Fields of type `murmur3` can no longer change `doc_values` or `index` setting. They are always stored with doc values, and not indexed. ==== Source field configuration -The `_source` field no longer supports `includes` and `excludes` paramters. When +The `_source` field no longer supports `includes` and `excludes` parameters. When `_source` is enabled, the entire original source will be stored. ==== Config based mappings @@ -400,9 +400,9 @@ be used separately to control whether `routing_nodes` should be returned. === Query DSL -Change to ranking behaviour: single-term queries on numeric fields now score in the same way as string fields (use of IDF, norms if enabled). +Change to ranking behaviour: single-term queries on numeric fields now score in the same way as string fields (use of IDF, norms if enabled). 
Previously, term queries on numeric fields were deliberately prevented from using the usual Lucene scoring logic and this behaviour was undocumented and, to some, unexpected. -If the introduction of scoring to numeric fields is undesirable for your query clauses the fix is simple: wrap them in a `constant_score` or use a `filter` expression instead. +If the introduction of scoring to numeric fields is undesirable for your query clauses the fix is simple: wrap them in a `constant_score` or use a `filter` expression instead. The `fuzzy_like_this` and `fuzzy_like_this_field` queries have been removed. @@ -458,3 +458,29 @@ there is not enough disk space to complete this migration, the upgrade will be cancelled and can only be resumed once enough disk space is made available. The `index.store.distributor` setting has also been removed. + +=== Hunspell dictionary configuration + +The parameter `indices.analysis.hunspell.dictionary.location` has been removed, +and `/hunspell` is always used. + +=== Java API Transport API construction + +The `TransportClient` construction code has changed, it now uses the builder +pattern. 
Instead of using: + +[source,java] +-------------------------------------------------- +Settings settings = ImmutableSettings.settingsBuilder() + .put("cluster.name", "myClusterName").build(); +Client client = new TransportClient(settings); +-------------------------------------------------- + +Use: + +[source,java] +-------------------------------------------------- +Settings settings = ImmutableSettings.settingsBuilder() + .put("cluster.name", "myClusterName").build(); +Client client = TransportClient.builder().settings(settings).build(); +-------------------------------------------------- diff --git a/docs/reference/modules/advanced-scripting.asciidoc b/docs/reference/modules/advanced-scripting.asciidoc index 4de55531697..ba96a6ec7ab 100644 --- a/docs/reference/modules/advanced-scripting.asciidoc +++ b/docs/reference/modules/advanced-scripting.asciidoc @@ -69,7 +69,7 @@ Field statistics can be accessed with a subscript operator like this: documents. -Field statistics are computed per shard and therfore these numbers can vary +Field statistics are computed per shard and therefore these numbers can vary depending on the shard the current document resides in. The number of terms in a field cannot be accessed using the `_index` variable. See <> on how to do that. @@ -90,7 +90,7 @@ affect is your set the `index_options` to `docs` (see <>. -Additionaly, every child document is mapped to its parent using a long +Additionally, every child document is mapped to its parent using a long value (approximately). It is advisable to keep the string parent ID short in order to reduce memory usage. 
diff --git a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc index dc708cceda3..1f43c47d41d 100644 --- a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc @@ -50,7 +50,7 @@ The `has_parent` filter also accepts a filter instead of a query: In order to support parent-child joins, all of the (string) parent IDs must be resident in memory (in the <>. -Additionaly, every child document is mapped to its parent using a long +Additionally, every child document is mapped to its parent using a long value (approximately). It is advisable to keep the string parent ID short in order to reduce memory usage. diff --git a/docs/reference/query-dsl/queries/common-terms-query.asciidoc b/docs/reference/query-dsl/queries/common-terms-query.asciidoc index 3e9a73e31dc..b9aee04e401 100644 --- a/docs/reference/query-dsl/queries/common-terms-query.asciidoc +++ b/docs/reference/query-dsl/queries/common-terms-query.asciidoc @@ -55,7 +55,7 @@ Terms are allocated to the high or low frequency groups based on the `cutoff_frequency`, which can be specified as an absolute frequency (`>=1`) or as a relative frequency (`0.0 .. 1.0`). (Remember that document frequencies are computed on a per shard level as explained in the blog post -{defguide}/relevance-is-broken.html[Relevence is broken].) +{defguide}/relevance-is-broken.html[Relevance is broken].) Perhaps the most interesting property of this query is that it adapts to domain specific stopwords automatically. For example, on a video hosting diff --git a/docs/reference/query-dsl/queries/has-child-query.asciidoc b/docs/reference/query-dsl/queries/has-child-query.asciidoc index 0fcf7873c26..f161dd74842 100644 --- a/docs/reference/query-dsl/queries/has-child-query.asciidoc +++ b/docs/reference/query-dsl/queries/has-child-query.asciidoc @@ -86,7 +86,7 @@ the `score_mode` parameter. 
In order to support parent-child joins, all of the (string) parent IDs must be resident in memory (in the <>. -Additionaly, every child document is mapped to its parent using a long +Additionally, every child document is mapped to its parent using a long value (approximately). It is advisable to keep the string parent ID short in order to reduce memory usage. diff --git a/docs/reference/query-dsl/queries/has-parent-query.asciidoc b/docs/reference/query-dsl/queries/has-parent-query.asciidoc index f684173a45b..3e66e6f5b0c 100644 --- a/docs/reference/query-dsl/queries/has-parent-query.asciidoc +++ b/docs/reference/query-dsl/queries/has-parent-query.asciidoc @@ -54,7 +54,7 @@ matching parent document. The score type can be specified with the In order to support parent-child joins, all of the (string) parent IDs must be resident in memory (in the <>. -Additionaly, every child document is mapped to its parent using a long +Additionally, every child document is mapped to its parent using a long value (approximately). It is advisable to keep the string parent ID short in order to reduce memory usage. diff --git a/docs/reference/query-dsl/queries/span-not-query.asciidoc b/docs/reference/query-dsl/queries/span-not-query.asciidoc index ad6fd8ca4a4..abd6ef82a20 100644 --- a/docs/reference/query-dsl/queries/span-not-query.asciidoc +++ b/docs/reference/query-dsl/queries/span-not-query.asciidoc @@ -30,7 +30,7 @@ The `include` and `exclude` clauses can be any span type query. The `exclude` clause is the span query whose matches must not overlap those returned. -In the above example all documents with the term hoya are filtered except the ones that have 'la' preceeding them. +In the above example all documents with the term hoya are filtered except the ones that have 'la' preceding them. 
Other top level options: diff --git a/docs/reference/query-dsl/queries/top-children-query.asciidoc b/docs/reference/query-dsl/queries/top-children-query.asciidoc index a13250094d6..4616d87676b 100644 --- a/docs/reference/query-dsl/queries/top-children-query.asciidoc +++ b/docs/reference/query-dsl/queries/top-children-query.asciidoc @@ -68,7 +68,7 @@ same scope name that will work against the child documents. For example: In order to support parent-child joins, all of the (string) parent IDs must be resident in memory (in the <>. -Additionaly, every child document is mapped to its parent using a long +Additionally, every child document is mapped to its parent using a long value (approximately). It is advisable to keep the string parent ID short in order to reduce memory usage. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 79d3c7a93fd..b71a0dfe466 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -85,8 +85,6 @@ include::search/search-template.asciidoc[] include::search/search-shards.asciidoc[] -include::search/aggregations.asciidoc[] - include::search/facets.asciidoc[] include::search/suggesters.asciidoc[] diff --git a/docs/reference/search/aggregations/bucket.asciidoc b/docs/reference/search/aggregations/bucket.asciidoc deleted file mode 100644 index 7d7848fa1a2..00000000000 --- a/docs/reference/search/aggregations/bucket.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -[[search-aggregations-bucket]] - -include::bucket/global-aggregation.asciidoc[] - -include::bucket/filter-aggregation.asciidoc[] - -include::bucket/filters-aggregation.asciidoc[] - -include::bucket/missing-aggregation.asciidoc[] - -include::bucket/nested-aggregation.asciidoc[] - -include::bucket/reverse-nested-aggregation.asciidoc[] - -include::bucket/children-aggregation.asciidoc[] - -include::bucket/terms-aggregation.asciidoc[] - -include::bucket/significantterms-aggregation.asciidoc[] - -include::bucket/range-aggregation.asciidoc[] - 
-include::bucket/daterange-aggregation.asciidoc[] - -include::bucket/iprange-aggregation.asciidoc[] - -include::bucket/histogram-aggregation.asciidoc[] - -include::bucket/datehistogram-aggregation.asciidoc[] - -include::bucket/geodistance-aggregation.asciidoc[] - -include::bucket/geohashgrid-aggregation.asciidoc[] diff --git a/docs/reference/search/aggregations/metrics.asciidoc b/docs/reference/search/aggregations/metrics.asciidoc deleted file mode 100644 index 7dbbd090bbd..00000000000 --- a/docs/reference/search/aggregations/metrics.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ -[[search-aggregations-metrics]] - -include::metrics/min-aggregation.asciidoc[] - -include::metrics/max-aggregation.asciidoc[] - -include::metrics/sum-aggregation.asciidoc[] - -include::metrics/avg-aggregation.asciidoc[] - -include::metrics/stats-aggregation.asciidoc[] - -include::metrics/extendedstats-aggregation.asciidoc[] - -include::metrics/valuecount-aggregation.asciidoc[] - -include::metrics/percentile-aggregation.asciidoc[] - -include::metrics/percentile-rank-aggregation.asciidoc[] - -include::metrics/cardinality-aggregation.asciidoc[] - -include::metrics/geobounds-aggregation.asciidoc[] - -include::metrics/tophits-aggregation.asciidoc[] - -include::metrics/scripted-metric-aggregation.asciidoc[] diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc deleted file mode 100644 index a725bc77e38..00000000000 --- a/docs/reference/search/aggregations/reducer.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -[[search-aggregations-reducer]] - -include::reducer/derivative-aggregation.asciidoc[] -include::reducer/max-bucket-aggregation.asciidoc[] -include::reducer/min-bucket-aggregation.asciidoc[] -include::reducer/movavg-aggregation.asciidoc[] diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index c1547e8784c..729ec8ad839 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ 
b/docs/reference/search/request/rescore.asciidoc @@ -73,7 +73,7 @@ curl -s -XPOST 'localhost:9200/_search' -d '{ ' -------------------------------------------------- -The way the scores are combined can be controled with the `score_mode`: +The way the scores are combined can be controlled with the `score_mode`: [cols="<,<",options="header",] |======================================================================= |Score Mode |Description diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index f5532f66e85..b80264e9830 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -21,10 +21,10 @@ it does not take into account term frequencies and other search engine information from the other shards. If we want to support accurate ranking, we would need to first gather the term frequencies from all shards to calculate global term frequencies, then execute the query on -each shard using these globale frequencies. +each shard using these global frequencies. Also, because of the need to sort the results, getting back a large -document set, or even scrolling it, while maintaing the correct sorting +document set, or even scrolling it, while maintaining the correct sorting behavior can be a very expensive operation. For large result set scrolling without sorting, the `scan` search type (explained below) is also available. diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index 85ae2384fb7..39d1262a050 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -192,7 +192,7 @@ Allow to sort by `_geo_distance`. Here is an example: `distance_type`:: - How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slighly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles). 
+ How to compute the distance. Can either be `sloppy_arc` (default), `arc` (slightly more precise but significantly slower) or `plane` (faster, but inaccurate on long distances and close to the poles). Note: the geo distance sorting supports `sort_mode` options: `min`, `max` and `avg`. diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index c191e6bf129..311a9a75304 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -125,7 +125,7 @@ can contain misspellings (See parameter descriptions below). query terms a number `>=1` as an absolute number of query terms. The default is set to `1.0` which corresponds to that only corrections with at most 1 misspelled term are returned. Note that setting this too high - can negativly impact performance. Low values like `1` or `2` are recommended + can negatively impact performance. Low values like `1` or `2` are recommended otherwise the time spend in suggest calls might exceed the time spend in query execution. diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 7d0ad80e131..eed595e25ba 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -134,7 +134,7 @@ by running `ulimit -l unlimited` as `root` before starting Elasticsearch. Another possible reason why `mlockall` can fail is that the temporary directory (usually `/tmp`) is mounted with the `noexec` option. 
This can be solved by -specfying a new temp directory, by starting Elasticsearch with: +specifying a new temp directory, by starting Elasticsearch with: [source,sh] -------------- diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc index 9f9e745808f..c477f5ec9c7 100644 --- a/docs/reference/setup/upgrade.asciidoc +++ b/docs/reference/setup/upgrade.asciidoc @@ -121,6 +121,20 @@ This syntax applies to Elasticsearch 1.0 and later: * Repeat this process for all remaining nodes. +[IMPORTANT] +==================================================== +During a rolling upgrade, primary shards assigned to a node with the higher +version will never have their replicas assigned to a node with the lower +version, because the newer version may have a different data format which is +not understood by the older version. + +If it is not possible to assign the replica shards to another node with the +higher version -- e.g. if there is only one node with the higher version in +the cluster -- then the replica shards will remain unassigned, i.e. the +cluster health will be status `yellow`. As soon as another node with the +higher version joins the cluster, the replicas should be assigned and the +cluster health will reach status `green`. +==================================================== It may be possible to perform the upgrade by installing the new software while the service is running. This would reduce downtime by ensuring the service was ready to run on the new version as soon as it is stopped on the node being upgraded. This can be done by installing the new version in its own directory and using the symbolic link method outlined above. It is important to test this procedure first to be sure that site-specific configuration data and production indices will not be overwritten during the upgrade process. 
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc index d52e8804392..b1f3c7a3049 100644 --- a/docs/resiliency/index.asciidoc +++ b/docs/resiliency/index.asciidoc @@ -217,7 +217,7 @@ starts. See {GIT}9899[#9899] (STATUS; DONE, fixed in v1.5.0) Setting `zen.discovery.minimum_master_nodes` to a value higher than the current node count effectively leaves the cluster without a master and unable to process requests. The only -way to fix this is to add more master-eligibile nodes. {GIT}8321[#8321] adds a mechanism +way to fix this is to add more master-eligible nodes. {GIT}8321[#8321] adds a mechanism to validate settings before applying them, and {GIT}9051[#9051] extends this validation support to settings applied during a cluster restore. (STATUS: DONE, Fixed in v1.5.0) diff --git a/docs/ruby/persistence.asciidoc b/docs/ruby/persistence.asciidoc index 3d2d807e709..1860f0167f3 100644 --- a/docs/ruby/persistence.asciidoc +++ b/docs/ruby/persistence.asciidoc @@ -142,7 +142,7 @@ class Article # Execute code after saving the model. # - after_save { puts "Successfuly saved: #{self}" } + after_save { puts "Successfully saved: #{self}" } end ------------------------------------ @@ -215,7 +215,7 @@ Any callbacks defined in the model will be triggered during the persistence oper [source,ruby] ------------------------------------ article.save -# Successfuly saved: #
+# Successfully saved: #
------------------------------------ Please see the extensive documentation in the library diff --git a/pom.xml b/pom.xml index 432aa22744e..718e98e91ea 100644 --- a/pom.xml +++ b/pom.xml @@ -43,6 +43,7 @@ random random false + true ERROR 512m ${basedir}/logs/ @@ -635,8 +636,9 @@ ${tests.security.manager} ${tests.compatibility} true - - ${basedir}/src/main/resources/org/elasticsearch/bootstrap/security.policy + + true diff --git a/src/main/java/org/elasticsearch/action/Action.java b/src/main/java/org/elasticsearch/action/Action.java index f45e525c58f..51e3f5440ea 100644 --- a/src/main/java/org/elasticsearch/action/Action.java +++ b/src/main/java/org/elasticsearch/action/Action.java @@ -24,7 +24,7 @@ import org.elasticsearch.client.ElasticsearchClient; /** * Base action. Supports building the Request through a RequestBuilder. */ -public abstract class Action, Client extends ElasticsearchClient> +public abstract class Action> extends GenericAction { protected Action(String name) { @@ -34,5 +34,5 @@ public abstract class Action extends TransportRequest { - private boolean listenerThreaded = false; - protected ActionRequest() { super(); } @@ -43,25 +41,6 @@ public abstract class ActionRequest extends TransportRe //this.listenerThreaded = request.listenerThreaded(); } - /** - * Should the response listener be executed on a thread or not. - *

- *

When not executing on a thread, it will either be executed on the calling thread, or - * on an expensive, IO based, thread. - */ - public final boolean listenerThreaded() { - return this.listenerThreaded; - } - - /** - * Sets if the response listener be executed on a thread or not. - */ - @SuppressWarnings("unchecked") - public final T listenerThreaded(boolean listenerThreaded) { - this.listenerThreaded = listenerThreaded; - return (T) this; - } - public abstract ActionRequestValidationException validate(); @Override diff --git a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index 4335a40e030..bbb2c508ebe 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action; +import com.google.common.base.Preconditions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.PlainListenableActionFuture; import org.elasticsearch.client.Client; @@ -26,18 +27,22 @@ import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.threadpool.ThreadPool; /** * */ -public abstract class ActionRequestBuilder { +public abstract class ActionRequestBuilder> { + protected final Action action; protected final Request request; private final ThreadPool threadPool; - protected final Client client; + protected final ElasticsearchClient client; - protected ActionRequestBuilder(Client client, Request request) { + protected ActionRequestBuilder(ElasticsearchClient client, Action action, Request request) { + Preconditions.checkNotNull(action, "action must not be null"); + this.action = action; this.request = 
request; this.client = client; threadPool = client.threadPool(); @@ -48,12 +53,6 @@ public abstract class ActionRequestBuilder execute() { - PlainListenableActionFuture future = new PlainListenableActionFuture<>(request.listenerThreaded(), threadPool); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(threadPool); execute(future); return future; } @@ -87,9 +86,14 @@ public abstract class ActionRequestBuilder listener) { - doExecute(listener); + public final void execute(ActionListener listener) { + client.execute(action, beforeExecute(request), listener); } - protected abstract void doExecute(ActionListener listener); + /** + * A callback to additionally process the request before its executed + */ + protected Request beforeExecute(Request request) { + return request; + } } diff --git a/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index b2410f95827..dccf9b0fd57 100644 --- a/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -24,20 +24,15 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; /** * A generic proxy that will execute the given action against a specific node. 
*/ public class TransportActionNodeProxy extends AbstractComponent { - protected final TransportService transportService; - + private final TransportService transportService; private final GenericAction action; - private final TransportRequestOptions transportOptions; @Inject @@ -48,36 +43,17 @@ public class TransportActionNodeProxy listener) { + public void execute(final DiscoveryNode node, final Request request, final ActionListener listener) { ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); return; } - transportService.sendRequest(node, action.name(), request, transportOptions, new BaseTransportResponseHandler() { + transportService.sendRequest(node, action.name(), request, transportOptions, new ActionListenerResponseHandler(listener) { @Override public Response newInstance() { return action.newResponse(); } - - @Override - public String executor() { - if (request.listenerThreaded()) { - return ThreadPool.Names.LISTENER; - } - return ThreadPool.Names.SAME; - } - - @Override - public void handleResponse(Response response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } }); } - } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java deleted file mode 100644 index 91ce66543d4..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/ClusterAction.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.ClusterAdminClient; - -/** - * Cluster action (used with {@link ClusterAdminClient} API). - */ -public abstract class ClusterAction> - extends Action { - - protected ClusterAction(String name) { - super(name); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java index b8ec1c70d07..9770b05bae0 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterHealthAction extends ClusterAction { +public class ClusterHealthAction extends Action { public static final ClusterHealthAction INSTANCE = new ClusterHealthAction(); public static final String NAME = "cluster:monitor/health"; @@ -39,7 +39,7 @@ public class ClusterHealthAction extends ClusterAction { +public class ClusterHealthRequestBuilder extends 
MasterNodeReadOperationRequestBuilder { - public ClusterHealthRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterHealthRequest()); + public ClusterHealthRequestBuilder(ElasticsearchClient client, ClusterHealthAction action) { + super(client, action, new ClusterHealthRequest()); } public ClusterHealthRequestBuilder setIndices(String... indices) { @@ -86,9 +85,4 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB request.waitForEvents(waitForEvents); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.health(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java index 64b3c3cfcd8..7010b4cb143 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class NodesHotThreadsAction extends ClusterAction { +public class NodesHotThreadsAction extends Action { public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction(); public static final String NAME = "cluster:monitor/nodes/hot_threads"; @@ -39,7 +39,7 @@ public class NodesHotThreadsAction extends ClusterAction { - public NodesHotThreadsRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new NodesHotThreadsRequest()); + public NodesHotThreadsRequestBuilder(ElasticsearchClient client, NodesHotThreadsAction action) { + super(client, action, new 
NodesHotThreadsRequest()); } public NodesHotThreadsRequestBuilder setThreads(int threads) { @@ -51,9 +52,4 @@ public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder< request.interval(interval); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.nodesHotThreads(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java index 47d6fae7616..adfd9481e74 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class NodesInfoAction extends ClusterAction { +public class NodesInfoAction extends Action { public static final NodesInfoAction INSTANCE = new NodesInfoAction(); public static final String NAME = "cluster:monitor/nodes/info"; @@ -39,7 +39,7 @@ public class NodesInfoAction extends ClusterAction { - public NodesInfoRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new NodesInfoRequest()); + public NodesInfoRequestBuilder(ElasticsearchClient client, NodesInfoAction action) { + super(client, action, new NodesInfoRequest()); } /** @@ -119,9 +118,4 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder listener) { - client.nodesInfo(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java index 252eb1cc1db..3c322e3335e 100644 --- 
a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class NodesStatsAction extends ClusterAction { +public class NodesStatsAction extends Action { public static final NodesStatsAction INSTANCE = new NodesStatsAction(); public static final String NAME = "cluster:monitor/nodes/stats"; @@ -39,7 +39,7 @@ public class NodesStatsAction extends ClusterAction { - public NodesStatsRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new NodesStatsRequest()); + public NodesStatsRequestBuilder(ElasticsearchClient client, NodesStatsAction action) { + super(client, action, new NodesStatsRequest()); } /** @@ -133,9 +133,4 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder listener) { - client.nodesStats(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 2a9be85e37e..83166753b27 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.repositories.delete; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Unregister repository 
action */ -public class DeleteRepositoryAction extends ClusterAction { +public class DeleteRepositoryAction extends Action { public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction(); public static final String NAME = "cluster:admin/repository/delete"; @@ -40,8 +40,8 @@ public class DeleteRepositoryAction extends ClusterAction { +public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder { /** * Constructs unregister repository request builder - * - * @param clusterAdminClient cluster admin client */ - public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new DeleteRepositoryRequest()); + public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action) { + super(client, action, new DeleteRepositoryRequest()); } /** * Constructs unregister repository request builder with specified repository name - * - * @param clusterAdminClient cluster adming client */ - public DeleteRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) { - super(clusterAdminClient, new DeleteRepositoryRequest(name)); + public DeleteRepositoryRequestBuilder(ElasticsearchClient client, DeleteRepositoryAction action, String name) { + super(client, action, new DeleteRepositoryRequest(name)); } /** @@ -55,9 +50,4 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.deleteRepository(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java index befe51fdeb5..c0d1d1bcde7 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesAction.java @@ -19,13 +19,13 @@ package 
org.elasticsearch.action.admin.cluster.repositories.get; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Get repositories action */ -public class GetRepositoriesAction extends ClusterAction { +public class GetRepositoriesAction extends Action { public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction(); public static final String NAME = "cluster:admin/repository/get"; @@ -40,8 +40,8 @@ public class GetRepositoriesAction extends ClusterAction { +public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder { /** * Creates new get repository request builder - * - * @param clusterAdminClient cluster admin client */ - public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new GetRepositoriesRequest()); + public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action) { + super(client, action, new GetRepositoriesRequest()); } /** * Creates new get repository request builder - * - * @param clusterAdminClient cluster admin client - * @param repositories list of repositories to get */ - public GetRepositoriesRequestBuilder(ClusterAdminClient clusterAdminClient, String... repositories) { - super(clusterAdminClient, new GetRepositoriesRequest(repositories)); + public GetRepositoriesRequestBuilder(ElasticsearchClient client, GetRepositoriesAction action, String... 
repositories) { + super(client, action, new GetRepositoriesRequest(repositories)); } /** @@ -69,9 +63,4 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques request.repositories(ObjectArrays.concat(request.repositories(), repositories, String.class)); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.getRepositories(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index 30e9733663c..f3f8b51eeea 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.repositories.put; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Register repository action */ -public class PutRepositoryAction extends ClusterAction { +public class PutRepositoryAction extends Action { public static final PutRepositoryAction INSTANCE = new PutRepositoryAction(); public static final String NAME = "cluster:admin/repository/put"; @@ -40,8 +40,8 @@ public class PutRepositoryAction extends ClusterAction { +public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder { /** * Constructs register repository request - * - * @param clusterAdminClient cluster admin client */ - public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new PutRepositoryRequest()); + public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action) { + super(client, action, new PutRepositoryRequest()); } /** * 
Constructs register repository request for the repository with a given name - * - * @param clusterAdminClient cluster admin client - * @param name repository name */ - public PutRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) { - super(clusterAdminClient, new PutRepositoryRequest(name)); + public PutRepositoryRequestBuilder(ElasticsearchClient client, PutRepositoryAction action, String name) { + super(client, action, new PutRepositoryRequest(name)); } /** @@ -126,9 +121,4 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.putRepository(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java index 25f1b5004cd..d52945ac28c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Unregister repository action */ -public class VerifyRepositoryAction extends ClusterAction { +public class VerifyRepositoryAction extends Action { public static final VerifyRepositoryAction INSTANCE = new VerifyRepositoryAction(); public static final String NAME = "cluster:admin/repository/verify"; @@ -40,8 +40,8 @@ public class VerifyRepositoryAction extends ClusterAction { +public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs unregister repository request builder - * - * @param clusterAdminClient cluster admin client */ 
- public VerifyRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new VerifyRepositoryRequest()); + public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action) { + super(client, action, new VerifyRepositoryRequest()); } /** * Constructs unregister repository request builder with specified repository name - * - * @param clusterAdminClient cluster adming client */ - public VerifyRepositoryRequestBuilder(ClusterAdminClient clusterAdminClient, String name) { - super(clusterAdminClient, new VerifyRepositoryRequest(name)); + public VerifyRepositoryRequestBuilder(ElasticsearchClient client, VerifyRepositoryAction action, String name) { + super(client, action, new VerifyRepositoryRequest(name)); } /** @@ -56,9 +50,4 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu request.name(name); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.verifyRepository(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java index d2fc5a313ec..7aa6dc25cdc 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.reroute; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterRerouteAction extends ClusterAction { +public class ClusterRerouteAction extends Action { public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction(); public static final String NAME = "cluster:admin/reroute"; @@ 
-39,7 +39,7 @@ public class ClusterRerouteAction extends ClusterAction { +public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder { - public ClusterRerouteRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterRerouteRequest()); + public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) { + super(client, action, new ClusterRerouteRequest()); } /** @@ -68,9 +67,4 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.reroute(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java index 8e8a6bdb365..15b3e70dd04 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.settings; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterUpdateSettingsAction extends ClusterAction { +public class ClusterUpdateSettingsAction extends Action { public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction(); public static final String NAME = "cluster:admin/settings/update"; @@ -39,7 +39,7 @@ public class ClusterUpdateSettingsAction extends ClusterAction { +public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { - public ClusterUpdateSettingsRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterUpdateSettingsRequest()); + public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, 
ClusterUpdateSettingsAction action) { + super(client, action, new ClusterUpdateSettingsRequest()); } /** @@ -98,9 +97,4 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil request.persistentSettings(settings); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.updateSettings(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java index 15a218fc1f1..28f7557a2e2 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterSearchShardsAction extends ClusterAction { +public class ClusterSearchShardsAction extends Action { public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction(); public static final String NAME = "indices:admin/shards/search_shards"; @@ -39,7 +39,7 @@ public class ClusterSearchShardsAction extends ClusterAction { +public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public ClusterSearchShardsRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterSearchShardsRequest()); + public ClusterSearchShardsRequestBuilder(ElasticsearchClient client, ClusterSearchShardsAction action) { + super(client, action, new ClusterSearchShardsRequest()); } /** @@ -83,10 +82,4 @@ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRe 
request().indicesOptions(indicesOptions); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.searchShards(request, listener); - } - } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java index 0bbeb2d5279..9bdcd2c96bd 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Create snapshot action */ -public class CreateSnapshotAction extends ClusterAction { +public class CreateSnapshotAction extends Action { public static final CreateSnapshotAction INSTANCE = new CreateSnapshotAction(); public static final String NAME = "cluster:admin/snapshot/create"; @@ -40,8 +40,8 @@ public class CreateSnapshotAction extends ClusterAction { +public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs a new create snapshot request builder - * - * @param clusterAdminClient cluster admin client */ - public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new CreateSnapshotRequest()); + public CreateSnapshotRequestBuilder(ElasticsearchClient client, CreateSnapshotAction action) { + super(client, action, new CreateSnapshotRequest()); } /** * Constructs a new create snapshot request builder with specified repository and snapshot names - * - * @param clusterAdminClient cluster admin client - * @param repository repository name - * @param snapshot snapshot 
name */ - public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) { - super(clusterAdminClient, new CreateSnapshotRequest(repository, snapshot)); + public CreateSnapshotRequestBuilder(ElasticsearchClient client, CreateSnapshotAction action, String repository, String snapshot) { + super(client, action, new CreateSnapshotRequest(repository, snapshot)); } /** @@ -184,9 +178,4 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil request.includeGlobalState(includeGlobalState); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.createSnapshot(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 3abf3937627..8a794a0c8ff 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.delete; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Delete snapshot action */ -public class DeleteSnapshotAction extends ClusterAction { +public class DeleteSnapshotAction extends Action { public static final DeleteSnapshotAction INSTANCE = new DeleteSnapshotAction(); public static final String NAME = "cluster:admin/snapshot/delete"; @@ -40,8 +40,8 @@ public class DeleteSnapshotAction extends ClusterAction { +public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs delete snapshot request builder - * - * @param clusterAdminClient cluster admin client */ 
- public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new DeleteSnapshotRequest()); + public DeleteSnapshotRequestBuilder(ElasticsearchClient client, DeleteSnapshotAction action) { + super(client, action, new DeleteSnapshotRequest()); } /** * Constructs delete snapshot request builder with specified repository and snapshot names - * - * @param clusterAdminClient cluster admin client - * @param repository repository name - * @param snapshot snapshot name */ - public DeleteSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) { - super(clusterAdminClient, new DeleteSnapshotRequest(repository, snapshot)); + public DeleteSnapshotRequestBuilder(ElasticsearchClient client, DeleteSnapshotAction action, String repository, String snapshot) { + super(client, action, new DeleteSnapshotRequest(repository, snapshot)); } /** @@ -69,9 +63,4 @@ public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuil request.snapshot(snapshot); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.deleteSnapshot(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java index 0613b3776c8..7d2cafb9ced 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Get snapshots action */ -public class GetSnapshotsAction extends ClusterAction { +public 
class GetSnapshotsAction extends Action { public static final GetSnapshotsAction INSTANCE = new GetSnapshotsAction(); public static final String NAME = "cluster:admin/snapshot/get"; @@ -40,8 +40,8 @@ public class GetSnapshotsAction extends ClusterAction { +public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new get snapshot request - * - * @param clusterAdminClient cluster admin client */ - public GetSnapshotsRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new GetSnapshotsRequest()); + public GetSnapshotsRequestBuilder(ElasticsearchClient client, GetSnapshotsAction action) { + super(client, action, new GetSnapshotsRequest()); } /** * Constructs the new get snapshot request with specified repository - * - * @param clusterAdminClient cluster admin client - * @param repository repository name */ - public GetSnapshotsRequestBuilder(ClusterAdminClient clusterAdminClient, String repository) { - super(clusterAdminClient, new GetSnapshotsRequest(repository)); + public GetSnapshotsRequestBuilder(ElasticsearchClient client, GetSnapshotsAction action, String repository) { + super(client, action, new GetSnapshotsRequest(repository)); } /** @@ -76,7 +70,7 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde * @return this builder */ public GetSnapshotsRequestBuilder setCurrentSnapshot() { - request.snapshots(new String[] {GetSnapshotsRequest.CURRENT_SNAPSHOT}); + request.snapshots(new String[]{GetSnapshotsRequest.CURRENT_SNAPSHOT}); return this; } @@ -90,9 +84,4 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde request.snapshots(ObjectArrays.concat(request.snapshots(), snapshots, String.class)); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.getSnapshots(request, listener); - } } diff --git 
a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java index 3e7cbb5e602..3836d2d7fa5 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Restore snapshot action */ -public class RestoreSnapshotAction extends ClusterAction { +public class RestoreSnapshotAction extends Action { public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction(); public static final String NAME = "cluster:admin/snapshot/restore"; @@ -40,8 +40,8 @@ public class RestoreSnapshotAction extends ClusterAction { +public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs new restore snapshot request builder - * - * @param clusterAdminClient cluster admin client */ - public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new RestoreSnapshotRequest()); + public RestoreSnapshotRequestBuilder(ElasticsearchClient client, RestoreSnapshotAction action) { + super(client, action, new RestoreSnapshotRequest()); } /** * Constructs new restore snapshot request builder with specified repository and snapshot names - * - * @param clusterAdminClient cluster admin client - * @param repository reposiory name - * @param name snapshot name */ - public RestoreSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String name) { - super(clusterAdminClient, new 
RestoreSnapshotRequest(repository, name)); + public RestoreSnapshotRequestBuilder(ElasticsearchClient client, RestoreSnapshotAction action, String repository, String name) { + super(client, action, new RestoreSnapshotRequest(repository, name)); } @@ -233,6 +226,7 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui /** * Sets index settings that should be added or replaced during restore + * * @param settings index settings * @return this builder */ @@ -243,7 +237,7 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui /** * Sets index settings that should be added or replaced during restore - + * * @param settings index settings * @return this builder */ @@ -254,7 +248,7 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui /** * Sets index settings that should be added or replaced during restore - + * * @param source index settings * @return this builder */ @@ -265,7 +259,7 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui /** * Sets index settings that should be added or replaced during restore - + * * @param source index settings * @return this builder */ @@ -290,10 +284,4 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui request.ignoreIndexSettings(ignoreIndexSettings); return this; } - - - @Override - protected void doExecute(ActionListener listener) { - client.restoreSnapshot(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java index adfea744547..0a2d26198ec 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusAction.java @@ -19,13 +19,13 @@ package 
org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Snapshots status action */ -public class SnapshotsStatusAction extends ClusterAction { +public class SnapshotsStatusAction extends Action { public static final SnapshotsStatusAction INSTANCE = new SnapshotsStatusAction(); public static final String NAME = "cluster:admin/snapshot/status"; @@ -40,8 +40,8 @@ public class SnapshotsStatusAction extends ClusterAction { +public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new snapshotstatus request - * - * @param clusterAdminClient cluster admin client */ - public SnapshotsStatusRequestBuilder(ClusterAdminClient clusterAdminClient) { - super(clusterAdminClient, new SnapshotsStatusRequest()); + public SnapshotsStatusRequestBuilder(ElasticsearchClient client, SnapshotsStatusAction action) { + super(client, action, new SnapshotsStatusRequest()); } /** * Constructs the new snapshot status request with specified repository - * - * @param clusterAdminClient cluster admin client - * @param repository repository name */ - public SnapshotsStatusRequestBuilder(ClusterAdminClient clusterAdminClient, String repository) { - super(clusterAdminClient, new SnapshotsStatusRequest(repository)); + public SnapshotsStatusRequestBuilder(ElasticsearchClient client, SnapshotsStatusAction action, String repository) { + super(client, action, new SnapshotsStatusRequest(repository)); } /** @@ -80,9 +75,4 @@ public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBui request.snapshots(ObjectArrays.concat(request.snapshots(), snapshots, String.class)); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.snapshotsStatus(request, listener); - } } diff --git 
a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java index a339129ec09..70786f99e0d 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.state; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterStateAction extends ClusterAction { +public class ClusterStateAction extends Action { public static final ClusterStateAction INSTANCE = new ClusterStateAction(); public static final String NAME = "cluster:monitor/state"; @@ -39,7 +39,7 @@ public class ClusterStateAction extends ClusterAction { +public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public ClusterStateRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterStateRequest()); + public ClusterStateRequestBuilder(ElasticsearchClient client, ClusterStateAction action) { + super(client, action, new ClusterStateRequest()); } /** @@ -94,9 +93,4 @@ public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBu request.indicesOptions(indicesOptions); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.state(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java index 34bddac0234..1cddf37ad31 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java +++ 
b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClusterStatsAction extends ClusterAction { +public class ClusterStatsAction extends Action { public static final ClusterStatsAction INSTANCE = new ClusterStatsAction(); public static final String NAME = "cluster:monitor/stats"; @@ -39,7 +39,7 @@ public class ClusterStatsAction extends ClusterAction { - public ClusterStatsRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new ClusterStatsRequest()); - } - - @Override - protected void doExecute(ActionListener listener) { - client.clusterStats(request, listener); + public ClusterStatsRequestBuilder(ElasticsearchClient client, ClusterStatsAction action) { + super(client, action, new ClusterStatsRequest()); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java index b074d054e60..1e3eb3f6e81 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.cluster.tasks; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class PendingClusterTasksAction extends ClusterAction { +public class PendingClusterTasksAction extends Action { public static final PendingClusterTasksAction INSTANCE = new 
PendingClusterTasksAction(); public static final String NAME = "cluster:monitor/task"; @@ -39,7 +39,7 @@ public class PendingClusterTasksAction extends ClusterAction { +public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public PendingClusterTasksRequestBuilder(ClusterAdminClient client) { - super(client, new PendingClusterTasksRequest()); - } - - @Override - protected void doExecute(ActionListener listener) { - client.pendingClusterTasks(request, listener); + public PendingClusterTasksRequestBuilder(ElasticsearchClient client, PendingClusterTasksAction action) { + super(client, action, new PendingClusterTasksRequest()); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java deleted file mode 100644 index 4fae668f77e..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/indices/IndicesAction.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices; - -import org.elasticsearch.action.*; -import org.elasticsearch.client.IndicesAdminClient; - -/** - * Indices action (used with {@link IndicesAdminClient} API). - */ -public abstract class IndicesAction> - extends Action { - - protected IndicesAction(String name) { - super(name); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java index c8dea4db2b9..3cf0ca574fb 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.alias; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class IndicesAliasesAction extends IndicesAction { +public class IndicesAliasesAction extends Action { public static final IndicesAliasesAction INSTANCE = new IndicesAliasesAction(); public static final String NAME = "indices:admin/aliases"; @@ -39,7 +39,7 @@ public class IndicesAliasesAction extends IndicesAction { +public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder { - public IndicesAliasesRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new IndicesAliasesRequest()); + public IndicesAliasesRequestBuilder(ElasticsearchClient client, IndicesAliasesAction action) { + super(client, action, new IndicesAliasesRequest()); } - + /** * Adds an alias to the index. 
* - * @param index The index - * @param alias The alias + * @param index The index + * @param alias The alias */ public IndicesAliasesRequestBuilder addAlias(String index, String alias) { request.addAlias(alias, index); return this; } - + /** * Adds an alias to the index. * * @param indices The indices - * @param alias The alias + * @param alias The alias */ public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) { request.addAlias(alias, indices); @@ -71,32 +70,32 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder filter) { request.addAlias(alias, filter, indices); return this; } - + /** * Adds an alias to the index. * @@ -120,11 +119,11 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.aliases(request, listener); - } - + /** * Adds an alias action to the request. * diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java index 18d281eb99a..23dc1e13a56 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/AliasesExistAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.alias.exists; -import org.elasticsearch.action.admin.indices.IndicesAction; +import org.elasticsearch.action.Action; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class AliasesExistAction extends IndicesAction { +public class AliasesExistAction extends Action { public static final AliasesExistAction INSTANCE = new AliasesExistAction(); public static final String NAME = "indices:admin/aliases/exists"; @@ -35,8 +35,8 @@ public class AliasesExistAction extends IndicesAction { 
- public AliasesExistRequestBuilder(IndicesAdminClient client, String... aliases) { - super(client, aliases); + public AliasesExistRequestBuilder(ElasticsearchClient client, AliasesExistAction action, String... aliases) { + super(client, action, aliases); } - - @Override - protected void doExecute(ActionListener listener) { - client.aliasesExist(request, listener); - } - } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 312a3f65d14..da7c505771c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -20,17 +20,19 @@ package org.elasticsearch.action.admin.indices.alias.get; import com.google.common.collect.ObjectArrays; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.IndicesAdminClient; /** */ -public abstract class BaseAliasesRequestBuilder> extends MasterNodeReadOperationRequestBuilder { +public abstract class BaseAliasesRequestBuilder> extends MasterNodeReadOperationRequestBuilder { - public BaseAliasesRequestBuilder(IndicesAdminClient client, String... aliases) { - super(client, new GetAliasesRequest(aliases)); + public BaseAliasesRequestBuilder(ElasticsearchClient client, Action action, String... 
aliases) { + super(client, action, new GetAliasesRequest(aliases)); } @SuppressWarnings("unchecked") diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java index fd8acb8beba..188f72e1e34 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.alias.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetAliasesAction extends IndicesAction { +public class GetAliasesAction extends Action { public static final GetAliasesAction INSTANCE = new GetAliasesAction(); public static final String NAME = "indices:admin/aliases/get"; @@ -34,8 +34,8 @@ public class GetAliasesAction extends IndicesAction { - public GetAliasesRequestBuilder(IndicesAdminClient client, String... aliases) { - super(client, aliases); + public GetAliasesRequestBuilder(ElasticsearchClient client, GetAliasesAction action, String... 
aliases) { + super(client, action, aliases); } - - @Override - protected void doExecute(ActionListener listener) { - client.getAliases(request, listener); - } - } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index b2843e71099..b29ba7507f8 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.analyze; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class AnalyzeAction extends IndicesAction { +public class AnalyzeAction extends Action { public static final AnalyzeAction INSTANCE = new AnalyzeAction(); public static final String NAME = "indices:admin/analyze"; @@ -39,7 +39,7 @@ public class AnalyzeAction extends IndicesAction { - public AnalyzeRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new AnalyzeRequest()); + public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action) { + super(client, action, new AnalyzeRequest()); } - public AnalyzeRequestBuilder(IndicesAdminClient indicesClient, String index, String text) { - super(indicesClient, new AnalyzeRequest(index).text(text)); + public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action, String index, String text) { + super(client, action, new AnalyzeRequest(index).text(text)); } /** @@ -87,9 +86,4 @@ public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder listener) { - client.analyze(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java 
b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java index 16adc9cb1a4..0880c66802e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.cache.clear; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClearIndicesCacheAction extends IndicesAction { +public class ClearIndicesCacheAction extends Action { public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction(); public static final String NAME = "indices:admin/cache/clear"; @@ -39,7 +39,7 @@ public class ClearIndicesCacheAction extends IndicesAction { +public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder { - public ClearIndicesCacheRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new ClearIndicesCacheRequest()); + public ClearIndicesCacheRequestBuilder(ElasticsearchClient client, ClearIndicesCacheAction action) { + super(client, action, new ClearIndicesCacheRequest()); } public ClearIndicesCacheRequestBuilder setFilterCache(boolean filterCache) { @@ -57,9 +55,4 @@ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBu request.idCache(idCache); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.clearCache(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java index 01e6c3e37a3..fcb38b01da5 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java +++ 
b/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.close; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class CloseIndexAction extends IndicesAction { +public class CloseIndexAction extends Action { public static final CloseIndexAction INSTANCE = new CloseIndexAction(); public static final String NAME = "indices:admin/close"; @@ -39,7 +39,7 @@ public class CloseIndexAction extends IndicesAction { +public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder { - public CloseIndexRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new CloseIndexRequest()); + public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action) { + super(client, action, new CloseIndexRequest()); } - public CloseIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) { - super(indicesClient, new CloseIndexRequest(indices)); + public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action, String... 
indices) { + super(client, action, new CloseIndexRequest(indices)); } /** * Sets the indices to be closed + * * @param indices the indices to be closed * @return the request itself */ @@ -58,9 +58,4 @@ public class CloseIndexRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.close(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java index 520b3d1a0a2..aa3d3ac92ad 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class CreateIndexAction extends IndicesAction { +public class CreateIndexAction extends Action { public static final CreateIndexAction INSTANCE = new CreateIndexAction(); public static final String NAME = "indices:admin/create"; @@ -39,7 +39,7 @@ public class CreateIndexAction extends IndicesAction { +public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder { - public CreateIndexRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new CreateIndexRequest()); + public CreateIndexRequestBuilder(ElasticsearchClient client, CreateIndexAction action) { + super(client, action, new CreateIndexRequest()); } - public CreateIndexRequestBuilder(IndicesAdminClient indicesClient, String index) { - super(indicesClient, new CreateIndexRequest(index)); + public CreateIndexRequestBuilder(ElasticsearchClient client, CreateIndexAction action, String index) { + super(client, action, new CreateIndexRequest(index)); } /** @@ -244,9 +243,4 @@ 
public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.create(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java index ac9d5eba86f..135aacc197b 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.delete; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class DeleteIndexAction extends IndicesAction { +public class DeleteIndexAction extends Action { public static final DeleteIndexAction INSTANCE = new DeleteIndexAction(); public static final String NAME = "indices:admin/delete"; @@ -39,7 +39,7 @@ public class DeleteIndexAction extends IndicesAction { +public class DeleteIndexRequestBuilder extends MasterNodeOperationRequestBuilder { - public DeleteIndexRequestBuilder(IndicesAdminClient indicesClient, String... indices) { - super(indicesClient, new DeleteIndexRequest(indices)); + public DeleteIndexRequestBuilder(ElasticsearchClient client, DeleteIndexAction action, String... indices) { + super(client, action, new DeleteIndexRequest(indices)); } /** @@ -54,16 +53,11 @@ public class DeleteIndexRequestBuilder extends MasterNodeOperationRequestBuilder /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *

* For example indices that don't exist. */ public DeleteIndexRequestBuilder setIndicesOptions(IndicesOptions options) { request.indicesOptions(options); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.delete(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java index 734e20e407f..b9bfa00d7a2 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.exists.indices; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class IndicesExistsAction extends IndicesAction { +public class IndicesExistsAction extends Action { public static final IndicesExistsAction INSTANCE = new IndicesExistsAction(); public static final String NAME = "indices:admin/exists"; @@ -39,7 +39,7 @@ public class IndicesExistsAction extends IndicesAction { +public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public IndicesExistsRequestBuilder(IndicesAdminClient indicesClient, String... indices) { - super(indicesClient, new IndicesExistsRequest(indices)); + public IndicesExistsRequestBuilder(ElasticsearchClient client, IndicesExistsAction action, String... indices) { + super(client, action, new IndicesExistsRequest(indices)); } public IndicesExistsRequestBuilder setIndices(String... 
indices) { @@ -40,16 +39,11 @@ public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestB /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *

* For example indices that don't exist. */ public IndicesExistsRequestBuilder setIndicesOptions(IndicesOptions options) { request.indicesOptions(options); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.exists(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java index 9feb5568668..b7fea539d13 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsAction.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.action.admin.indices.exists.types; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class TypesExistsAction extends IndicesAction { +public class TypesExistsAction extends Action { public static final TypesExistsAction INSTANCE = new TypesExistsAction(); public static final String NAME = "indices:admin/types/exists"; @@ -38,7 +38,7 @@ public class TypesExistsAction extends IndicesAction { +public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { /** * @param indices What indices to check for types */ - public TypesExistsRequestBuilder(IndicesAdminClient indicesClient, String... indices) { - super(indicesClient, new TypesExistsRequest(indices, Strings.EMPTY_ARRAY)); + public TypesExistsRequestBuilder(ElasticsearchClient client, TypesExistsAction action, String... 
indices) { + super(client, action, new TypesExistsRequest(indices, Strings.EMPTY_ARRAY)); } - TypesExistsRequestBuilder(IndicesAdminClient client) { - super(client, new TypesExistsRequest()); + TypesExistsRequestBuilder(ElasticsearchClient client, TypesExistsAction action) { + super(client, action, new TypesExistsRequest()); } /** @@ -63,9 +62,4 @@ public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBui request.indicesOptions(indicesOptions); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.typesExists(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java index 78e46715139..4cc116fd1dd 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class FlushAction extends IndicesAction { +public class FlushAction extends Action { public static final FlushAction INSTANCE = new FlushAction(); public static final String NAME = "indices:admin/flush"; @@ -39,7 +39,7 @@ public class FlushAction extends IndicesAction { +public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { - public FlushRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new FlushRequest()); + public FlushRequestBuilder(ElasticsearchClient client, FlushAction action) { + super(client, action, new FlushRequest()); } public FlushRequestBuilder setForce(boolean force) { @@ -37,11 +36,6 @@ public class FlushRequestBuilder extends BroadcastOperationRequestBuilder 
listener) { - client.flush(request, listener); - } - public FlushRequestBuilder setWaitIfOngoing(boolean waitIfOngoing) { request.waitIfOngoing(waitIfOngoing); return this; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java index 6481808c616..74111c82b2f 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetIndexAction extends IndicesAction { +public class GetIndexAction extends Action { public static final GetIndexAction INSTANCE = new GetIndexAction(); public static final String NAME = "indices:admin/get"; @@ -34,8 +34,8 @@ public class GetIndexAction extends IndicesAction { - public GetIndexRequestBuilder(IndicesAdminClient client, String... indices) { - super(client, new GetIndexRequest().indices(indices)); + public GetIndexRequestBuilder(ElasticsearchClient client, GetIndexAction action, String... indices) { + super(client, action, new GetIndexRequest().indices(indices)); } public GetIndexRequestBuilder setFeatures(Feature... 
features) { @@ -42,9 +41,4 @@ public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder listener) { - client.getIndex(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java index 222b99762c5..520ed7b2cd4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetFieldMappingsAction extends IndicesAction { +public class GetFieldMappingsAction extends Action { public static final GetFieldMappingsAction INSTANCE = new GetFieldMappingsAction(); public static final String NAME = "indices:admin/mappings/fields/get"; @@ -34,8 +34,8 @@ public class GetFieldMappingsAction extends IndicesAction { +public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder { - public GetFieldMappingsRequestBuilder(IndicesAdminClient client, String... indices) { - super(client, new GetFieldMappingsRequest().indices(indices)); + public GetFieldMappingsRequestBuilder(ElasticsearchClient client, GetFieldMappingsAction action, String... indices) { + super(client, action, new GetFieldMappingsRequest().indices(indices)); } public GetFieldMappingsRequestBuilder setIndices(String... 
indices) { @@ -69,10 +70,4 @@ public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder listener) { - client.getFieldMappings(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java index 2ac4d58bc88..d7dbebc600c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetMappingsAction extends IndicesAction { +public class GetMappingsAction extends Action { public static final GetMappingsAction INSTANCE = new GetMappingsAction(); public static final String NAME = "indices:admin/mappings/get"; @@ -34,8 +34,8 @@ public class GetMappingsAction extends IndicesAction { - public GetMappingsRequestBuilder(IndicesAdminClient client, String... indices) { - super(client, new GetMappingsRequest().indices(indices)); - } - - @Override - protected void doExecute(ActionListener listener) { - client.getMappings(request, listener); + public GetMappingsRequestBuilder(ElasticsearchClient client, GetMappingsAction action, String... 
indices) { + super(client, action, new GetMappingsRequest().indices(indices)); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index ee4a6d6f076..3c8d1b7affa 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -64,8 +64,6 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction() { @Override public void onResponse(GetFieldMappingsResponse result) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java index 01bfccdb46c..5ed79ceca98 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.mapping.put; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class PutMappingAction extends IndicesAction { +public class PutMappingAction extends Action { public static final PutMappingAction INSTANCE = new PutMappingAction(); public static final String NAME = "indices:admin/mapping/put"; @@ -39,7 +39,7 @@ public class PutMappingAction extends IndicesAction { +public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder { - public PutMappingRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new PutMappingRequest()); + public PutMappingRequestBuilder(ElasticsearchClient client, 
PutMappingAction action) { + super(client, action, new PutMappingRequest()); } public PutMappingRequestBuilder setIndices(String... indices) { @@ -43,7 +42,7 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder * For example indices that don't exist. */ public PutMappingRequestBuilder setIndicesOptions(IndicesOptions options) { @@ -101,9 +100,4 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.putMapping(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java index 09bd6ff8f5a..c12e8d23c9c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.open; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class OpenIndexAction extends IndicesAction { +public class OpenIndexAction extends Action { public static final OpenIndexAction INSTANCE = new OpenIndexAction(); public static final String NAME = "indices:admin/open"; @@ -39,7 +39,7 @@ public class OpenIndexAction extends IndicesAction { +public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder { - public OpenIndexRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new OpenIndexRequest()); + public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action) { + super(client, action, new OpenIndexRequest()); } - public OpenIndexRequestBuilder(IndicesAdminClient indicesClient, String... 
indices) { - super(indicesClient, new OpenIndexRequest(indices)); + public OpenIndexRequestBuilder(ElasticsearchClient client, OpenIndexAction action, String... indices) { + super(client, action, new OpenIndexRequest(indices)); } /** * Sets the indices to be opened + * * @param indices the indices to be opened * @return the request itself */ @@ -58,9 +58,4 @@ public class OpenIndexRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.open(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java index 65d2ce43f01..b44d372f7c9 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.optimize; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class OptimizeAction extends IndicesAction { +public class OptimizeAction extends Action { public static final OptimizeAction INSTANCE = new OptimizeAction(); public static final String NAME = "indices:admin/optimize"; @@ -39,7 +39,7 @@ public class OptimizeAction extends IndicesAction{@link #setMaxNumSegments(int)} allows to control the number of segments to optimize down to. By default, will * cause the optimize process to optimize down to half the configured number of segments. 
*/ -public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder { +public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder { - public OptimizeRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new OptimizeRequest()); + public OptimizeRequestBuilder(ElasticsearchClient client, OptimizeAction action) { + super(client, action, new OptimizeRequest()); } /** @@ -61,9 +60,4 @@ public class OptimizeRequestBuilder extends BroadcastOperationRequestBuilder listener) { - client.optimize(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java index 1b210c004a0..a8848a23edc 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.recovery; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.action.admin.indices.IndicesAction; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Recovery information action */ -public class RecoveryAction extends IndicesAction { +public class RecoveryAction extends Action { public static final RecoveryAction INSTANCE = new RecoveryAction(); public static final String NAME = "indices:monitor/recovery"; @@ -35,8 +35,8 @@ public class RecoveryAction extends IndicesAction { +public class RecoveryRequestBuilder extends BroadcastOperationRequestBuilder { /** * Constructs a new recovery information request builder. 
- * - * @param indicesClient Indices admin client */ - public RecoveryRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new RecoveryRequest()); - } - - @Override - protected void doExecute(ActionListener listener) { - client.recoveries(request, listener); + public RecoveryRequestBuilder(ElasticsearchClient client, RecoveryAction action) { + super(client, action, new RecoveryRequest()); } public RecoveryRequestBuilder setDetailed(boolean detailed) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index 985de9fd85d..79db06ec3f0 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.refresh; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class RefreshAction extends IndicesAction { +public class RefreshAction extends Action { public static final RefreshAction INSTANCE = new RefreshAction(); public static final String NAME = "indices:admin/refresh"; @@ -39,7 +39,7 @@ public class RefreshAction extends IndicesAction { +public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { - public RefreshRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new RefreshRequest()); - } - - @Override - protected void doExecute(ActionListener listener) { - client.refresh(request, listener); + public RefreshRequestBuilder(ElasticsearchClient client, RefreshAction action) { + super(client, action, new RefreshRequest()); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java 
b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java index 80a01d8ae3c..d98bc56aa5c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class IndicesSegmentsAction extends IndicesAction { +public class IndicesSegmentsAction extends Action { public static final IndicesSegmentsAction INSTANCE = new IndicesSegmentsAction(); public static final String NAME = "indices:monitor/segments"; @@ -39,7 +39,7 @@ public class IndicesSegmentsAction extends IndicesAction { +public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder { - public IndicesSegmentsRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new IndicesSegmentsRequest()); + public IndicesSegmentsRequestBuilder(ElasticsearchClient client, IndicesSegmentsAction action) { + super(client, action, new IndicesSegmentsRequest()); } - + public IndicesSegmentsRequestBuilder setVerbose(boolean verbose) { request.verbose = verbose; return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.segments(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java index 0b729bd7999..448a5e25fbd 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsAction.java @@ -19,12 +19,12 @@ package 
org.elasticsearch.action.admin.indices.settings.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetSettingsAction extends IndicesAction { +public class GetSettingsAction extends Action { public static final GetSettingsAction INSTANCE = new GetSettingsAction(); public static final String NAME = "indices:monitor/settings/get"; @@ -34,8 +34,8 @@ public class GetSettingsAction extends IndicesAction { +public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public GetSettingsRequestBuilder(IndicesAdminClient client, String... indices) { - super(client, new GetSettingsRequest().indices(indices)); + public GetSettingsRequestBuilder(ElasticsearchClient client, GetSettingsAction action, String... indices) { + super(client, action, new GetSettingsRequest().indices(indices)); } public GetSettingsRequestBuilder setIndices(String... indices) { @@ -45,7 +44,7 @@ public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBui /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *

* For example indices that don't exist. */ public GetSettingsRequestBuilder setIndicesOptions(IndicesOptions options) { @@ -57,9 +56,4 @@ public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBui request.names(names); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.getSettings(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java index df67115bb3a..17001b7376d 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.settings.put; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class UpdateSettingsAction extends IndicesAction { +public class UpdateSettingsAction extends Action { public static final UpdateSettingsAction INSTANCE = new UpdateSettingsAction(); public static final String NAME = "indices:admin/settings/update"; @@ -39,7 +39,7 @@ public class UpdateSettingsAction extends IndicesAction { +public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { - public UpdateSettingsRequestBuilder(IndicesAdminClient indicesClient, String... indices) { - super(indicesClient, new UpdateSettingsRequest(indices)); + public UpdateSettingsRequestBuilder(ElasticsearchClient client, UpdateSettingsAction action, String... indices) { + super(client, action, new UpdateSettingsRequest(indices)); } /** @@ -46,7 +46,7 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder * For example indices that don't exist. 
*/ public UpdateSettingsRequestBuilder setIndicesOptions(IndicesOptions options) { @@ -85,9 +85,4 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.updateSettings(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java index 19b4f5ceca3..d83e368b216 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class IndicesStatsAction extends IndicesAction { +public class IndicesStatsAction extends Action { public static final IndicesStatsAction INSTANCE = new IndicesStatsAction(); public static final String NAME = "indices:monitor/stats"; @@ -39,7 +39,7 @@ public class IndicesStatsAction extends IndicesActionAll the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. 
*/ -public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder { +public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder { - public IndicesStatsRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new IndicesStatsRequest()); + public IndicesStatsRequestBuilder(ElasticsearchClient client, IndicesStatsAction action) { + super(client, action, new IndicesStatsRequest()); } /** @@ -172,9 +171,4 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder request.recovery(recovery); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.stats(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index e826b76f971..570ced293d8 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.template.delete; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class DeleteIndexTemplateAction extends IndicesAction { +public class DeleteIndexTemplateAction extends Action { public static final DeleteIndexTemplateAction INSTANCE = new DeleteIndexTemplateAction(); public static final String NAME = "indices:admin/template/delete"; @@ -39,7 +39,7 @@ public class DeleteIndexTemplateAction extends IndicesAction { +public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder { - public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient) { - 
super(indicesClient, new DeleteIndexTemplateRequest()); + public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) { + super(client, action, new DeleteIndexTemplateRequest()); } - public DeleteIndexTemplateRequestBuilder(IndicesAdminClient indicesClient, String name) { - super(indicesClient, new DeleteIndexTemplateRequest(name)); - } - - @Override - protected void doExecute(ActionListener listener) { - client.deleteTemplate(request, listener); + public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action, String name) { + super(client, action, new DeleteIndexTemplateRequest(name)); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java index de3ded1a64e..b4db5e0529f 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesAction.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.action.admin.indices.template.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * */ -public class GetIndexTemplatesAction extends IndicesAction { +public class GetIndexTemplatesAction extends Action { public static final GetIndexTemplatesAction INSTANCE = new GetIndexTemplatesAction(); public static final String NAME = "indices:admin/template/get"; @@ -39,7 +39,7 @@ public class GetIndexTemplatesAction extends IndicesAction { +public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder { - public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new 
GetIndexTemplatesRequest()); + public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) { + super(client, action, new GetIndexTemplatesRequest()); } - public GetIndexTemplatesRequestBuilder(IndicesAdminClient indicesClient, String... names) { - super(indicesClient, new GetIndexTemplatesRequest(names)); - } - - @Override - protected void doExecute(ActionListener listener) { - client.getTemplates(request, listener); + public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action, String... names) { + super(client, action, new GetIndexTemplatesRequest(names)); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java index 00f98bc46b6..51adc0b5cfc 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.template.put; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class PutIndexTemplateAction extends IndicesAction { +public class PutIndexTemplateAction extends Action { public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction(); public static final String NAME = "indices:admin/template/put"; @@ -39,7 +39,7 @@ public class PutIndexTemplateAction extends IndicesAction { +public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder { - public PutIndexTemplateRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new PutIndexTemplateRequest()); + public 
PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action) { + super(client, action, new PutIndexTemplateRequest()); } - public PutIndexTemplateRequestBuilder(IndicesAdminClient indicesClient, String name) { - super(indicesClient, new PutIndexTemplateRequest(name)); + public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action, String name) { + super(client, action, new PutIndexTemplateRequest(name)); } /** @@ -153,7 +153,7 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu /** * Adds an alias that will be added when the index template gets created. * - * @param alias The alias + * @param alias The alias * @return the request builder */ public PutIndexTemplateRequestBuilder addAlias(Alias alias) { @@ -238,9 +238,4 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu request.source(templateSource, offset, length); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.putTemplate(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java index 76bb9523183..fdec5490c97 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ValidateQueryAction extends IndicesAction { +public class ValidateQueryAction extends Action { public static final ValidateQueryAction INSTANCE = new 
ValidateQueryAction(); public static final String NAME = "indices:admin/validate/query"; @@ -39,7 +39,7 @@ public class ValidateQueryAction extends IndicesAction { +public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder { private QuerySourceBuilder sourceBuilder; - public ValidateQueryRequestBuilder(IndicesAdminClient client) { - super(client, new ValidateQueryRequest()); + public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) { + super(client, action, new ValidateQueryRequest()); } /** @@ -94,12 +94,11 @@ public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilde } @Override - protected void doExecute(ActionListener listener) { + protected ValidateQueryRequest beforeExecute(ValidateQueryRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder); } - - client.validateQuery(request, listener); + return request; } private QuerySourceBuilder sourceBuilder() { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java index 9c3e4a001a0..86c447d3ca4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/DeleteWarmerAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.warmer.delete; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Action for the admin/warmers/delete API. 
*/ -public class DeleteWarmerAction extends IndicesAction { +public class DeleteWarmerAction extends Action { public static final DeleteWarmerAction INSTANCE = new DeleteWarmerAction(); public static final String NAME = "indices:admin/warmers/delete"; @@ -40,7 +40,7 @@ public class DeleteWarmerAction extends IndicesAction { +public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder { - public DeleteWarmerRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new DeleteWarmerRequest()); + public DeleteWarmerRequestBuilder(ElasticsearchClient client, DeleteWarmerAction action) { + super(client, action, new DeleteWarmerRequest()); } public DeleteWarmerRequestBuilder setIndices(String... indices) { @@ -50,16 +50,11 @@ public class DeleteWarmerRequestBuilder extends AcknowledgedRequestBuilder * For example indices that don't exist. */ public DeleteWarmerRequestBuilder setIndicesOptions(IndicesOptions options) { request.indicesOptions(options); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.deleteWarmer(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java index 4170509b904..e2debde72a6 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.warmer.get; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Action for the admin/warmers/get API. 
*/ -public class GetWarmersAction extends IndicesAction { +public class GetWarmersAction extends Action { public static final GetWarmersAction INSTANCE = new GetWarmersAction(); public static final String NAME = "indices:admin/warmers/get"; @@ -35,8 +35,8 @@ public class GetWarmersAction extends IndicesAction { - public GetWarmersRequestBuilder(IndicesAdminClient client, String... indices) { - super(client, new GetWarmersRequest().indices(indices)); + public GetWarmersRequestBuilder(ElasticsearchClient client, GetWarmersAction action, String... indices) { + super(client, action, new GetWarmersRequest().indices(indices)); } public GetWarmersRequestBuilder setWarmers(String... warmers) { @@ -44,9 +43,4 @@ public class GetWarmersRequestBuilder extends ClusterInfoRequestBuilder listener) { - client.getWarmers(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java index 4b295286de3..3c5c8b7c412 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.admin.indices.warmer.put; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Action for the admin/warmers/put API. 
*/ -public class PutWarmerAction extends IndicesAction { +public class PutWarmerAction extends Action { public static final PutWarmerAction INSTANCE = new PutWarmerAction(); public static final String NAME = "indices:admin/warmers/put"; @@ -40,7 +40,7 @@ public class PutWarmerAction extends IndicesAction { +public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder { /** * Creates a new {@link PutWarmerRequestBuilder} with a given name. */ - public PutWarmerRequestBuilder(IndicesAdminClient indicesClient, String name) { - super(indicesClient, new PutWarmerRequest().name(name)); + public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action, String name) { + super(client, action, new PutWarmerRequest().name(name)); } /** * Creates a new {@link PutWarmerRequestBuilder} * Note: {@link #setName(String)} must be called with a non-null value before this request is executed. */ - public PutWarmerRequestBuilder(IndicesAdminClient indicesClient) { - super(indicesClient, new PutWarmerRequest()); + public PutWarmerRequestBuilder(ElasticsearchClient client, PutWarmerAction action) { + super(client, action, new PutWarmerRequest()); } /** @@ -69,9 +69,4 @@ public class PutWarmerRequestBuilder extends AcknowledgedRequestBuilder listener) { - client.putWarmer(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/src/main/java/org/elasticsearch/action/bulk/BulkAction.java index e7a8ea13de8..42d0c22508b 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkAction.java @@ -19,14 +19,14 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; /** */ -public 
class BulkAction extends ClientAction { +public class BulkAction extends Action { public static final BulkAction INSTANCE = new BulkAction(); public static final String NAME = "indices:data/write/bulk"; @@ -41,8 +41,8 @@ public class BulkAction extends ClientAction { +public class BulkRequestBuilder extends ActionRequestBuilder { - public BulkRequestBuilder(Client client) { - super(client, new BulkRequest()); + public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { + super(client, action, new BulkRequest()); } /** @@ -149,9 +148,4 @@ public class BulkRequestBuilder extends ActionRequestBuilder listener) { - client.bulk(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/count/CountAction.java b/src/main/java/org/elasticsearch/action/count/CountAction.java index 319ca5366d5..4c7c8a2fcc2 100644 --- a/src/main/java/org/elasticsearch/action/count/CountAction.java +++ b/src/main/java/org/elasticsearch/action/count/CountAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.count; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class CountAction extends ClientAction { +public class CountAction extends Action { public static final CountAction INSTANCE = new CountAction(); public static final String NAME = "indices:data/read/count"; @@ -39,7 +39,7 @@ public class CountAction extends ClientAction { +public class CountRequestBuilder extends BroadcastOperationRequestBuilder { private QuerySourceBuilder sourceBuilder; - public CountRequestBuilder(Client client) { - super(client, new CountRequest()); + public CountRequestBuilder(ElasticsearchClient client, CountAction action) { + super(client, action, new CountRequest()); } /** @@ -101,7 +100,7 @@ public class CountRequestBuilder extends BroadcastOperationRequestBuilder listener) { + protected CountRequest 
beforeExecute(CountRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder); } - - client.count(request, listener); + return request; } private QuerySourceBuilder sourceBuilder() { @@ -155,7 +153,7 @@ public class CountRequestBuilder extends BroadcastOperationRequestBuilder { +public class DeleteAction extends Action { public static final DeleteAction INSTANCE = new DeleteAction(); public static final String NAME = "indices:data/write/delete"; @@ -39,7 +39,7 @@ public class DeleteAction extends ClientAction { - public DeleteRequestBuilder(Client client) { - super(client, new DeleteRequest()); + public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { + super(client, action, new DeleteRequest()); } - public DeleteRequestBuilder(Client client, @Nullable String index) { - super(client, new DeleteRequest(index)); + public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action, @Nullable String index) { + super(client, action, new DeleteRequest(index)); } /** @@ -98,9 +97,4 @@ public class DeleteRequestBuilder extends ShardReplicationOperationRequestBuilde request.versionType(versionType); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.delete(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/exists/ExistsAction.java b/src/main/java/org/elasticsearch/action/exists/ExistsAction.java index e37f81c5e44..d4463aea0d6 100644 --- a/src/main/java/org/elasticsearch/action/exists/ExistsAction.java +++ b/src/main/java/org/elasticsearch/action/exists/ExistsAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.exists; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; -public class ExistsAction extends ClientAction { +public class ExistsAction extends Action { public static final ExistsAction INSTANCE = new 
ExistsAction(); public static final String NAME = "indices:data/read/exists"; @@ -37,7 +37,7 @@ public class ExistsAction extends ClientAction { - +public class ExistsRequestBuilder extends BroadcastOperationRequestBuilder { private QuerySourceBuilder sourceBuilder; - public ExistsRequestBuilder(Client client) { - super(client, new ExistsRequest()); + public ExistsRequestBuilder(ElasticsearchClient client, ExistsAction action) { + super(client, action, new ExistsRequest()); } /** @@ -104,12 +102,11 @@ public class ExistsRequestBuilder extends BroadcastOperationRequestBuilder listener) { + protected ExistsRequest beforeExecute(ExistsRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder); } - - client.exists(request, listener); + return request; } private QuerySourceBuilder sourceBuilder() { diff --git a/src/main/java/org/elasticsearch/action/explain/ExplainAction.java b/src/main/java/org/elasticsearch/action/explain/ExplainAction.java index 3d7645db09b..b48530ce527 100644 --- a/src/main/java/org/elasticsearch/action/explain/ExplainAction.java +++ b/src/main/java/org/elasticsearch/action/explain/ExplainAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.explain; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** * Entry point for the explain feature. 
*/ -public class ExplainAction extends ClientAction { +public class ExplainAction extends Action { public static final ExplainAction INSTANCE = new ExplainAction(); public static final String NAME = "indices:data/read/explain"; @@ -35,8 +35,8 @@ public class ExplainAction extends ClientAction listener) { + protected ExplainRequest beforeExecute(ExplainRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder); } - - client.explain(request, listener); + return request; } private QuerySourceBuilder sourceBuilder() { diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java index fb4a3f77833..085952c9be6 100644 --- a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.fieldstats; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class FieldStatsAction extends ClientAction { +public class FieldStatsAction extends Action { public static final FieldStatsAction INSTANCE = new FieldStatsAction(); public static final String NAME = "indices:data/read/field_stats"; @@ -39,7 +39,7 @@ public class FieldStatsAction extends ClientAction { +public class FieldStatsRequestBuilder extends BroadcastOperationRequestBuilder { - public FieldStatsRequestBuilder(Client client) { - super(client, new FieldStatsRequest()); + public FieldStatsRequestBuilder(ElasticsearchClient client, FieldStatsAction action) { + super(client, action, new FieldStatsRequest()); } public FieldStatsRequestBuilder setFields(String... 
fields) { @@ -40,9 +39,4 @@ public class FieldStatsRequestBuilder extends BroadcastOperationRequestBuilder listener) { - client.fieldStats(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/get/GetAction.java b/src/main/java/org/elasticsearch/action/get/GetAction.java index 93e4f26e150..eb499ffb8c2 100644 --- a/src/main/java/org/elasticsearch/action/get/GetAction.java +++ b/src/main/java/org/elasticsearch/action/get/GetAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.get; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetAction extends ClientAction { +public class GetAction extends Action { public static final GetAction INSTANCE = new GetAction(); public static final String NAME = "indices:data/read/get"; @@ -39,7 +39,7 @@ public class GetAction extends ClientAction { - public GetRequestBuilder(Client client) { - super(client, new GetRequest()); + public GetRequestBuilder(ElasticsearchClient client, GetAction action) { + super(client, action, new GetRequest()); } - public GetRequestBuilder(Client client, @Nullable String index) { - super(client, new GetRequest(index)); + public GetRequestBuilder(ElasticsearchClient client, GetAction action, @Nullable String index) { + super(client, action, new GetRequest(index)); } /** @@ -96,14 +95,14 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder listener) { - client.get(request, listener); - } - - } diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetAction.java b/src/main/java/org/elasticsearch/action/get/MultiGetAction.java index 0a15892a057..38036d391e8 100644 --- a/src/main/java/org/elasticsearch/action/get/MultiGetAction.java +++ b/src/main/java/org/elasticsearch/action/get/MultiGetAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.get; -import 
org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class MultiGetAction extends ClientAction { +public class MultiGetAction extends Action { public static final MultiGetAction INSTANCE = new MultiGetAction(); public static final String NAME = "indices:data/read/mget"; @@ -39,7 +39,7 @@ public class MultiGetAction extends ClientAction { +public class MultiGetRequestBuilder extends ActionRequestBuilder { - public MultiGetRequestBuilder(Client client) { - super(client, new MultiGetRequest()); + public MultiGetRequestBuilder(ElasticsearchClient client, MultiGetAction action) { + super(client, action, new MultiGetRequest()); } public MultiGetRequestBuilder add(String index, @Nullable String type, String id) { @@ -86,9 +85,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder listener) { - client.multiGet(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/index/IndexAction.java b/src/main/java/org/elasticsearch/action/index/IndexAction.java index 1f6e5a5c8ea..ceacbcf2df7 100644 --- a/src/main/java/org/elasticsearch/action/index/IndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/IndexAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.index; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class IndexAction extends ClientAction { +public class IndexAction extends Action { public static final IndexAction INSTANCE = new IndexAction(); public static final String NAME = "indices:data/write/index"; @@ -39,7 +39,7 @@ public class IndexAction extends ClientAction { - public IndexRequestBuilder(Client client) { - super(client, new IndexRequest()); + public IndexRequestBuilder(ElasticsearchClient client, IndexAction action) { 
+ super(client, action, new IndexRequest()); } - public IndexRequestBuilder(Client client, @Nullable String index) { - super(client, new IndexRequest(index)); + public IndexRequestBuilder(ElasticsearchClient client, IndexAction action, @Nullable String index) { + super(client, action, new IndexRequest(index)); } /** @@ -180,7 +179,7 @@ public class IndexRequestBuilder extends ShardReplicationOperationRequestBuilder /** * Constructs a simple document with a field name and value pairs. - * Note: the number of objects passed to this method must be an even number. + * Note: the number of objects passed to this method must be an even number. */ public IndexRequestBuilder setSource(Object... source) { request.source(source); @@ -260,9 +259,4 @@ public class IndexRequestBuilder extends ShardReplicationOperationRequestBuilder request.ttl(ttl); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.index(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/delete/DeleteIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/delete/DeleteIndexedScriptAction.java index 94009796b19..4ccbdf7801d 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/delete/DeleteIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/delete/DeleteIndexedScriptAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.indexedscripts.delete; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class DeleteIndexedScriptAction extends ClientAction { +public class DeleteIndexedScriptAction extends Action { public static final DeleteIndexedScriptAction INSTANCE = new DeleteIndexedScriptAction(); public static final String NAME = "indices:data/write/script/delete"; @@ -39,7 +39,7 @@ public class 
DeleteIndexedScriptAction extends ClientAction { +public class DeleteIndexedScriptRequestBuilder extends ActionRequestBuilder { - public DeleteIndexedScriptRequestBuilder(Client client) { - super(client, new DeleteIndexedScriptRequest()); + public DeleteIndexedScriptRequestBuilder(ElasticsearchClient client, DeleteIndexedScriptAction action) { + super(client, action, new DeleteIndexedScriptRequest()); } /** @@ -56,9 +55,4 @@ public class DeleteIndexedScriptRequestBuilder extends ActionRequestBuilder listener) { - client.deleteIndexedScript(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptAction.java index a909b78d289..898d33691f6 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.indexedscripts.get; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class GetIndexedScriptAction extends ClientAction { +public class GetIndexedScriptAction extends Action { public static final GetIndexedScriptAction INSTANCE = new GetIndexedScriptAction(); public static final String NAME = "indices:data/read/script/get"; @@ -39,8 +39,8 @@ public class GetIndexedScriptAction extends ClientAction { +public class GetIndexedScriptRequestBuilder extends ActionRequestBuilder { - public GetIndexedScriptRequestBuilder(Client client) { - super(client, new GetIndexedScriptRequest()); + public GetIndexedScriptRequestBuilder(ElasticsearchClient client, GetIndexedScriptAction action) { + super(client, action, new GetIndexedScriptRequest()); } /** @@ -68,10 +67,4 @@ public class GetIndexedScriptRequestBuilder extends 
ActionRequestBuilder listener) { - client.getIndexedScript(request, listener); - } - } diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/put/PutIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/put/PutIndexedScriptAction.java index 3949e05af18..e0f364b0ad1 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/put/PutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/put/PutIndexedScriptAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.indexedscripts.put; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class PutIndexedScriptAction extends ClientAction { +public class PutIndexedScriptAction extends Action { public static final PutIndexedScriptAction INSTANCE = new PutIndexedScriptAction(); public static final String NAME = "indices:data/write/script/put"; @@ -42,7 +42,7 @@ public class PutIndexedScriptAction extends ClientAction { +public class PutIndexedScriptRequestBuilder extends ActionRequestBuilder { - public PutIndexedScriptRequestBuilder(Client client) { - super(client, new PutIndexedScriptRequest()); + public PutIndexedScriptRequestBuilder(ElasticsearchClient client, PutIndexedScriptAction action) { + super(client, action, new PutIndexedScriptRequest()); } /** @@ -126,7 +125,7 @@ public class PutIndexedScriptRequestBuilder extends ActionRequestBuilderNote: the number of objects passed to this method must be an even number. + * Note: the number of objects passed to this method must be an even number. */ public PutIndexedScriptRequestBuilder setSource(Object... 
source) { request.source(source); @@ -182,27 +181,4 @@ public class PutIndexedScriptRequestBuilder extends ActionRequestBuilder listener) { - client.putIndexedScript(request, listener); - /* - try { - scriptService.putScriptToIndex(client, request.safeSource(), request.id(), request.scriptLang(), null, request.opType().toString(), new ActionListener() { - @Override - public void onResponse(IndexResponse indexResponse) { - listener.onResponse(new PutIndexedScriptResponse(indexResponse.getType(),indexResponse.getId(),indexResponse.getVersion(),indexResponse.isCreated())); - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }); - } catch (IOException ioe) { - listener.onFailure(ioe); - } - */ - } } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java index b2561650d77..83313dfc095 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.mlt; -import org.elasticsearch.action.ClientAction; +import org.elasticsearch.action.Action; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class MoreLikeThisAction extends ClientAction { +public class MoreLikeThisAction extends Action { public static final MoreLikeThisAction INSTANCE = new MoreLikeThisAction(); public static final String NAME = "indices:data/read/mlt"; @@ -40,7 +40,7 @@ public class MoreLikeThisAction extends ClientAction { +public class MoreLikeThisRequestBuilder extends ActionRequestBuilder { - public MoreLikeThisRequestBuilder(Client client) { - super(client, new MoreLikeThisRequest()); + public MoreLikeThisRequestBuilder(ElasticsearchClient client, MoreLikeThisAction action) { + super(client, action, new 
MoreLikeThisRequest()); } - public MoreLikeThisRequestBuilder(Client client, String index, String type, String id) { - super(client, new MoreLikeThisRequest(index).type(type).id(id)); + public MoreLikeThisRequestBuilder(ElasticsearchClient client, MoreLikeThisAction action, String index, String type, String id) { + super(client, action, new MoreLikeThisRequest(index).type(type).id(id)); } /** @@ -63,7 +62,7 @@ public class MoreLikeThisRequestBuilder extends ActionRequestBuilder30%. * - * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) + * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) */ public MoreLikeThisRequestBuilder setMinimumShouldMatch(String minimumShouldMatch) { request.minimumShouldMatch(minimumShouldMatch); @@ -259,10 +258,4 @@ public class MoreLikeThisRequestBuilder extends ActionRequestBuilder listener) { - client.moreLikeThis(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java index ab119e169f0..679ef533307 100644 --- a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java @@ -119,7 +119,6 @@ public class TransportMoreLikeThisAction extends HandledTransportAction() { @@ -197,8 +196,7 @@ public class TransportMoreLikeThisAction extends HandledTransportAction { +public class MultiPercolateAction extends Action { public static final MultiPercolateAction INSTANCE = new MultiPercolateAction(); public static final String NAME = "indices:data/read/mpercolate"; @@ -38,8 +38,8 @@ public class MultiPercolateAction extends ClientAction { +public class MultiPercolateRequestBuilder extends ActionRequestBuilder { - public MultiPercolateRequestBuilder(Client client) { - super(client, new MultiPercolateRequest()); + public 
MultiPercolateRequestBuilder(ElasticsearchClient client, MultiPercolateAction action) { + super(client, action, new MultiPercolateRequest()); } /** @@ -50,16 +49,11 @@ public class MultiPercolateRequestBuilder extends ActionRequestBuilder * Invoke this method before invoking {@link #add(PercolateRequestBuilder)}. */ public MultiPercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { request.indicesOptions(indicesOptions); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.multiPercolate(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java index 6d85d207719..412f4e3ed62 100644 --- a/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/PercolateAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.percolate; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class PercolateAction extends ClientAction { +public class PercolateAction extends Action { public static final PercolateAction INSTANCE = new PercolateAction(); public static final String NAME = "indices:data/read/percolate"; @@ -39,7 +39,7 @@ public class PercolateAction extends ClientAction { +public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder { private PercolateSourceBuilder sourceBuilder; - public PercolateRequestBuilder(Client client) { - super(client, new PercolateRequest()); + public PercolateRequestBuilder(ElasticsearchClient client, PercolateAction action) { + super(client, action, new PercolateRequest()); } /** @@ -252,11 +249,10 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder listener) { + protected PercolateRequest 
beforeExecute(PercolateRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder); } - client.percolate(request, listener); + return request; } - } diff --git a/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java b/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java index 769c14fa369..6523378df4d 100644 --- a/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java +++ b/src/main/java/org/elasticsearch/action/search/ClearScrollAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class ClearScrollAction extends ClientAction { +public class ClearScrollAction extends Action { public static final ClearScrollAction INSTANCE = new ClearScrollAction(); public static final String NAME = "indices:data/read/scroll/clear"; @@ -39,7 +39,7 @@ public class ClearScrollAction extends ClientAction { +public class ClearScrollRequestBuilder extends ActionRequestBuilder { - public ClearScrollRequestBuilder(Client client) { - super(client, new ClearScrollRequest()); + public ClearScrollRequestBuilder(ElasticsearchClient client, ClearScrollAction action) { + super(client, action, new ClearScrollRequest()); } public ClearScrollRequestBuilder setScrollIds(List cursorIds) { @@ -42,9 +41,4 @@ public class ClearScrollRequestBuilder extends ActionRequestBuilder listener) { - client.clearScroll(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java b/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java index f052a927a16..ffa0a4b63f0 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; 
-import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class MultiSearchAction extends ClientAction { +public class MultiSearchAction extends Action { public static final MultiSearchAction INSTANCE = new MultiSearchAction(); public static final String NAME = "indices:data/read/msearch"; @@ -39,7 +39,7 @@ public class MultiSearchAction extends ClientAction { +public class MultiSearchRequestBuilder extends ActionRequestBuilder { - public MultiSearchRequestBuilder(Client client) { - super(client, new MultiSearchRequest()); + public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction action) { + super(client, action, new MultiSearchRequest()); } /** @@ -65,16 +64,11 @@ public class MultiSearchRequestBuilder extends ActionRequestBuilder * Invoke this method before invoking {@link #add(SearchRequestBuilder)}. */ public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { request().indicesOptions(indicesOptions); return this; } - - @Override - protected void doExecute(ActionListener listener) { - client.multiSearch(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/search/SearchAction.java b/src/main/java/org/elasticsearch/action/search/SearchAction.java index 450e65eed19..501fe1afb76 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchAction.java +++ b/src/main/java/org/elasticsearch/action/search/SearchAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class SearchAction extends ClientAction { +public class SearchAction extends Action { public static final SearchAction INSTANCE = new SearchAction(); public static final String NAME 
= "indices:data/read/search"; @@ -39,7 +39,7 @@ public class SearchAction extends ClientAction { +public class SearchRequestBuilder extends ActionRequestBuilder { private SearchSourceBuilder sourceBuilder; - public SearchRequestBuilder(Client client) { - super(client, new SearchRequest()); + public SearchRequestBuilder(ElasticsearchClient client, SearchAction action) { + super(client, action, new SearchRequest()); } /** @@ -168,7 +166,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder * For example indices that don't exist. */ public SearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { @@ -703,6 +701,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder templateParams) { + public SearchRequestBuilder setTemplateParams(Map templateParams) { request.templateParams(templateParams); return this; } @@ -994,7 +1000,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder listener) { + protected SearchRequest beforeExecute(SearchRequest request) { if (sourceBuilder != null) { request.source(sourceBuilder()); } - client.search(request, listener); + return request; } private SearchSourceBuilder sourceBuilder() { diff --git a/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java b/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java index 0647b2e6554..eccfa0526a1 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java +++ b/src/main/java/org/elasticsearch/action/search/SearchScrollAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class SearchScrollAction extends ClientAction { +public class SearchScrollAction extends Action { public static final SearchScrollAction INSTANCE = new SearchScrollAction(); public static final String NAME = 
"indices:data/read/scroll"; @@ -39,7 +39,7 @@ public class SearchScrollAction extends ClientAction { +public class SearchScrollRequestBuilder extends ActionRequestBuilder { - public SearchScrollRequestBuilder(Client client) { - super(client, new SearchScrollRequest()); + public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action) { + super(client, action, new SearchScrollRequest()); } - public SearchScrollRequestBuilder(Client client, String scrollId) { - super(client, new SearchScrollRequest(scrollId)); - } - - /** - * Should the listener be called on a separate thread if needed. - */ - public SearchScrollRequestBuilder listenerThreaded(boolean threadedListener) { - request.listenerThreaded(threadedListener); - return this; + public SearchScrollRequestBuilder(ElasticsearchClient client, SearchScrollAction action, String scrollId) { + super(client, action, new SearchScrollRequest(scrollId)); } /** @@ -77,9 +68,4 @@ public class SearchScrollRequestBuilder extends ActionRequestBuilder listener) { - client.searchScroll(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java index 59bfe4e6e19..88d2a92f331 100644 --- a/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java +++ b/src/main/java/org/elasticsearch/action/suggest/SuggestAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.suggest; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.search.suggest.Suggest; /** */ -public class SuggestAction extends ClientAction { +public class SuggestAction extends Action { public static final SuggestAction INSTANCE = new SuggestAction(); public static final String NAME = "indices:data/read/suggest"; @@ -40,7 +40,7 @@ public class SuggestAction extends 
ClientAction { +public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder { final SuggestBuilder suggest = new SuggestBuilder(); - public SuggestRequestBuilder(Client client) { - super(client, new SuggestRequest()); + public SuggestRequestBuilder(ElasticsearchClient client, SuggestAction action) { + super(client, action, new SuggestRequest()); } /** @@ -84,7 +83,7 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder listener) { + protected SuggestRequest beforeExecute(SuggestRequest request) { try { XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE); suggest.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -92,7 +91,6 @@ public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder extends AdapterActionFuture implements ListenableActionFuture { - final boolean listenerThreaded; + private final static ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); + final ThreadPool threadPool; volatile Object listeners; boolean executedListeners = false; - protected AbstractListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) { - this.listenerThreaded = listenerThreaded; + protected AbstractListenableActionFuture(ThreadPool threadPool) { this.threadPool = threadPool; } - public boolean listenerThreaded() { - return false; // we control execution of the listener - } - public ThreadPool threadPool() { return threadPool; } @@ -57,6 +53,7 @@ public abstract class AbstractListenableActionFuture extends AdapterAction } public void internalAddListener(ActionListener listener) { + listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); boolean executeImmediate = false; synchronized (this) { if (executedListeners) { @@ -101,27 +98,10 @@ public abstract class AbstractListenableActionFuture extends AdapterAction } private void executeListener(final ActionListener listener) { - if (listenerThreaded) { - try { - 
threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void run() { - try { - listener.onResponse(actionGet()); - } catch (ElasticsearchException e) { - listener.onFailure(e); - } - } - }); - } catch (EsRejectedExecutionException e) { - listener.onFailure(e); - } - } else { - try { - listener.onResponse(actionGet()); - } catch (Throwable e) { - listener.onFailure(e); - } + try { + listener.onResponse(actionGet()); + } catch (Throwable e) { + listener.onFailure(e); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index f939893a98e..e2e1072feb3 100644 --- a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -41,8 +41,6 @@ public abstract class HandledTransportAction() { @Override public void onResponse(Response response) { diff --git a/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java index 3d6cb28bced..1ec30606312 100644 --- a/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java +++ b/src/main/java/org/elasticsearch/action/support/PlainListenableActionFuture.java @@ -26,8 +26,8 @@ import org.elasticsearch.threadpool.ThreadPool; */ public class PlainListenableActionFuture extends AbstractListenableActionFuture { - public PlainListenableActionFuture(boolean listenerThreaded, ThreadPool threadPool) { - super(listenerThreaded, threadPool); + public PlainListenableActionFuture(ThreadPool threadPool) { + super(threadPool); } @Override diff --git a/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java new file mode 100644 index 
00000000000..77dd305897c --- /dev/null +++ b/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.Future; + +/** + * An action listener that wraps another action listener and threading its execution. + */ +public final class ThreadedActionListener implements ActionListener { + + /** + * Wrapper that can be used to automatically wrap a listener in a threaded listener if needed. 
+ */ + public static class Wrapper { + + private final ESLogger logger; + private final ThreadPool threadPool; + + private final boolean threadedListener; + + public Wrapper(ESLogger logger, Settings settings, ThreadPool threadPool) { + this.logger = logger; + this.threadPool = threadPool; + // Should the action listener be threaded or not by default. Action listeners are automatically threaded for client + // nodes and transport client in order to make sure client side code is not executed on IO threads. + this.threadedListener = DiscoveryNode.clientNode(settings) || TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING)); + } + + public ActionListener wrap(ActionListener listener) { + if (threadedListener == false) { + return listener; + } + // if its a future, the callback is very lightweight (flipping a bit) so no need to wrap it + if (listener instanceof Future) { + return listener; + } + // already threaded... + if (listener instanceof ThreadedActionListener) { + return listener; + } + return new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); + } + } + + private final ESLogger logger; + private final ThreadPool threadPool; + private final String executor; + private final ActionListener listener; + + public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener listener) { + this.logger = logger; + this.threadPool = threadPool; + this.executor = executor; + this.listener = listener; + } + + @Override + public void onResponse(final Response response) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + listener.onResponse(response); + } + + @Override + public void onFailure(Throwable t) { + listener.onFailure(t); + } + }); + } + + @Override + public void onFailure(final Throwable e) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws 
Exception { + listener.onFailure(e); + } + + @Override + public void onFailure(Throwable t) { + logger.warn("failed to execute failure callback on [{}], failure [{}]", t, listener, e); + } + }); + } +} diff --git a/src/main/java/org/elasticsearch/action/support/TransportAction.java b/src/main/java/org/elasticsearch/action/support/TransportAction.java index 7d3f3564693..c1a9f6098f6 100644 --- a/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -19,12 +19,10 @@ package org.elasticsearch.action.support; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.*; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.atomic.AtomicInteger; @@ -49,21 +47,11 @@ public abstract class TransportAction execute(Request request) { PlainActionFuture future = newFuture(); - // since we don't have a listener, and we release a possible lock with the future - // there is no need to execute it under a listener thread - request.listenerThreaded(false); execute(request, future); return future; } public final void execute(Request request, ActionListener listener) { - if (forceThreadedListener()) { - request.listenerThreaded(true); - } - if (request.listenerThreaded()) { - listener = new ThreadedActionListener<>(threadPool, listener, logger); - } - ActionRequestValidationException validationException = request.validate(); if (validationException != null) { listener.onFailure(validationException); @@ -83,69 +71,8 @@ public abstract class TransportAction listener); - static final class ThreadedActionListener implements ActionListener { - - private final ThreadPool threadPool; - - private final ActionListener 
listener; - - private final ESLogger logger; - - ThreadedActionListener(ThreadPool threadPool, ActionListener listener, ESLogger logger) { - this.threadPool = threadPool; - this.listener = listener; - this.logger = logger; - } - - @Override - public void onResponse(final Response response) { - try { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void run() { - try { - listener.onResponse(response); - } catch (Throwable e) { - listener.onFailure(e); - } - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run threaded action, execution rejected [{}] running on current thread", listener); - /* we don't care if that takes long since we are shutting down. But if we not respond somebody could wait - * for the response on the listener side which could be a remote machine so make sure we push it out there.*/ - try { - listener.onResponse(response); - } catch (Throwable e) { - listener.onFailure(e); - } - } - } - - @Override - public void onFailure(final Throwable e) { - try { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new Runnable() { - @Override - public void run() { - listener.onFailure(e); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run threaded action, execution rejected for listener [{}] running on current thread", listener); - /* we don't care if that takes long since we are shutting down (or queue capacity). 
But if we not respond somebody could wait - * for the response on the listener side which could be a remote machine so make sure we push it out there.*/ - listener.onFailure(e); - } - } - } - private static class RequestFilterChain implements ActionFilterChain { private final TransportAction action; diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java index 9a24917222f..50fb7b097f2 100644 --- a/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastOperationRequestBuilder.java @@ -19,19 +19,18 @@ package org.elasticsearch.action.support.broadcast; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.IndicesAdminClient; /** */ -public abstract class BroadcastOperationRequestBuilder, Response extends BroadcastOperationResponse, RequestBuilder extends BroadcastOperationRequestBuilder, Client extends ElasticsearchClient> - extends ActionRequestBuilder { +public abstract class BroadcastOperationRequestBuilder, Response extends BroadcastOperationResponse, RequestBuilder extends BroadcastOperationRequestBuilder> + extends ActionRequestBuilder { - protected BroadcastOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected BroadcastOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } @SuppressWarnings("unchecked") diff --git a/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java 
b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java index fcc92ed7959..939f47f9acf 100644 --- a/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java @@ -18,19 +18,18 @@ */ package org.elasticsearch.action.support.master; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.unit.TimeValue; /** * Base request builder for master node operations that support acknowledgements */ -public abstract class AcknowledgedRequestBuilder, Response extends AcknowledgedResponse, RequestBuilder extends AcknowledgedRequestBuilder, Client extends ElasticsearchClient> - extends MasterNodeOperationRequestBuilder { +public abstract class AcknowledgedRequestBuilder, Response extends AcknowledgedResponse, RequestBuilder extends AcknowledgedRequestBuilder> + extends MasterNodeOperationRequestBuilder { - protected AcknowledgedRequestBuilder(Client client, Request request) { - super(client, request); + protected AcknowledgedRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } /** @@ -39,7 +38,7 @@ public abstract class AcknowledgedRequestBuilder, Response extends ActionResponse, RequestBuilder extends MasterNodeOperationRequestBuilder, Client extends ElasticsearchClient> - extends ActionRequestBuilder { +public abstract class MasterNodeOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends MasterNodeOperationRequestBuilder> + extends ActionRequestBuilder { - protected MasterNodeOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected MasterNodeOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + 
super(client, action, request); } /** diff --git a/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java index 0b0644cc428..02c83298c25 100644 --- a/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support.master; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.ElasticsearchClient; @@ -27,11 +28,11 @@ import org.elasticsearch.client.IndicesAdminClient; /** * Base request builder for master node read operations that can be executed on the local node as well */ -public abstract class MasterNodeReadOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends MasterNodeReadOperationRequestBuilder, Client extends ElasticsearchClient> - extends MasterNodeOperationRequestBuilder { +public abstract class MasterNodeReadOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends MasterNodeReadOperationRequestBuilder> + extends MasterNodeOperationRequestBuilder { - protected MasterNodeReadOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected MasterNodeReadOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } /** diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java index 15e90c0784d..4ff18e68db0 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java +++ 
b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; @@ -75,14 +76,11 @@ public abstract class TransportMasterNodeOperationAction listener) { + protected void doExecute(final Request request, ActionListener listener) { + // TODO do we really need to wrap it in a listener? the handlers should be cheap + if ((listener instanceof ThreadedActionListener) == false) { + listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); + } innerExecute(request, listener, new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger), false); } diff --git a/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java index d310a2abe7f..cc12e2836dd 100644 --- a/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequestBuilder.java @@ -19,19 +19,21 @@ package org.elasticsearch.action.support.master.info; import com.google.common.collect.ObjectArrays; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.ElasticsearchClient; import 
org.elasticsearch.client.IndicesAdminClient; /** */ -public abstract class ClusterInfoRequestBuilder, Response extends ActionResponse, Builder extends ClusterInfoRequestBuilder> extends MasterNodeReadOperationRequestBuilder { +public abstract class ClusterInfoRequestBuilder, Response extends ActionResponse, Builder extends ClusterInfoRequestBuilder> extends MasterNodeReadOperationRequestBuilder { - protected ClusterInfoRequestBuilder(IndicesAdminClient client, Request request) { - super(client, request); + protected ClusterInfoRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } @SuppressWarnings("unchecked") diff --git a/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java index e9c38539ede..303671db595 100644 --- a/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/nodes/NodesOperationRequestBuilder.java @@ -19,17 +19,18 @@ package org.elasticsearch.action.support.nodes; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; /** */ public abstract class NodesOperationRequestBuilder, Response extends NodesOperationResponse, RequestBuilder extends NodesOperationRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { - protected NodesOperationRequestBuilder(ClusterAdminClient client, Request request) { - super(client, request); + protected NodesOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } @SuppressWarnings("unchecked") diff --git 
a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java index 80a0c96bd6a..aa34e249d9d 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequestBuilder.java @@ -19,19 +19,21 @@ package org.elasticsearch.action.support.replication; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; /** */ public abstract class ShardReplicationOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends ShardReplicationOperationRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { - protected ShardReplicationOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected ShardReplicationOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } /** diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index 0e488a602ff..c45a3798318 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.support.replication; -import org.elasticsearch.ElasticsearchException; +import 
org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionWriteResponse; @@ -36,17 +36,16 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.RefCounted; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -57,21 +56,15 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; -import org.elasticsearch.transport.ConnectTransportException; -import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import 
org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; +import java.io.Closeable; import java.io.IOException; import java.util.Map; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; /** */ @@ -103,7 +96,7 @@ public abstract class TransportShardReplicationOperationAction { @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); // if we have a local operation, execute it on a thread since we don't spawn request.operationThreaded(true); execute(request, new ActionListener() { @@ -285,7 +276,12 @@ public abstract class TransportShardReplicationOperationAction listener) { @@ -396,7 +393,9 @@ public abstract class TransportShardReplicationOperationAction primaryResponse = shardOperationOnPrimary(observer.observedState(), por); logger.trace("operation completed on primary [{}]", primary); - replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener); + replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener, indexShardReference); } catch (Throwable e) { internalRequest.request.setCanHaveDuplicates(); // shard has not been allocated yet, retry it here if (retryPrimaryException(e)) { logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage()); + // We have to close here because when we retry we will get a new reference on index shard again and
we do not want to + // increment twice. + Releasables.close(indexShardReference); + // We have to reset to null here because when we retry it might be that we never get to the point where we assign a new reference + // (for example, in case the operation was rejected because queue is full). In this case we would release again once one of the finish methods is called. + indexShardReference = null; retry(e); return; } @@ -616,6 +628,12 @@ public abstract class TransportShardReplicationOperationAction listener) { + InternalRequest internalRequest, ActionListener listener, Releasable indexShardReference) { this.replicaRequest = replicaRequest; this.listener = listener; this.finalResponse = finalResponse; this.originalPrimaryShard = originalPrimaryShard; this.observer = observer; indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex()); + this.indexShardReference = indexShardReference; ShardRouting shard; // we double check on the state, if it got changed we need to make sure we take the latest one cause @@ -744,17 +766,23 @@ public abstract class TransportShardReplicationOperationAction, Response extends ActionResponse, RequestBuilder extends SingleCustomOperationRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { - protected SingleCustomOperationRequestBuilder(IndicesAdminClient client, Request request) { - super(client, request); + protected SingleCustomOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } /** diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java index 5d1299c60b1..a40e293f64e 100644 --- a/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java +++ 
b/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequestBuilder.java @@ -19,18 +19,20 @@ package org.elasticsearch.action.support.single.instance; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; /** */ public abstract class InstanceShardOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends InstanceShardOperationRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { - protected InstanceShardOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected InstanceShardOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } @SuppressWarnings("unchecked") diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java index cca130375ee..e9ce5204aaf 100644 --- a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequestBuilder.java @@ -19,17 +19,19 @@ package org.elasticsearch.action.support.single.shard; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; /** */ public abstract class SingleShardOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends SingleShardOperationRequestBuilder> - extends ActionRequestBuilder { + extends ActionRequestBuilder { - protected 
SingleShardOperationRequestBuilder(Client client, Request request) { - super(client, request); + protected SingleShardOperationRequestBuilder(ElasticsearchClient client, Action action, Request request) { + super(client, action, request); } /** diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java index 1c91a7753dd..db6260e6f85 100644 --- a/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/single/shard/TransportShardSingleOperationAction.java @@ -232,8 +232,6 @@ public abstract class TransportShardSingleOperationAction() { diff --git a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java index 99faae076c0..d4451157c4b 100644 --- a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsAction.java @@ -19,12 +19,13 @@ package org.elasticsearch.action.termvectors; -import org.elasticsearch.action.ClientAction; +import org.elasticsearch.action.Action; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class MultiTermVectorsAction extends ClientAction { +public class MultiTermVectorsAction extends Action { public static final MultiTermVectorsAction INSTANCE = new MultiTermVectorsAction(); public static final String NAME = "indices:data/read/mtv"; @@ -39,7 +40,7 @@ public class MultiTermVectorsAction extends ClientAction { - public MultiTermVectorsRequestBuilder(Client client) { - super(client, new MultiTermVectorsRequest()); +public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder { + + public 
MultiTermVectorsRequestBuilder(ElasticsearchClient client, MultiTermVectorsAction action) { + super(client, action, new MultiTermVectorsRequest()); } public MultiTermVectorsRequestBuilder add(String index, @Nullable String type, Iterable ids) { @@ -47,9 +47,4 @@ public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder listener) { - client.multiTermVectors(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java b/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java index 96269d55f1b..a4c53ee4a2d 100644 --- a/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java +++ b/src/main/java/org/elasticsearch/action/termvectors/TermVectorsAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.termvectors; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class TermVectorsAction extends ClientAction { +public class TermVectorsAction extends Action { public static final TermVectorsAction INSTANCE = new TermVectorsAction(); public static final String NAME = "indices:data/read/tv"; @@ -39,7 +39,7 @@ public class TermVectorsAction extends ClientAction { +public class TermVectorsRequestBuilder extends ActionRequestBuilder { - public TermVectorsRequestBuilder(Client client) { - super(client, new TermVectorsRequest()); + public TermVectorsRequestBuilder(ElasticsearchClient client, TermVectorsAction action) { + super(client, action, new TermVectorsRequest()); } /** @@ -45,8 +45,8 @@ public class TermVectorsRequestBuilder extends ActionRequestBuilder listener) { - client.termVectors(request, listener); - } } diff --git a/src/main/java/org/elasticsearch/action/update/UpdateAction.java b/src/main/java/org/elasticsearch/action/update/UpdateAction.java index c149a8839d2..4ac1002dbc6 100644 --- 
a/src/main/java/org/elasticsearch/action/update/UpdateAction.java +++ b/src/main/java/org/elasticsearch/action/update/UpdateAction.java @@ -19,12 +19,12 @@ package org.elasticsearch.action.update; -import org.elasticsearch.action.ClientAction; -import org.elasticsearch.client.Client; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** */ -public class UpdateAction extends ClientAction { +public class UpdateAction extends Action { public static final UpdateAction INSTANCE = new UpdateAction(); public static final String NAME = "indices:data/write/update"; @@ -39,7 +39,7 @@ public class UpdateAction extends ClientAction { - public UpdateRequestBuilder(Client client) { - super(client, new UpdateRequest()); + public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) { + super(client, action, new UpdateRequest()); } - public UpdateRequestBuilder(Client client, String index, String type, String id) { - super(client, new UpdateRequest(index, type, id)); + public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action, String index, String type, String id) { + super(client, action, new UpdateRequest(index, type, id)); } /** @@ -77,7 +76,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder + *

* The script works with the variable ctx, which is bound to the entry, * e.g. ctx._source.mycounter += 1. * @@ -93,7 +92,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder * Default: groovy - *

+ *

* Ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html */ public UpdateRequestBuilder setScriptLang(String scriptLang) { @@ -344,7 +343,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder listener) { - client.update(request, listener); } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index d351a9d4ea2..74e8ec62cbc 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -19,6 +19,7 @@ package org.elasticsearch.bootstrap; +import org.apache.lucene.util.StringHelper; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.common.PidFile; @@ -27,7 +28,9 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.spi.Message; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.jna.Kernel32Library; import org.elasticsearch.common.jna.Natives; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.log4j.LogConfigurator; @@ -38,6 +41,7 @@ import org.elasticsearch.monitor.process.JmxProcessProbe; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeBuilder; import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.hyperic.sigar.Sigar; import java.util.Locale; import java.util.Set; @@ -52,17 +56,75 @@ import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_ */ public class Bootstrap { + private static volatile Bootstrap INSTANCE; + private Node node; + private final CountDownLatch keepAliveLatch = new CountDownLatch(1); + private final Thread keepAliveThread; - private static volatile 
Thread keepAliveThread; - private static volatile CountDownLatch keepAliveLatch; - private static Bootstrap bootstrap; - - private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { - if (settings.getAsBoolean("bootstrap.mlockall", false)) { + /** creates a new instance */ + Bootstrap() { + keepAliveThread = new Thread(new Runnable() { + @Override + public void run() { + try { + keepAliveLatch.await(); + } catch (InterruptedException e) { + // bail out + } + } + }, "elasticsearch[keepAlive/" + Version.CURRENT + "]"); + keepAliveThread.setDaemon(false); + // keep this thread alive (non daemon thread) until we shutdown + Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + keepAliveLatch.countDown(); + } + }); + } + + /** initialize native resources */ + public static void initializeNatives(boolean mlockAll, boolean ctrlHandler) { + // mlockall if requested + if (mlockAll) { Natives.tryMlockall(); } + // listener for windows close event + if (ctrlHandler) { + Natives.addConsoleCtrlHandler(new ConsoleCtrlHandler() { + @Override + public boolean handle(int code) { + if (CTRL_CLOSE_EVENT == code) { + ESLogger logger = Loggers.getLogger(Bootstrap.class); + logger.info("running graceful exit on windows"); + + Bootstrap.INSTANCE.stop(); + return true; + } + return false; + } + }); + } + Kernel32Library.getInstance(); + + // initialize sigar explicitly + try { + Sigar.load(); + Loggers.getLogger(Bootstrap.class).trace("sigar libraries loaded successfully"); + } catch (Throwable t) { + Loggers.getLogger(Bootstrap.class).trace("failed to load sigar libraries", t); + } + + // init lucene random seed. 
it will use /dev/urandom where available: + StringHelper.randomId(); + } + + private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { + initializeNatives(settings.getAsBoolean("bootstrap.mlockall", false), + settings.getAsBoolean("bootstrap.ctrlhandler", true)); + NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(settings).loadConfigSettings(false); node = nodeBuilder.build(); if (addShutdownHook) { @@ -73,23 +135,8 @@ public class Bootstrap { } }); } - - if (settings.getAsBoolean("bootstrap.ctrlhandler", true)) { - Natives.addConsoleCtrlHandler(new ConsoleCtrlHandler() { - @Override - public boolean handle(int code) { - if (CTRL_CLOSE_EVENT == code) { - ESLogger logger = Loggers.getLogger(Bootstrap.class); - logger.info("running graceful exit on windows"); - - System.exit(0); - return true; - } - return false; - } - }); - } - // install SM after natives, JNA can require strange permissions + + // install SM after natives, shutdown hooks, etc. 
setupSecurity(settings, environment); } @@ -124,47 +171,22 @@ public class Bootstrap { return InternalSettingsPreparer.prepareSettings(EMPTY_SETTINGS, true); } - /** - * hook for JSVC - */ - public void init(String[] args) throws Exception { - Tuple tuple = initialSettings(); - Settings settings = tuple.v1(); - Environment environment = tuple.v2(); - setupLogging(settings, environment); - setup(true, settings, environment); - } - - /** - * hook for JSVC - */ - public void start() { + private void start() { node.start(); + keepAliveThread.start(); } - /** - * hook for JSVC - */ - public void stop() { - destroy(); - } - - - /** - * hook for JSVC - */ - public void destroy() { - node.close(); - } - - public static void close(String[] args) { - bootstrap.destroy(); - keepAliveLatch.countDown(); + private void stop() { + try { + Releasables.close(node); + } finally { + keepAliveLatch.countDown(); + } } public static void main(String[] args) { System.setProperty("es.logger.prefix", ""); - bootstrap = new Bootstrap(); + INSTANCE = new Bootstrap(); final String pidFile = System.getProperty("es.pidfile", System.getProperty("es-pidfile")); if (pidFile != null) { @@ -216,40 +238,18 @@ public class Bootstrap { // fail if using broken version JVMCheck.check(); - bootstrap.setup(true, settings, environment); + INSTANCE.setup(true, settings, environment); stage = "Startup"; - bootstrap.start(); + INSTANCE.start(); if (!foreground) { closeSysError(); } - - keepAliveLatch = new CountDownLatch(1); - // keep this thread alive (non daemon thread) until we shutdown - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - keepAliveLatch.countDown(); - } - }); - - keepAliveThread = new Thread(new Runnable() { - @Override - public void run() { - try { - keepAliveLatch.await(); - } catch (InterruptedException e) { - // bail out - } - } - }, "elasticsearch[keepAlive/" + Version.CURRENT + "]"); - keepAliveThread.setDaemon(false); - keepAliveThread.start(); } 
catch (Throwable e) { ESLogger logger = Loggers.getLogger(Bootstrap.class); - if (bootstrap.node != null) { - logger = Loggers.getLogger(Bootstrap.class, bootstrap.node.settings().get("name")); + if (INSTANCE.node != null) { + logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("name")); } String errorMessage = buildErrorMessage(stage, e); if (foreground) { diff --git a/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java new file mode 100644 index 00000000000..befef74251b --- /dev/null +++ b/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.bootstrap; + +import java.net.URI; +import java.security.Permission; +import java.security.PermissionCollection; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.URIParameter; + +/** custom policy for union of static and dynamic permissions */ +public class ESPolicy extends Policy { + + /** template policy file, the one used in tests */ + static final String POLICY_RESOURCE = "security.policy"; + + final Policy template; + final PermissionCollection dynamic; + + public ESPolicy(PermissionCollection dynamic) throws Exception { + URI uri = getClass().getResource(POLICY_RESOURCE).toURI(); + this.template = Policy.getInstance("JavaPolicy", new URIParameter(uri)); + this.dynamic = dynamic; + } + + @Override + public boolean implies(ProtectionDomain domain, Permission permission) { + return template.implies(domain, permission) || dynamic.implies(permission); + } +} diff --git a/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index cb35d33bb45..e9a851ecea1 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -24,10 +24,6 @@ package org.elasticsearch.bootstrap; */ public class Elasticsearch extends Bootstrap { - public static void close(String[] args) { - Bootstrap.close(args); - } - public static void main(String[] args) { Bootstrap.main(args); } diff --git a/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java b/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java index a07ec8c162e..96e310eff3f 100644 --- a/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java +++ b/src/main/java/org/elasticsearch/bootstrap/ElasticsearchF.java @@ -25,10 +25,6 @@ package org.elasticsearch.bootstrap; */ public class ElasticsearchF { - public static void close(String[] args) { - Bootstrap.close(args); - } - public static void 
main(String[] args) { System.setProperty("es.foreground", "yes"); Bootstrap.main(args); diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index 67ac531f0e7..e90b162c7e3 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -19,16 +19,14 @@ package org.elasticsearch.bootstrap; -import com.google.common.io.ByteStreams; -import org.apache.lucene.util.IOUtils; -import org.apache.lucene.util.StringHelper; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.env.Environment; import java.io.*; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.security.Permissions; +import java.security.Policy; /** * Initializes securitymanager with necessary permissions. @@ -36,79 +34,61 @@ import java.nio.file.Path; * We use a template file (the one we test with), and add additional * permissions based on the environment (data paths, etc) */ -class Security { - - /** template policy file, the one used in tests */ - static final String POLICY_RESOURCE = "security.policy"; - +public class Security { + /** * Initializes securitymanager for the environment * Can only happen once! */ - static void configure(Environment environment) throws IOException { - // init lucene random seed. it will use /dev/urandom where available. - StringHelper.randomId(); - InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); - if (config == null) { - throw new NoSuchFileException(POLICY_RESOURCE); - } - Path newConfig = processTemplate(config, environment); - System.setProperty("java.security.policy", newConfig.toString()); + static void configure(Environment environment) throws Exception { + // enable security policy: union of template and environment-based paths. 
+ Policy.setPolicy(new ESPolicy(createPermissions(environment))); + + // enable security manager System.setSecurityManager(new SecurityManager()); - IOUtils.deleteFilesIgnoringExceptions(newConfig); // TODO: maybe log something if it fails? + + // do some basic tests + selfTest(); } - - // package-private for testing - static Path processTemplate(InputStream template, Environment environment) throws IOException { - Path processed = Files.createTempFile(null, null); - try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { - // copy the template as-is. - try (InputStream in = new BufferedInputStream(template)) { - ByteStreams.copy(in, output); - } - // all policy files are UTF-8: - // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html - try (Writer writer = new OutputStreamWriter(output, StandardCharsets.UTF_8)) { - writer.write(System.lineSeparator()); - writer.write("grant {"); - writer.write(System.lineSeparator()); - - // add permissions for all configured paths. - // TODO: improve test infra so we can reduce permissions where read/write - // is not really needed... - addPath(writer, environment.homeFile(), "read,readlink,write,delete"); - addPath(writer, environment.configFile(), "read,readlink,write,delete"); - addPath(writer, environment.logsFile(), "read,readlink,write,delete"); - addPath(writer, environment.pluginsFile(), "read,readlink,write,delete"); - for (Path path : environment.dataFiles()) { - addPath(writer, path, "read,readlink,write,delete"); - } - for (Path path : environment.dataWithClusterFiles()) { - addPath(writer, path, "read,readlink,write,delete"); - } - - writer.write("};"); - writer.write(System.lineSeparator()); - } + /** returns dynamic Permissions to configured paths */ + static Permissions createPermissions(Environment environment) throws IOException { + // TODO: improve test infra so we can reduce permissions where read/write + // is not really needed... 
+ Permissions policy = new Permissions(); + addPath(policy, PathUtils.get(System.getProperty("java.io.tmpdir")), "read,readlink,write,delete"); + addPath(policy, environment.homeFile(), "read,readlink,write,delete"); + addPath(policy, environment.configFile(), "read,readlink,write,delete"); + addPath(policy, environment.logsFile(), "read,readlink,write,delete"); + addPath(policy, environment.pluginsFile(), "read,readlink,write,delete"); + for (Path path : environment.dataFiles()) { + addPath(policy, path, "read,readlink,write,delete"); } - return processed; + for (Path path : environment.dataWithClusterFiles()) { + addPath(policy, path, "read,readlink,write,delete"); + } + + return policy; } - static void addPath(Writer writer, Path path, String permissions) throws IOException { + /** Add access to path (and all files underneath it) */ + public static void addPath(Permissions policy, Path path, String permissions) throws IOException { // paths may not exist yet Files.createDirectories(path); // add each path twice: once for itself, again for files underneath it - writer.write("permission java.io.FilePermission \"" + encode(path) + "\", \"" + permissions + "\";"); - writer.write(System.lineSeparator()); - writer.write("permission java.io.FilePermission \"" + encode(path) + "${/}-\", \"" + permissions + "\";"); - writer.write(System.lineSeparator()); + policy.add(new FilePermission(path.toString(), permissions)); + policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); } - - // Any backslashes in paths must be escaped, because it is the escape character when parsing. - See "Note Regarding File Path Specifications on Windows Systems". 
- // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html - static String encode(Path path) { - return path.toString().replace("\\", "\\\\"); + + /** Simple checks that everything is ok */ + public static void selfTest() { + // check we can manipulate temporary files + try { + Files.delete(Files.createTempFile(null, null)); + } catch (IOException ignored) { + // potentially virus scanner + } catch (SecurityException problem) { + throw new SecurityException("Security misconfiguration: cannot access java.io.tmpdir", problem); + } } } diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index 973ebf511c3..e356244db1a 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -62,6 +62,7 @@ import org.elasticsearch.action.termvectors.*; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; @@ -79,7 +80,7 @@ import org.elasticsearch.common.settings.Settings; * @see org.elasticsearch.node.Node#client() * @see org.elasticsearch.client.transport.TransportClient */ -public interface Client extends ElasticsearchClient, Releasable { +public interface Client extends ElasticsearchClient, Releasable { String CLIENT_TYPE_SETTING = "client.type"; @@ -256,18 +257,11 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Put the indexed script - * @param scriptLang - * @param id - * @param source - * @return */ PutIndexedScriptRequestBuilder preparePutIndexedScript(@Nullable String scriptLang, String id, String source); /** * delete an indexed script - * - * @param request - * @param listener */ void 
deleteIndexedScript(DeleteIndexedScriptRequest request, ActionListener listener); @@ -287,17 +281,11 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Delete an indexed script - * @param scriptLang - * @param id - * @return */ DeleteIndexedScriptRequestBuilder prepareDeleteIndexedScript(@Nullable String scriptLang, String id); /** * Put an indexed script - * - * @param request - * @param listener */ void putIndexedScript(PutIndexedScriptRequest request, ActionListener listener); @@ -317,17 +305,11 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Get the indexed script - * @param scriptLang - * @param id - * @return */ GetIndexedScriptRequestBuilder prepareGetIndexedScript(@Nullable String scriptLang, String id); /** * Get an indexed script - * - * @param request - * @param listener */ void getIndexedScript(GetIndexedScriptRequest request, ActionListener listener); @@ -670,4 +652,5 @@ public interface Client extends ElasticsearchClient, Releasable { */ Settings settings(); + Headers headers(); } diff --git a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 0169151fc93..c3eb51585c2 100644 --- a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -83,7 +83,7 @@ import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; * * @see AdminClient#cluster() */ -public interface ClusterAdminClient extends ElasticsearchClient { +public interface ClusterAdminClient extends ElasticsearchClient { /** * The health of the cluster. 
diff --git a/src/main/java/org/elasticsearch/client/ElasticsearchClient.java b/src/main/java/org/elasticsearch/client/ElasticsearchClient.java index 807dcc84f57..08a95bc71e6 100644 --- a/src/main/java/org/elasticsearch/client/ElasticsearchClient.java +++ b/src/main/java/org/elasticsearch/client/ElasticsearchClient.java @@ -23,7 +23,7 @@ package org.elasticsearch.client; import org.elasticsearch.action.*; import org.elasticsearch.threadpool.ThreadPool; -public interface ElasticsearchClient { +public interface ElasticsearchClient { /** * Executes a generic action, denoted by an {@link org.elasticsearch.action.Action}. @@ -35,7 +35,7 @@ public interface ElasticsearchClient { * @param The request builder type. * @return A future allowing to get back the response. */ - > ActionFuture execute(final Action action, final Request request); + > ActionFuture execute(final Action action, final Request request); /** * Executes a generic action, denoted by an {@link Action}. @@ -47,7 +47,7 @@ public interface ElasticsearchClient { * @param The response type. * @param The request builder type. */ - > void execute(final Action action, final Request request, ActionListener listener); + > void execute(final Action action, final Request request, ActionListener listener); /** * Prepares a request builder to execute, specified by {@link Action}. @@ -58,7 +58,7 @@ public interface ElasticsearchClient { * @param The request builder. * @return The request builder, that can, at a later stage, execute the request. 
*/ - > RequestBuilder prepareExecute(final Action action); + > RequestBuilder prepareExecute(final Action action); /** * Returns the threadpool used to execute requests on this client diff --git a/src/main/java/org/elasticsearch/client/FilterClient.java b/src/main/java/org/elasticsearch/client/FilterClient.java index 2fc9f5f9f6a..c0a93f5aa05 100644 --- a/src/main/java/org/elasticsearch/client/FilterClient.java +++ b/src/main/java/org/elasticsearch/client/FilterClient.java @@ -20,10 +20,6 @@ package org.elasticsearch.client; import org.elasticsearch.action.*; import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.support.AbstractClusterAdminClient; -import org.elasticsearch.client.support.AbstractIndicesAdminClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; /** @@ -31,16 +27,18 @@ import org.elasticsearch.threadpool.ThreadPool; * uses as its basic source, possibly transforming the requests / responses along the * way or providing additional functionality. 
*/ -public abstract class FilterClient extends AbstractClient implements AdminClient { +public abstract class FilterClient extends AbstractClient { protected final Client in; /** * Creates a new FilterClient + * * @param in the client to delegate to * @see #in() */ public FilterClient(Client in) { + super(in.settings(), in.threadPool(), in.headers()); this.in = in; } @@ -50,127 +48,14 @@ public abstract class FilterClient extends AbstractClient implements AdminClient } @Override - public AdminClient admin() { - return this; - } - - @Override - public > ActionFuture execute( - Action action, Request request) { - return in().execute(action, request); - } - - @Override - public > void execute( - Action action, Request request, ActionListener listener) { + protected > void doExecute(Action action, Request request, ActionListener listener) { in().execute(action, request, listener); } - @Override - public Settings settings() { - return in().settings(); - } - - @Override - public ThreadPool threadPool() { - return in().threadPool(); - } - /** * Returns the delegate {@link Client} */ protected Client in() { return in; } - - @Override - public ClusterAdminClient cluster() { - return in().admin().cluster(); - } - - @Override - public IndicesAdminClient indices() { - return in().admin().indices(); - } - - /** - * A {@link IndicesAdminClient} that contains another {@link IndicesAdminClient} which it - * uses as its basic source, possibly transforming the requests / responses along the - * way or providing additional functionality. 
- */ - public static class IndicesAdmin extends AbstractIndicesAdminClient { - protected final IndicesAdminClient in; - - /** - * Creates a new IndicesAdmin - * @param in the client to delegate to - * @see #in() - */ - public IndicesAdmin(IndicesAdminClient in) { - this.in = in; - } - - @Override - public > ActionFuture execute(Action action, Request request) { - return in().execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - in().execute(action, request, listener); - } - - - /** - * Returns the delegate {@link Client} - */ - protected IndicesAdminClient in() { - return in; - } - - @Override - public ThreadPool threadPool() { - return in().threadPool(); - } - } - - /** - * A {@link ClusterAdminClient} that contains another {@link ClusterAdminClient} which it - * uses as its basic source, possibly transforming the requests / responses along the - * way or providing additional functionality. - */ - public static class ClusterAdmin extends AbstractClusterAdminClient { - protected final ClusterAdminClient in; - - /** - * Creates a new ClusterAdmin - * @param in the client to delegate to - * @see #in() - */ - public ClusterAdmin(ClusterAdminClient in) { - this.in = in; - } - - @Override - public > ActionFuture execute(Action action, Request request) { - return in().execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - in().execute(action, request, listener); - } - - /** - * Returns the delegate {@link Client} - */ - protected ClusterAdminClient in() { - return in; - } - - @Override - public ThreadPool threadPool() { - return in().threadPool(); - } - } } diff --git a/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 408bbd29641..715d7b618a9 100644 --- a/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ 
b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -112,7 +112,7 @@ import org.elasticsearch.common.Nullable; * * @see AdminClient#indices() */ -public interface IndicesAdminClient extends ElasticsearchClient { +public interface IndicesAdminClient extends ElasticsearchClient { /** diff --git a/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java deleted file mode 100644 index 40e25178454..00000000000 --- a/src/main/java/org/elasticsearch/client/node/NodeAdminClient.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.node; - -import org.elasticsearch.client.AdminClient; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; - -/** - * - */ -public class NodeAdminClient extends AbstractComponent implements AdminClient { - - private final NodeIndicesAdminClient indicesAdminClient; - - private final NodeClusterAdminClient clusterAdminClient; - - @Inject - public NodeAdminClient(Settings settings, NodeClusterAdminClient clusterAdminClient, NodeIndicesAdminClient indicesAdminClient) { - super(settings); - this.indicesAdminClient = indicesAdminClient; - this.clusterAdminClient = clusterAdminClient; - } - - @Override - public IndicesAdminClient indices() { - return indicesAdminClient; - } - - @Override - public ClusterAdminClient cluster() { - return this.clusterAdminClient; - } -} diff --git a/src/main/java/org/elasticsearch/client/node/NodeClient.java b/src/main/java/org/elasticsearch/client/node/NodeClient.java index ae85454ba50..2ee07aee55b 100644 --- a/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -21,13 +21,19 @@ package org.elasticsearch.client.node; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ClusterAdminClient; +import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.client.support.Headers; import 
org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -38,38 +44,12 @@ import java.util.Map; */ public class NodeClient extends AbstractClient { - private final Settings settings; - private final ThreadPool threadPool; - - private final NodeAdminClient admin; - - private final ImmutableMap actions; - - private final Headers headers; + private final ImmutableMap actions; @Inject - public NodeClient(Settings settings, ThreadPool threadPool, NodeAdminClient admin, Map actions, Headers headers) { - this.settings = settings; - this.threadPool = threadPool; - this.admin = admin; - this.headers = headers; - MapBuilder actionsBuilder = new MapBuilder<>(); - for (Map.Entry entry : actions.entrySet()) { - if (entry.getKey() instanceof ClientAction) { - actionsBuilder.put((ClientAction) entry.getKey(), entry.getValue()); - } - } - this.actions = actionsBuilder.immutableMap(); - } - - @Override - public Settings settings() { - return this.settings; - } - - @Override - public ThreadPool threadPool() { - return this.threadPool; + public NodeClient(Settings settings, ThreadPool threadPool, Headers headers, Map actions) { + super(settings, threadPool, headers); + this.actions = ImmutableMap.copyOf(actions); } @Override @@ -77,24 +57,13 @@ public class NodeClient extends AbstractClient { // nothing really to do } - @Override - public AdminClient admin() { - return this.admin; - } - @SuppressWarnings("unchecked") @Override - public > ActionFuture execute(Action action, Request request) { - headers.applyTo(request); - TransportAction transportAction = actions.get((ClientAction)action); - return transportAction.execute(request); - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(Action action, Request request, ActionListener 
listener) { - headers.applyTo(request); - TransportAction transportAction = actions.get((ClientAction)action); + public > void doExecute(Action action, Request request, ActionListener listener) { + TransportAction transportAction = actions.get(action); + if (transportAction == null) { + throw new IllegalStateException("failed to find action [" + action + "] to execute"); + } transportAction.execute(request, listener); } } diff --git a/src/main/java/org/elasticsearch/client/node/NodeClientModule.java b/src/main/java/org/elasticsearch/client/node/NodeClientModule.java index ea3a81a105f..fb0891da8cc 100644 --- a/src/main/java/org/elasticsearch/client/node/NodeClientModule.java +++ b/src/main/java/org/elasticsearch/client/node/NodeClientModule.java @@ -19,10 +19,7 @@ package org.elasticsearch.client.node; -import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.inject.AbstractModule; @@ -34,9 +31,6 @@ public class NodeClientModule extends AbstractModule { @Override protected void configure() { bind(Headers.class).asEagerSingleton(); - bind(ClusterAdminClient.class).to(NodeClusterAdminClient.class).asEagerSingleton(); - bind(IndicesAdminClient.class).to(NodeIndicesAdminClient.class).asEagerSingleton(); - bind(AdminClient.class).to(NodeAdminClient.class).asEagerSingleton(); bind(Client.class).to(NodeClient.class).asEagerSingleton(); } } diff --git a/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java deleted file mode 100644 index 41af331a93f..00000000000 --- a/src/main/java/org/elasticsearch/client/node/NodeClusterAdminClient.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.node; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.support.AbstractClusterAdminClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Map; - -/** - * - */ -public class NodeClusterAdminClient extends AbstractClusterAdminClient implements ClusterAdminClient { - - private final ThreadPool threadPool; - - private final ImmutableMap actions; - - private final Headers headers; - - @Inject - public NodeClusterAdminClient(ThreadPool threadPool, Map actions, Headers headers) { - this.threadPool = threadPool; - this.headers = headers; - MapBuilder actionsBuilder = new MapBuilder<>(); - for (Map.Entry entry : actions.entrySet()) { - if (entry.getKey() instanceof ClusterAction) { - actionsBuilder.put((ClusterAction) entry.getKey(), entry.getValue()); - } - } - this.actions = actionsBuilder.immutableMap(); - } - - @Override - 
public ThreadPool threadPool() { - return this.threadPool; - } - - @SuppressWarnings("unchecked") - @Override - public > ActionFuture execute(Action action, Request request) { - headers.applyTo(request); - TransportAction transportAction = actions.get((ClusterAction)action); - return transportAction.execute(request); - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(Action action, Request request, ActionListener listener) { - headers.applyTo(request); - TransportAction transportAction = actions.get((ClusterAction)action); - transportAction.execute(request, listener); - } -} diff --git a/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java deleted file mode 100644 index c74d68d494f..00000000000 --- a/src/main/java/org/elasticsearch/client/node/NodeIndicesAdminClient.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.node; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.client.support.AbstractIndicesAdminClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Map; - -/** - * - */ -public class NodeIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient { - - private final ThreadPool threadPool; - - private final ImmutableMap actions; - - private final Headers headers; - - @Inject - public NodeIndicesAdminClient(ThreadPool threadPool, Map actions, Headers headers) { - this.threadPool = threadPool; - this.headers = headers; - MapBuilder actionsBuilder = new MapBuilder<>(); - for (Map.Entry entry : actions.entrySet()) { - if (entry.getKey() instanceof IndicesAction) { - actionsBuilder.put((IndicesAction) entry.getKey(), entry.getValue()); - } - } - this.actions = actionsBuilder.immutableMap(); - } - - @Override - public ThreadPool threadPool() { - return this.threadPool; - } - - @SuppressWarnings("unchecked") - @Override - public > ActionFuture execute(Action action, Request request) { - headers.applyTo(request); - TransportAction transportAction = actions.get((IndicesAction)action); - return transportAction.execute(request); - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(Action action, Request request, ActionListener listener) { - headers.applyTo(request); - TransportAction transportAction = actions.get((IndicesAction)action); - transportAction.execute(request, listener); - } -} diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java 
b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 7bfdfa14198..6ac9a0202ab 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -20,6 +20,194 @@ package org.elasticsearch.client.support; import org.elasticsearch.action.*; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; +import 
org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import 
org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequestBuilder; +import 
org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; +import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; +import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction; +import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import 
org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; +import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction; +import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest; +import 
org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; +import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; +import org.elasticsearch.action.admin.indices.flush.FlushAction; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.*; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.action.admin.indices.open.OpenIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; +import org.elasticsearch.action.admin.indices.optimize.OptimizeAction; +import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; +import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder; +import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequestBuilder; +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import 
org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; +import 
org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; +import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; +import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder; +import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; +import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction; +import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest; +import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder; +import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; 
import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -70,24 +258,83 @@ import org.elasticsearch.action.suggest.SuggestAction; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestRequestBuilder; import org.elasticsearch.action.suggest.SuggestResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.termvectors.*; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.*; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; /** * */ -public abstract class AbstractClient implements Client { +public abstract class AbstractClient extends AbstractComponent implements Client { + + private final ThreadPool threadPool; + private final Admin admin; + + private final Headers headers; + private final ThreadedActionListener.Wrapper threadedWrapper; + + public AbstractClient(Settings settings, ThreadPool threadPool, Headers headers) { + super(settings); + this.threadPool = threadPool; + this.headers = headers; + this.admin = new Admin(this); + this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); + } @Override - public > RequestBuilder prepareExecute(final Action action) { + public Headers headers() { + return this.headers; + } + + @Override + public final Settings settings() { + return this.settings; + } + + @Override + public final ThreadPool threadPool() { + return this.threadPool; + } + + 
@Override + public final AdminClient admin() { + return admin; + } + + @Override + public final > RequestBuilder prepareExecute(final Action action) { return action.newRequestBuilder(this); } + @Override + public final > ActionFuture execute(Action action, Request request) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + execute(action, request, actionFuture); + return actionFuture; + } + + /** + * This is the single execution point of *all* clients. + */ + @Override + public final > void execute(Action action, Request request, ActionListener listener) { + headers.applyTo(request); + listener = threadedWrapper.wrap(listener); + doExecute(action, request, listener); + } + + protected abstract > void doExecute(final Action action, final Request request, ActionListener listener); + + @Override public ActionFuture index(final IndexRequest request) { return execute(IndexAction.INSTANCE, request); @@ -100,7 +347,7 @@ public abstract class AbstractClient implements Client { @Override public IndexRequestBuilder prepareIndex() { - return new IndexRequestBuilder(this, null); + return new IndexRequestBuilder(this, IndexAction.INSTANCE, null); } @Override @@ -125,12 +372,12 @@ public abstract class AbstractClient implements Client { @Override public UpdateRequestBuilder prepareUpdate() { - return new UpdateRequestBuilder(this, null, null, null); + return new UpdateRequestBuilder(this, UpdateAction.INSTANCE, null, null, null); } @Override public UpdateRequestBuilder prepareUpdate(String index, String type, String id) { - return new UpdateRequestBuilder(this, index, type, id); + return new UpdateRequestBuilder(this, UpdateAction.INSTANCE, index, type, id); } @Override @@ -145,7 +392,7 @@ public abstract class AbstractClient implements Client { @Override public DeleteRequestBuilder prepareDelete() { - return new DeleteRequestBuilder(this, null); + return new DeleteRequestBuilder(this, DeleteAction.INSTANCE, null); } @Override @@ -165,7 +412,7 @@ public 
abstract class AbstractClient implements Client { @Override public BulkRequestBuilder prepareBulk() { - return new BulkRequestBuilder(this); + return new BulkRequestBuilder(this, BulkAction.INSTANCE); } @Override @@ -180,7 +427,7 @@ public abstract class AbstractClient implements Client { @Override public GetRequestBuilder prepareGet() { - return new GetRequestBuilder(this, null); + return new GetRequestBuilder(this, GetAction.INSTANCE, null); } @Override @@ -201,7 +448,7 @@ public abstract class AbstractClient implements Client { @Override public GetIndexedScriptRequestBuilder prepareGetIndexedScript() { - return new GetIndexedScriptRequestBuilder(this); + return new GetIndexedScriptRequestBuilder(this, GetIndexedScriptAction.INSTANCE); } @Override @@ -215,15 +462,11 @@ public abstract class AbstractClient implements Client { */ @Override public PutIndexedScriptRequestBuilder preparePutIndexedScript() { - return new PutIndexedScriptRequestBuilder(this); + return new PutIndexedScriptRequestBuilder(this, PutIndexedScriptAction.INSTANCE); } /** * Put the indexed script - * @param scriptLang - * @param id - * @param source - * @return */ @Override public PutIndexedScriptRequestBuilder preparePutIndexedScript(@Nullable String scriptLang, String id, String source){ @@ -232,9 +475,6 @@ public abstract class AbstractClient implements Client { /** * Put an indexed script - * - * @param request - * @param listener */ @Override public void putIndexedScript(final PutIndexedScriptRequest request, ActionListener listener){ @@ -255,9 +495,6 @@ public abstract class AbstractClient implements Client { /** * delete an indexed script - * - * @param request - * @param listener */ @Override public void deleteIndexedScript(DeleteIndexedScriptRequest request, ActionListener listener){ @@ -286,9 +523,6 @@ public abstract class AbstractClient implements Client { /** * Delete an indexed script - * @param scriptLang - * @param id - * @return */ @Override public 
DeleteIndexedScriptRequestBuilder prepareDeleteIndexedScript(@Nullable String scriptLang, String id){ @@ -309,7 +543,7 @@ public abstract class AbstractClient implements Client { @Override public MultiGetRequestBuilder prepareMultiGet() { - return new MultiGetRequestBuilder(this); + return new MultiGetRequestBuilder(this, MultiGetAction.INSTANCE); } @Override @@ -324,7 +558,7 @@ public abstract class AbstractClient implements Client { @Override public SearchRequestBuilder prepareSearch(String... indices) { - return new SearchRequestBuilder(this).setIndices(indices); + return new SearchRequestBuilder(this, SearchAction.INSTANCE).setIndices(indices); } @Override @@ -339,7 +573,7 @@ public abstract class AbstractClient implements Client { @Override public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { - return new SearchScrollRequestBuilder(this, scrollId); + return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); } @Override @@ -354,7 +588,7 @@ public abstract class AbstractClient implements Client { @Override public MultiSearchRequestBuilder prepareMultiSearch() { - return new MultiSearchRequestBuilder(this); + return new MultiSearchRequestBuilder(this, MultiSearchAction.INSTANCE); } @Override @@ -369,7 +603,7 @@ public abstract class AbstractClient implements Client { @Override public CountRequestBuilder prepareCount(String... indices) { - return new CountRequestBuilder(this).setIndices(indices); + return new CountRequestBuilder(this, CountAction.INSTANCE).setIndices(indices); } @Override @@ -384,7 +618,7 @@ public abstract class AbstractClient implements Client { @Override public ExistsRequestBuilder prepareExists(String... 
indices) { - return new ExistsRequestBuilder(this).setIndices(indices); + return new ExistsRequestBuilder(this, ExistsAction.INSTANCE).setIndices(indices); } @Override @@ -399,7 +633,7 @@ public abstract class AbstractClient implements Client { @Override public SuggestRequestBuilder prepareSuggest(String... indices) { - return new SuggestRequestBuilder(this).setIndices(indices); + return new SuggestRequestBuilder(this, SuggestAction.INSTANCE).setIndices(indices); } @Override @@ -414,7 +648,7 @@ public abstract class AbstractClient implements Client { @Override public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) { - return new MoreLikeThisRequestBuilder(this, index, type, id); + return new MoreLikeThisRequestBuilder(this, MoreLikeThisAction.INSTANCE, index, type, id); } @Override @@ -429,12 +663,12 @@ public abstract class AbstractClient implements Client { @Override public TermVectorsRequestBuilder prepareTermVectors() { - return new TermVectorsRequestBuilder(this); + return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE); } @Override public TermVectorsRequestBuilder prepareTermVectors(String index, String type, String id) { - return new TermVectorsRequestBuilder(this, index, type, id); + return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE, index, type, id); } @Deprecated @@ -473,7 +707,7 @@ public abstract class AbstractClient implements Client { @Override public MultiTermVectorsRequestBuilder prepareMultiTermVectors() { - return new MultiTermVectorsRequestBuilder(this); + return new MultiTermVectorsRequestBuilder(this, MultiTermVectorsAction.INSTANCE); } @Override @@ -488,12 +722,12 @@ public abstract class AbstractClient implements Client { @Override public PercolateRequestBuilder preparePercolate() { - return new PercolateRequestBuilder(this); + return new PercolateRequestBuilder(this, PercolateAction.INSTANCE); } @Override public MultiPercolateRequestBuilder prepareMultiPercolate() { - 
return new MultiPercolateRequestBuilder(this); + return new MultiPercolateRequestBuilder(this, MultiPercolateAction.INSTANCE); } @Override @@ -508,7 +742,7 @@ public abstract class AbstractClient implements Client { @Override public ExplainRequestBuilder prepareExplain(String index, String type, String id) { - return new ExplainRequestBuilder(this, index, type, id); + return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id); } @Override @@ -533,7 +767,7 @@ public abstract class AbstractClient implements Client { @Override public ClearScrollRequestBuilder prepareClearScroll() { - return new ClearScrollRequestBuilder(this); + return new ClearScrollRequestBuilder(this, ClearScrollAction.INSTANCE); } @Override @@ -548,6 +782,838 @@ public abstract class AbstractClient implements Client { @Override public FieldStatsRequestBuilder prepareFieldStats() { - return new FieldStatsRequestBuilder(this); + return new FieldStatsRequestBuilder(this, FieldStatsAction.INSTANCE); + } + + static class Admin implements AdminClient { + + private final ClusterAdmin clusterAdmin; + private final IndicesAdmin indicesAdmin; + + public Admin(ElasticsearchClient client) { + this.clusterAdmin = new ClusterAdmin(client); + this.indicesAdmin = new IndicesAdmin(client); + } + + @Override + public ClusterAdminClient cluster() { + return clusterAdmin; + } + + @Override + public IndicesAdminClient indices() { + return indicesAdmin; + } + } + + static class ClusterAdmin implements ClusterAdminClient { + + private final ElasticsearchClient client; + + public ClusterAdmin(ElasticsearchClient client) { + this.client = client; + } + + @Override + public > ActionFuture execute(Action action, Request request) { + return client.execute(action, request); + } + + @Override + public > void execute(Action action, Request request, ActionListener listener) { + client.execute(action, request, listener); + } + + @Override + public > RequestBuilder prepareExecute(Action action) { + return 
client.prepareExecute(action); + } + + @Override + public ThreadPool threadPool() { + return client.threadPool(); + } + + @Override + public ActionFuture health(final ClusterHealthRequest request) { + return execute(ClusterHealthAction.INSTANCE, request); + } + + @Override + public void health(final ClusterHealthRequest request, final ActionListener listener) { + execute(ClusterHealthAction.INSTANCE, request, listener); + } + + @Override + public ClusterHealthRequestBuilder prepareHealth(String... indices) { + return new ClusterHealthRequestBuilder(this, ClusterHealthAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture state(final ClusterStateRequest request) { + return execute(ClusterStateAction.INSTANCE, request); + } + + @Override + public void state(final ClusterStateRequest request, final ActionListener listener) { + execute(ClusterStateAction.INSTANCE, request, listener); + } + + @Override + public ClusterStateRequestBuilder prepareState() { + return new ClusterStateRequestBuilder(this, ClusterStateAction.INSTANCE); + } + + @Override + public ActionFuture reroute(final ClusterRerouteRequest request) { + return execute(ClusterRerouteAction.INSTANCE, request); + } + + @Override + public void reroute(final ClusterRerouteRequest request, final ActionListener listener) { + execute(ClusterRerouteAction.INSTANCE, request, listener); + } + + @Override + public ClusterRerouteRequestBuilder prepareReroute() { + return new ClusterRerouteRequestBuilder(this, ClusterRerouteAction.INSTANCE); + } + + @Override + public ActionFuture updateSettings(final ClusterUpdateSettingsRequest request) { + return execute(ClusterUpdateSettingsAction.INSTANCE, request); + } + + @Override + public void updateSettings(final ClusterUpdateSettingsRequest request, final ActionListener listener) { + execute(ClusterUpdateSettingsAction.INSTANCE, request, listener); + } + + @Override + public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { + return new 
ClusterUpdateSettingsRequestBuilder(this, ClusterUpdateSettingsAction.INSTANCE); + } + + @Override + public ActionFuture nodesInfo(final NodesInfoRequest request) { + return execute(NodesInfoAction.INSTANCE, request); + } + + @Override + public void nodesInfo(final NodesInfoRequest request, final ActionListener listener) { + execute(NodesInfoAction.INSTANCE, request, listener); + } + + @Override + public NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds) { + return new NodesInfoRequestBuilder(this, NodesInfoAction.INSTANCE).setNodesIds(nodesIds); + } + + @Override + public ActionFuture nodesStats(final NodesStatsRequest request) { + return execute(NodesStatsAction.INSTANCE, request); + } + + @Override + public void nodesStats(final NodesStatsRequest request, final ActionListener listener) { + execute(NodesStatsAction.INSTANCE, request, listener); + } + + @Override + public NodesStatsRequestBuilder prepareNodesStats(String... nodesIds) { + return new NodesStatsRequestBuilder(this, NodesStatsAction.INSTANCE).setNodesIds(nodesIds); + } + + @Override + public ActionFuture clusterStats(ClusterStatsRequest request) { + return execute(ClusterStatsAction.INSTANCE, request); + } + + @Override + public void clusterStats(ClusterStatsRequest request, ActionListener listener) { + execute(ClusterStatsAction.INSTANCE, request, listener); + } + + @Override + public ClusterStatsRequestBuilder prepareClusterStats() { + return new ClusterStatsRequestBuilder(this, ClusterStatsAction.INSTANCE); + } + + @Override + public ActionFuture nodesHotThreads(NodesHotThreadsRequest request) { + return execute(NodesHotThreadsAction.INSTANCE, request); + } + + @Override + public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener) { + execute(NodesHotThreadsAction.INSTANCE, request, listener); + } + + @Override + public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... 
nodesIds) { + return new NodesHotThreadsRequestBuilder(this, NodesHotThreadsAction.INSTANCE).setNodesIds(nodesIds); + } + + @Override + public ActionFuture searchShards(final ClusterSearchShardsRequest request) { + return execute(ClusterSearchShardsAction.INSTANCE, request); + } + + @Override + public void searchShards(final ClusterSearchShardsRequest request, final ActionListener listener) { + execute(ClusterSearchShardsAction.INSTANCE, request, listener); + } + + @Override + public ClusterSearchShardsRequestBuilder prepareSearchShards() { + return new ClusterSearchShardsRequestBuilder(this, ClusterSearchShardsAction.INSTANCE); + } + + @Override + public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) { + return new ClusterSearchShardsRequestBuilder(this, ClusterSearchShardsAction.INSTANCE).setIndices(indices); + } + + @Override + public PendingClusterTasksRequestBuilder preparePendingClusterTasks() { + return new PendingClusterTasksRequestBuilder(this, PendingClusterTasksAction.INSTANCE); + } + + @Override + public ActionFuture pendingClusterTasks(PendingClusterTasksRequest request) { + return execute(PendingClusterTasksAction.INSTANCE, request); + } + + @Override + public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener) { + execute(PendingClusterTasksAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture putRepository(PutRepositoryRequest request) { + return execute(PutRepositoryAction.INSTANCE, request); + } + + @Override + public void putRepository(PutRepositoryRequest request, ActionListener listener) { + execute(PutRepositoryAction.INSTANCE, request, listener); + } + + @Override + public PutRepositoryRequestBuilder preparePutRepository(String name) { + return new PutRepositoryRequestBuilder(this, PutRepositoryAction.INSTANCE, name); + } + + @Override + public ActionFuture createSnapshot(CreateSnapshotRequest request) { + return execute(CreateSnapshotAction.INSTANCE, request); 
+ } + + @Override + public void createSnapshot(CreateSnapshotRequest request, ActionListener listener) { + execute(CreateSnapshotAction.INSTANCE, request, listener); + } + + @Override + public CreateSnapshotRequestBuilder prepareCreateSnapshot(String repository, String name) { + return new CreateSnapshotRequestBuilder(this, CreateSnapshotAction.INSTANCE, repository, name); + } + + @Override + public ActionFuture getSnapshots(GetSnapshotsRequest request) { + return execute(GetSnapshotsAction.INSTANCE, request); + } + + @Override + public void getSnapshots(GetSnapshotsRequest request, ActionListener listener) { + execute(GetSnapshotsAction.INSTANCE, request, listener); + } + + @Override + public GetSnapshotsRequestBuilder prepareGetSnapshots(String repository) { + return new GetSnapshotsRequestBuilder(this, GetSnapshotsAction.INSTANCE, repository); + } + + + @Override + public ActionFuture deleteSnapshot(DeleteSnapshotRequest request) { + return execute(DeleteSnapshotAction.INSTANCE, request); + } + + @Override + public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener listener) { + execute(DeleteSnapshotAction.INSTANCE, request, listener); + } + + @Override + public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String name) { + return new DeleteSnapshotRequestBuilder(this, DeleteSnapshotAction.INSTANCE, repository, name); + } + + + @Override + public ActionFuture deleteRepository(DeleteRepositoryRequest request) { + return execute(DeleteRepositoryAction.INSTANCE, request); + } + + @Override + public void deleteRepository(DeleteRepositoryRequest request, ActionListener listener) { + execute(DeleteRepositoryAction.INSTANCE, request, listener); + } + + @Override + public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) { + return new DeleteRepositoryRequestBuilder(this, DeleteRepositoryAction.INSTANCE, name); + } + + @Override + public ActionFuture verifyRepository(VerifyRepositoryRequest request) { + return 
execute(VerifyRepositoryAction.INSTANCE, request); + } + + @Override + public void verifyRepository(VerifyRepositoryRequest request, ActionListener listener) { + execute(VerifyRepositoryAction.INSTANCE, request, listener); + } + + @Override + public VerifyRepositoryRequestBuilder prepareVerifyRepository(String name) { + return new VerifyRepositoryRequestBuilder(this, VerifyRepositoryAction.INSTANCE, name); + } + + @Override + public ActionFuture getRepositories(GetRepositoriesRequest request) { + return execute(GetRepositoriesAction.INSTANCE, request); + } + + @Override + public void getRepositories(GetRepositoriesRequest request, ActionListener listener) { + execute(GetRepositoriesAction.INSTANCE, request, listener); + } + + @Override + public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) { + return new GetRepositoriesRequestBuilder(this, GetRepositoriesAction.INSTANCE, name); + } + + @Override + public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { + return execute(RestoreSnapshotAction.INSTANCE, request); + } + + @Override + public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener) { + execute(RestoreSnapshotAction.INSTANCE, request, listener); + } + + @Override + public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) { + return new RestoreSnapshotRequestBuilder(this, RestoreSnapshotAction.INSTANCE, repository, snapshot); + } + + + @Override + public ActionFuture snapshotsStatus(SnapshotsStatusRequest request) { + return execute(SnapshotsStatusAction.INSTANCE, request); + } + + @Override + public void snapshotsStatus(SnapshotsStatusRequest request, ActionListener listener) { + execute(SnapshotsStatusAction.INSTANCE, request, listener); + } + + @Override + public SnapshotsStatusRequestBuilder prepareSnapshotStatus(String repository) { + return new SnapshotsStatusRequestBuilder(this, SnapshotsStatusAction.INSTANCE, repository); + } + + @Override + public 
SnapshotsStatusRequestBuilder prepareSnapshotStatus() { + return new SnapshotsStatusRequestBuilder(this, SnapshotsStatusAction.INSTANCE); + } + } + + static class IndicesAdmin implements IndicesAdminClient { + + private final ElasticsearchClient client; + + public IndicesAdmin(ElasticsearchClient client) { + this.client = client; + } + + @Override + public > ActionFuture execute(Action action, Request request) { + return client.execute(action, request); + } + + @Override + public > void execute(Action action, Request request, ActionListener listener) { + client.execute(action, request, listener); + } + + @Override + public > RequestBuilder prepareExecute(Action action) { + return client.prepareExecute(action); + } + + @Override + public ThreadPool threadPool() { + return client.threadPool(); + } + + @Override + public ActionFuture exists(final IndicesExistsRequest request) { + return execute(IndicesExistsAction.INSTANCE, request); + } + + @Override + public void exists(final IndicesExistsRequest request, final ActionListener listener) { + execute(IndicesExistsAction.INSTANCE, request, listener); + } + + @Override + public IndicesExistsRequestBuilder prepareExists(String... indices) { + return new IndicesExistsRequestBuilder(this, IndicesExistsAction.INSTANCE, indices); + } + + @Override + public ActionFuture typesExists(TypesExistsRequest request) { + return execute(TypesExistsAction.INSTANCE, request); + } + + @Override + public void typesExists(TypesExistsRequest request, ActionListener listener) { + execute(TypesExistsAction.INSTANCE, request, listener); + } + + @Override + public TypesExistsRequestBuilder prepareTypesExists(String... 
index) { + return new TypesExistsRequestBuilder(this, TypesExistsAction.INSTANCE, index); + } + + @Override + public ActionFuture aliases(final IndicesAliasesRequest request) { + return execute(IndicesAliasesAction.INSTANCE, request); + } + + @Override + public void aliases(final IndicesAliasesRequest request, final ActionListener listener) { + execute(IndicesAliasesAction.INSTANCE, request, listener); + } + + @Override + public IndicesAliasesRequestBuilder prepareAliases() { + return new IndicesAliasesRequestBuilder(this, IndicesAliasesAction.INSTANCE); + } + + @Override + public ActionFuture getAliases(GetAliasesRequest request) { + return execute(GetAliasesAction.INSTANCE, request); + } + + @Override + public void getAliases(GetAliasesRequest request, ActionListener listener) { + execute(GetAliasesAction.INSTANCE, request, listener); + } + + @Override + public GetAliasesRequestBuilder prepareGetAliases(String... aliases) { + return new GetAliasesRequestBuilder(this, GetAliasesAction.INSTANCE, aliases); + } + + @Override + public ActionFuture clearCache(final ClearIndicesCacheRequest request) { + return execute(ClearIndicesCacheAction.INSTANCE, request); + } + + @Override + public void aliasesExist(GetAliasesRequest request, ActionListener listener) { + execute(AliasesExistAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture aliasesExist(GetAliasesRequest request) { + return execute(AliasesExistAction.INSTANCE, request); + } + + @Override + public AliasesExistRequestBuilder prepareAliasesExist(String... 
aliases) { + return new AliasesExistRequestBuilder(this, AliasesExistAction.INSTANCE, aliases); + } + + @Override + public ActionFuture getIndex(GetIndexRequest request) { + return execute(GetIndexAction.INSTANCE, request); + } + + @Override + public void getIndex(GetIndexRequest request, ActionListener listener) { + execute(GetIndexAction.INSTANCE, request, listener); + } + + @Override + public GetIndexRequestBuilder prepareGetIndex() { + return new GetIndexRequestBuilder(this, GetIndexAction.INSTANCE); + } + + @Override + public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { + execute(ClearIndicesCacheAction.INSTANCE, request, listener); + } + + @Override + public ClearIndicesCacheRequestBuilder prepareClearCache(String... indices) { + return new ClearIndicesCacheRequestBuilder(this, ClearIndicesCacheAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture create(final CreateIndexRequest request) { + return execute(CreateIndexAction.INSTANCE, request); + } + + @Override + public void create(final CreateIndexRequest request, final ActionListener listener) { + execute(CreateIndexAction.INSTANCE, request, listener); + } + + @Override + public CreateIndexRequestBuilder prepareCreate(String index) { + return new CreateIndexRequestBuilder(this, CreateIndexAction.INSTANCE, index); + } + + @Override + public ActionFuture delete(final DeleteIndexRequest request) { + return execute(DeleteIndexAction.INSTANCE, request); + } + + @Override + public void delete(final DeleteIndexRequest request, final ActionListener listener) { + execute(DeleteIndexAction.INSTANCE, request, listener); + } + + @Override + public DeleteIndexRequestBuilder prepareDelete(String... 
indices) { + return new DeleteIndexRequestBuilder(this, DeleteIndexAction.INSTANCE, indices); + } + + @Override + public ActionFuture close(final CloseIndexRequest request) { + return execute(CloseIndexAction.INSTANCE, request); + } + + @Override + public void close(final CloseIndexRequest request, final ActionListener listener) { + execute(CloseIndexAction.INSTANCE, request, listener); + } + + @Override + public CloseIndexRequestBuilder prepareClose(String... indices) { + return new CloseIndexRequestBuilder(this, CloseIndexAction.INSTANCE, indices); + } + + @Override + public ActionFuture open(final OpenIndexRequest request) { + return execute(OpenIndexAction.INSTANCE, request); + } + + @Override + public void open(final OpenIndexRequest request, final ActionListener listener) { + execute(OpenIndexAction.INSTANCE, request, listener); + } + + @Override + public OpenIndexRequestBuilder prepareOpen(String... indices) { + return new OpenIndexRequestBuilder(this, OpenIndexAction.INSTANCE, indices); + } + + @Override + public ActionFuture flush(final FlushRequest request) { + return execute(FlushAction.INSTANCE, request); + } + + @Override + public void flush(final FlushRequest request, final ActionListener listener) { + execute(FlushAction.INSTANCE, request, listener); + } + + @Override + public FlushRequestBuilder prepareFlush(String... indices) { + return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices); + } + + @Override + public void getMappings(GetMappingsRequest request, ActionListener listener) { + execute(GetMappingsAction.INSTANCE, request, listener); + } + + @Override + public void getFieldMappings(GetFieldMappingsRequest request, ActionListener listener) { + execute(GetFieldMappingsAction.INSTANCE, request, listener); + } + + @Override + public GetMappingsRequestBuilder prepareGetMappings(String... 
indices) { + return new GetMappingsRequestBuilder(this, GetMappingsAction.INSTANCE, indices); + } + + @Override + public ActionFuture getMappings(GetMappingsRequest request) { + return execute(GetMappingsAction.INSTANCE, request); + } + + @Override + public GetFieldMappingsRequestBuilder prepareGetFieldMappings(String... indices) { + return new GetFieldMappingsRequestBuilder(this, GetFieldMappingsAction.INSTANCE, indices); + } + + @Override + public ActionFuture getFieldMappings(GetFieldMappingsRequest request) { + return execute(GetFieldMappingsAction.INSTANCE, request); + } + + @Override + public ActionFuture putMapping(final PutMappingRequest request) { + return execute(PutMappingAction.INSTANCE, request); + } + + @Override + public void putMapping(final PutMappingRequest request, final ActionListener listener) { + execute(PutMappingAction.INSTANCE, request, listener); + } + + @Override + public PutMappingRequestBuilder preparePutMapping(String... indices) { + return new PutMappingRequestBuilder(this, PutMappingAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture optimize(final OptimizeRequest request) { + return execute(OptimizeAction.INSTANCE, request); + } + + @Override + public void optimize(final OptimizeRequest request, final ActionListener listener) { + execute(OptimizeAction.INSTANCE, request, listener); + } + + @Override + public OptimizeRequestBuilder prepareOptimize(String... indices) { + return new OptimizeRequestBuilder(this, OptimizeAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture refresh(final RefreshRequest request) { + return execute(RefreshAction.INSTANCE, request); + } + + @Override + public void refresh(final RefreshRequest request, final ActionListener listener) { + execute(RefreshAction.INSTANCE, request, listener); + } + + @Override + public RefreshRequestBuilder prepareRefresh(String... 
indices) { + return new RefreshRequestBuilder(this, RefreshAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture stats(final IndicesStatsRequest request) { + return execute(IndicesStatsAction.INSTANCE, request); + } + + @Override + public void stats(final IndicesStatsRequest request, final ActionListener listener) { + execute(IndicesStatsAction.INSTANCE, request, listener); + } + + @Override + public IndicesStatsRequestBuilder prepareStats(String... indices) { + return new IndicesStatsRequestBuilder(this, IndicesStatsAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture recoveries(final RecoveryRequest request) { + return execute(RecoveryAction.INSTANCE, request); + } + + @Override + public void recoveries(final RecoveryRequest request, final ActionListener listener) { + execute(RecoveryAction.INSTANCE, request, listener); + } + + @Override + public RecoveryRequestBuilder prepareRecoveries(String... indices) { + return new RecoveryRequestBuilder(this, RecoveryAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture segments(final IndicesSegmentsRequest request) { + return execute(IndicesSegmentsAction.INSTANCE, request); + } + + @Override + public void segments(final IndicesSegmentsRequest request, final ActionListener listener) { + execute(IndicesSegmentsAction.INSTANCE, request, listener); + } + + @Override + public IndicesSegmentsRequestBuilder prepareSegments(String... indices) { + return new IndicesSegmentsRequestBuilder(this, IndicesSegmentsAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture updateSettings(final UpdateSettingsRequest request) { + return execute(UpdateSettingsAction.INSTANCE, request); + } + + @Override + public void updateSettings(final UpdateSettingsRequest request, final ActionListener listener) { + execute(UpdateSettingsAction.INSTANCE, request, listener); + } + + @Override + public UpdateSettingsRequestBuilder prepareUpdateSettings(String... 
indices) { + return new UpdateSettingsRequestBuilder(this, UpdateSettingsAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture analyze(final AnalyzeRequest request) { + return execute(AnalyzeAction.INSTANCE, request); + } + + @Override + public void analyze(final AnalyzeRequest request, final ActionListener listener) { + execute(AnalyzeAction.INSTANCE, request, listener); + } + + @Override + public AnalyzeRequestBuilder prepareAnalyze(@Nullable String index, String text) { + return new AnalyzeRequestBuilder(this, AnalyzeAction.INSTANCE, index, text); + } + + @Override + public AnalyzeRequestBuilder prepareAnalyze(String text) { + return new AnalyzeRequestBuilder(this, AnalyzeAction.INSTANCE, null, text); + } + + @Override + public ActionFuture putTemplate(final PutIndexTemplateRequest request) { + return execute(PutIndexTemplateAction.INSTANCE, request); + } + + @Override + public void putTemplate(final PutIndexTemplateRequest request, final ActionListener listener) { + execute(PutIndexTemplateAction.INSTANCE, request, listener); + } + + @Override + public PutIndexTemplateRequestBuilder preparePutTemplate(String name) { + return new PutIndexTemplateRequestBuilder(this, PutIndexTemplateAction.INSTANCE, name); + } + + @Override + public ActionFuture getTemplates(final GetIndexTemplatesRequest request) { + return execute(GetIndexTemplatesAction.INSTANCE, request); + } + + @Override + public void getTemplates(final GetIndexTemplatesRequest request, final ActionListener listener) { + execute(GetIndexTemplatesAction.INSTANCE, request, listener); + } + + @Override + public GetIndexTemplatesRequestBuilder prepareGetTemplates(String... 
names) { + return new GetIndexTemplatesRequestBuilder(this, GetIndexTemplatesAction.INSTANCE, names); + } + + @Override + public ActionFuture deleteTemplate(final DeleteIndexTemplateRequest request) { + return execute(DeleteIndexTemplateAction.INSTANCE, request); + } + + @Override + public void deleteTemplate(final DeleteIndexTemplateRequest request, final ActionListener listener) { + execute(DeleteIndexTemplateAction.INSTANCE, request, listener); + } + + @Override + public DeleteIndexTemplateRequestBuilder prepareDeleteTemplate(String name) { + return new DeleteIndexTemplateRequestBuilder(this, DeleteIndexTemplateAction.INSTANCE, name); + } + + @Override + public ActionFuture validateQuery(final ValidateQueryRequest request) { + return execute(ValidateQueryAction.INSTANCE, request); + } + + @Override + public void validateQuery(final ValidateQueryRequest request, final ActionListener listener) { + execute(ValidateQueryAction.INSTANCE, request, listener); + } + + @Override + public ValidateQueryRequestBuilder prepareValidateQuery(String... 
indices) { + return new ValidateQueryRequestBuilder(this, ValidateQueryAction.INSTANCE).setIndices(indices); + } + + @Override + public ActionFuture putWarmer(PutWarmerRequest request) { + return execute(PutWarmerAction.INSTANCE, request); + } + + @Override + public void putWarmer(PutWarmerRequest request, ActionListener listener) { + execute(PutWarmerAction.INSTANCE, request, listener); + } + + @Override + public PutWarmerRequestBuilder preparePutWarmer(String name) { + return new PutWarmerRequestBuilder(this, PutWarmerAction.INSTANCE, name); + } + + @Override + public ActionFuture deleteWarmer(DeleteWarmerRequest request) { + return execute(DeleteWarmerAction.INSTANCE, request); + } + + @Override + public void deleteWarmer(DeleteWarmerRequest request, ActionListener listener) { + execute(DeleteWarmerAction.INSTANCE, request, listener); + } + + @Override + public DeleteWarmerRequestBuilder prepareDeleteWarmer() { + return new DeleteWarmerRequestBuilder(this, DeleteWarmerAction.INSTANCE); + } + + @Override + public GetWarmersRequestBuilder prepareGetWarmers(String... indices) { + return new GetWarmersRequestBuilder(this, GetWarmersAction.INSTANCE, indices); + } + + @Override + public ActionFuture getWarmers(GetWarmersRequest request) { + return execute(GetWarmersAction.INSTANCE, request); + } + + @Override + public void getWarmers(GetWarmersRequest request, ActionListener listener) { + execute(GetWarmersAction.INSTANCE, request, listener); + } + + @Override + public GetSettingsRequestBuilder prepareGetSettings(String... 
indices) { + return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices); + } + + @Override + public ActionFuture getSettings(GetSettingsRequest request) { + return execute(GetSettingsAction.INSTANCE, request); + } + + @Override + public void getSettings(GetSettingsRequest request, ActionListener listener) { + execute(GetSettingsAction.INSTANCE, request, listener); + } } } diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java deleted file mode 100644 index f4a6e58e49a..00000000000 --- a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.support; - -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; -import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import 
org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequestBuilder; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequestBuilder; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; -import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequestBuilder; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; 
-import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; -import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotAction; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequestBuilder; -import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import 
org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; -import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; -import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; -import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import org.elasticsearch.client.ClusterAdminClient; - -/** - * - */ -public abstract class AbstractClusterAdminClient implements ClusterAdminClient { - - @Override - public > RequestBuilder prepareExecute(Action action) { - return action.newRequestBuilder(this); - } - - @Override - public ActionFuture health(final ClusterHealthRequest request) { - return execute(ClusterHealthAction.INSTANCE, request); - } - - @Override - public void health(final ClusterHealthRequest request, final ActionListener listener) { - execute(ClusterHealthAction.INSTANCE, request, listener); - } - - @Override - public ClusterHealthRequestBuilder prepareHealth(String... 
indices) { - return new ClusterHealthRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture state(final ClusterStateRequest request) { - return execute(ClusterStateAction.INSTANCE, request); - } - - @Override - public void state(final ClusterStateRequest request, final ActionListener listener) { - execute(ClusterStateAction.INSTANCE, request, listener); - } - - @Override - public ClusterStateRequestBuilder prepareState() { - return new ClusterStateRequestBuilder(this); - } - - @Override - public ActionFuture reroute(final ClusterRerouteRequest request) { - return execute(ClusterRerouteAction.INSTANCE, request); - } - - @Override - public void reroute(final ClusterRerouteRequest request, final ActionListener listener) { - execute(ClusterRerouteAction.INSTANCE, request, listener); - } - - @Override - public ClusterRerouteRequestBuilder prepareReroute() { - return new ClusterRerouteRequestBuilder(this); - } - - @Override - public ActionFuture updateSettings(final ClusterUpdateSettingsRequest request) { - return execute(ClusterUpdateSettingsAction.INSTANCE, request); - } - - @Override - public void updateSettings(final ClusterUpdateSettingsRequest request, final ActionListener listener) { - execute(ClusterUpdateSettingsAction.INSTANCE, request, listener); - } - - @Override - public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { - return new ClusterUpdateSettingsRequestBuilder(this); - } - - @Override - public ActionFuture nodesInfo(final NodesInfoRequest request) { - return execute(NodesInfoAction.INSTANCE, request); - } - - @Override - public void nodesInfo(final NodesInfoRequest request, final ActionListener listener) { - execute(NodesInfoAction.INSTANCE, request, listener); - } - - @Override - public NodesInfoRequestBuilder prepareNodesInfo(String... 
nodesIds) { - return new NodesInfoRequestBuilder(this).setNodesIds(nodesIds); - } - - @Override - public ActionFuture nodesStats(final NodesStatsRequest request) { - return execute(NodesStatsAction.INSTANCE, request); - } - - @Override - public void nodesStats(final NodesStatsRequest request, final ActionListener listener) { - execute(NodesStatsAction.INSTANCE, request, listener); - } - - @Override - public NodesStatsRequestBuilder prepareNodesStats(String... nodesIds) { - return new NodesStatsRequestBuilder(this).setNodesIds(nodesIds); - } - - @Override - public ActionFuture clusterStats(ClusterStatsRequest request) { - return execute(ClusterStatsAction.INSTANCE, request); - } - - @Override - public void clusterStats(ClusterStatsRequest request, ActionListener listener) { - execute(ClusterStatsAction.INSTANCE, request, listener); - } - - @Override - public ClusterStatsRequestBuilder prepareClusterStats() { - return new ClusterStatsRequestBuilder(this); - } - - @Override - public ActionFuture nodesHotThreads(NodesHotThreadsRequest request) { - return execute(NodesHotThreadsAction.INSTANCE, request); - } - - @Override - public void nodesHotThreads(NodesHotThreadsRequest request, ActionListener listener) { - execute(NodesHotThreadsAction.INSTANCE, request, listener); - } - - @Override - public NodesHotThreadsRequestBuilder prepareNodesHotThreads(String... 
nodesIds) { - return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds); - } - - @Override - public ActionFuture searchShards(final ClusterSearchShardsRequest request) { - return execute(ClusterSearchShardsAction.INSTANCE, request); - } - - @Override - public void searchShards(final ClusterSearchShardsRequest request, final ActionListener listener) { - execute(ClusterSearchShardsAction.INSTANCE, request, listener); - } - - @Override - public ClusterSearchShardsRequestBuilder prepareSearchShards() { - return new ClusterSearchShardsRequestBuilder(this); - } - - @Override - public ClusterSearchShardsRequestBuilder prepareSearchShards(String... indices) { - return new ClusterSearchShardsRequestBuilder(this).setIndices(indices); - } - - @Override - public PendingClusterTasksRequestBuilder preparePendingClusterTasks() { - return new PendingClusterTasksRequestBuilder(this); - } - - @Override - public ActionFuture pendingClusterTasks(PendingClusterTasksRequest request) { - return execute(PendingClusterTasksAction.INSTANCE, request); - } - - @Override - public void pendingClusterTasks(PendingClusterTasksRequest request, ActionListener listener) { - execute(PendingClusterTasksAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture putRepository(PutRepositoryRequest request) { - return execute(PutRepositoryAction.INSTANCE, request); - } - - @Override - public void putRepository(PutRepositoryRequest request, ActionListener listener) { - execute(PutRepositoryAction.INSTANCE, request, listener); - } - - @Override - public PutRepositoryRequestBuilder preparePutRepository(String name) { - return new PutRepositoryRequestBuilder(this, name); - } - - @Override - public ActionFuture createSnapshot(CreateSnapshotRequest request) { - return execute(CreateSnapshotAction.INSTANCE, request); - } - - @Override - public void createSnapshot(CreateSnapshotRequest request, ActionListener listener) { - execute(CreateSnapshotAction.INSTANCE, request, listener); - } 
- - @Override - public CreateSnapshotRequestBuilder prepareCreateSnapshot(String repository, String name) { - return new CreateSnapshotRequestBuilder(this, repository, name); - } - - @Override - public ActionFuture getSnapshots(GetSnapshotsRequest request) { - return execute(GetSnapshotsAction.INSTANCE, request); - } - - @Override - public void getSnapshots(GetSnapshotsRequest request, ActionListener listener) { - execute(GetSnapshotsAction.INSTANCE, request, listener); - } - - @Override - public GetSnapshotsRequestBuilder prepareGetSnapshots(String repository) { - return new GetSnapshotsRequestBuilder(this, repository); - } - - - @Override - public ActionFuture deleteSnapshot(DeleteSnapshotRequest request) { - return execute(DeleteSnapshotAction.INSTANCE, request); - } - - @Override - public void deleteSnapshot(DeleteSnapshotRequest request, ActionListener listener) { - execute(DeleteSnapshotAction.INSTANCE, request, listener); - } - - @Override - public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String name) { - return new DeleteSnapshotRequestBuilder(this, repository, name); - } - - - @Override - public ActionFuture deleteRepository(DeleteRepositoryRequest request) { - return execute(DeleteRepositoryAction.INSTANCE, request); - } - - @Override - public void deleteRepository(DeleteRepositoryRequest request, ActionListener listener) { - execute(DeleteRepositoryAction.INSTANCE, request, listener); - } - - @Override - public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) { - return new DeleteRepositoryRequestBuilder(this, name); - } - - @Override - public ActionFuture verifyRepository(VerifyRepositoryRequest request) { - return execute(VerifyRepositoryAction.INSTANCE, request); - } - - @Override - public void verifyRepository(VerifyRepositoryRequest request, ActionListener listener) { - execute(VerifyRepositoryAction.INSTANCE, request, listener); - } - - @Override - public VerifyRepositoryRequestBuilder 
prepareVerifyRepository(String name) { - return new VerifyRepositoryRequestBuilder(this, name); - } - - @Override - public ActionFuture getRepositories(GetRepositoriesRequest request) { - return execute(GetRepositoriesAction.INSTANCE, request); - } - - @Override - public void getRepositories(GetRepositoriesRequest request, ActionListener listener) { - execute(GetRepositoriesAction.INSTANCE, request, listener); - } - - @Override - public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) { - return new GetRepositoriesRequestBuilder(this, name); - } - - @Override - public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { - return execute(RestoreSnapshotAction.INSTANCE, request); - } - - @Override - public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener) { - execute(RestoreSnapshotAction.INSTANCE, request, listener); - } - - @Override - public RestoreSnapshotRequestBuilder prepareRestoreSnapshot(String repository, String snapshot) { - return new RestoreSnapshotRequestBuilder(this, repository, snapshot); - } - - - @Override - public ActionFuture snapshotsStatus(SnapshotsStatusRequest request) { - return execute(SnapshotsStatusAction.INSTANCE, request); - } - - @Override - public void snapshotsStatus(SnapshotsStatusRequest request, ActionListener listener) { - execute(SnapshotsStatusAction.INSTANCE, request, listener); - } - - @Override - public SnapshotsStatusRequestBuilder prepareSnapshotStatus(String repository) { - return new SnapshotsStatusRequestBuilder(this, repository); - } - - @Override - public SnapshotsStatusRequestBuilder prepareSnapshotStatus() { - return new SnapshotsStatusRequestBuilder(this); - } -} diff --git a/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java deleted file mode 100644 index 02f8a015170..00000000000 --- 
a/src/main/java/org/elasticsearch/client/support/AbstractIndicesAdminClient.java +++ /dev/null @@ -1,602 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.support; - -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; -import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction; -import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder; -import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import 
org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; -import org.elasticsearch.action.admin.indices.close.CloseIndexAction; -import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexAction; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; -import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction; -import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; -import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequestBuilder; -import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; -import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction; -import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest; -import 
org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; -import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; -import org.elasticsearch.action.admin.indices.flush.FlushAction; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.get.GetIndexAction; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.*; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; -import org.elasticsearch.action.admin.indices.open.OpenIndexAction; -import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; -import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; -import org.elasticsearch.action.admin.indices.optimize.OptimizeAction; -import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; -import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder; -import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse; -import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryRequestBuilder; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import 
org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; -import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; -import 
org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequestBuilder; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; -import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; -import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; -import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; -import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequest; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerAction; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequest; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerRequestBuilder; -import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; 
-import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.common.Nullable; - -/** - * - */ -public abstract class AbstractIndicesAdminClient implements IndicesAdminClient { - - @Override - public > RequestBuilder prepareExecute(final Action action) { - return action.newRequestBuilder(this); - } - - @Override - public ActionFuture exists(final IndicesExistsRequest request) { - return execute(IndicesExistsAction.INSTANCE, request); - } - - @Override - public void exists(final IndicesExistsRequest request, final ActionListener listener) { - execute(IndicesExistsAction.INSTANCE, request, listener); - } - - @Override - public IndicesExistsRequestBuilder prepareExists(String... indices) { - return new IndicesExistsRequestBuilder(this, indices); - } - - @Override - public ActionFuture typesExists(TypesExistsRequest request) { - return execute(TypesExistsAction.INSTANCE, request); - } - - @Override - public void typesExists(TypesExistsRequest request, ActionListener listener) { - execute(TypesExistsAction.INSTANCE, request, listener); - } - - @Override - public TypesExistsRequestBuilder prepareTypesExists(String... 
index) { - return new TypesExistsRequestBuilder(this, index); - } - - @Override - public ActionFuture aliases(final IndicesAliasesRequest request) { - return execute(IndicesAliasesAction.INSTANCE, request); - } - - @Override - public void aliases(final IndicesAliasesRequest request, final ActionListener listener) { - execute(IndicesAliasesAction.INSTANCE, request, listener); - } - - @Override - public IndicesAliasesRequestBuilder prepareAliases() { - return new IndicesAliasesRequestBuilder(this); - } - - @Override - public ActionFuture getAliases(GetAliasesRequest request) { - return execute(GetAliasesAction.INSTANCE, request); - } - - @Override - public void getAliases(GetAliasesRequest request, ActionListener listener) { - execute(GetAliasesAction.INSTANCE, request, listener); - } - - @Override - public GetAliasesRequestBuilder prepareGetAliases(String... aliases) { - return new GetAliasesRequestBuilder(this, aliases); - } - - @Override - public ActionFuture clearCache(final ClearIndicesCacheRequest request) { - return execute(ClearIndicesCacheAction.INSTANCE, request); - } - - @Override - public void aliasesExist(GetAliasesRequest request, ActionListener listener) { - execute(AliasesExistAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture aliasesExist(GetAliasesRequest request) { - return execute(AliasesExistAction.INSTANCE, request); - } - - @Override - public AliasesExistRequestBuilder prepareAliasesExist(String... 
aliases) { - return new AliasesExistRequestBuilder(this, aliases); - } - - @Override - public ActionFuture getIndex(GetIndexRequest request) { - return execute(GetIndexAction.INSTANCE, request); - } - - @Override - public void getIndex(GetIndexRequest request, ActionListener listener) { - execute(GetIndexAction.INSTANCE, request, listener); - } - - @Override - public GetIndexRequestBuilder prepareGetIndex() { - return new GetIndexRequestBuilder(this); - } - - @Override - public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { - execute(ClearIndicesCacheAction.INSTANCE, request, listener); - } - - @Override - public ClearIndicesCacheRequestBuilder prepareClearCache(String... indices) { - return new ClearIndicesCacheRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture create(final CreateIndexRequest request) { - return execute(CreateIndexAction.INSTANCE, request); - } - - @Override - public void create(final CreateIndexRequest request, final ActionListener listener) { - execute(CreateIndexAction.INSTANCE, request, listener); - } - - @Override - public CreateIndexRequestBuilder prepareCreate(String index) { - return new CreateIndexRequestBuilder(this, index); - } - - @Override - public ActionFuture delete(final DeleteIndexRequest request) { - return execute(DeleteIndexAction.INSTANCE, request); - } - - @Override - public void delete(final DeleteIndexRequest request, final ActionListener listener) { - execute(DeleteIndexAction.INSTANCE, request, listener); - } - - @Override - public DeleteIndexRequestBuilder prepareDelete(String... 
indices) { - return new DeleteIndexRequestBuilder(this, indices); - } - - @Override - public ActionFuture close(final CloseIndexRequest request) { - return execute(CloseIndexAction.INSTANCE, request); - } - - @Override - public void close(final CloseIndexRequest request, final ActionListener listener) { - execute(CloseIndexAction.INSTANCE, request, listener); - } - - @Override - public CloseIndexRequestBuilder prepareClose(String... indices) { - return new CloseIndexRequestBuilder(this, indices); - } - - @Override - public ActionFuture open(final OpenIndexRequest request) { - return execute(OpenIndexAction.INSTANCE, request); - } - - @Override - public void open(final OpenIndexRequest request, final ActionListener listener) { - execute(OpenIndexAction.INSTANCE, request, listener); - } - - @Override - public OpenIndexRequestBuilder prepareOpen(String... indices) { - return new OpenIndexRequestBuilder(this, indices); - } - - @Override - public ActionFuture flush(final FlushRequest request) { - return execute(FlushAction.INSTANCE, request); - } - - @Override - public void flush(final FlushRequest request, final ActionListener listener) { - execute(FlushAction.INSTANCE, request, listener); - } - - @Override - public FlushRequestBuilder prepareFlush(String... indices) { - return new FlushRequestBuilder(this).setIndices(indices); - } - - @Override - public void getMappings(GetMappingsRequest request, ActionListener listener) { - execute(GetMappingsAction.INSTANCE, request, listener); - } - - @Override - public void getFieldMappings(GetFieldMappingsRequest request, ActionListener listener) { - execute(GetFieldMappingsAction.INSTANCE, request, listener); - } - - @Override - public GetMappingsRequestBuilder prepareGetMappings(String... 
indices) { - return new GetMappingsRequestBuilder(this, indices); - } - - @Override - public ActionFuture getMappings(GetMappingsRequest request) { - return execute(GetMappingsAction.INSTANCE, request); - } - - @Override - public GetFieldMappingsRequestBuilder prepareGetFieldMappings(String... indices) { - return new GetFieldMappingsRequestBuilder(this, indices); - } - - @Override - public ActionFuture getFieldMappings(GetFieldMappingsRequest request) { - return execute(GetFieldMappingsAction.INSTANCE, request); - } - - @Override - public ActionFuture putMapping(final PutMappingRequest request) { - return execute(PutMappingAction.INSTANCE, request); - } - - @Override - public void putMapping(final PutMappingRequest request, final ActionListener listener) { - execute(PutMappingAction.INSTANCE, request, listener); - } - - @Override - public PutMappingRequestBuilder preparePutMapping(String... indices) { - return new PutMappingRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture optimize(final OptimizeRequest request) { - return execute(OptimizeAction.INSTANCE, request); - } - - @Override - public void optimize(final OptimizeRequest request, final ActionListener listener) { - execute(OptimizeAction.INSTANCE, request, listener); - } - - @Override - public OptimizeRequestBuilder prepareOptimize(String... indices) { - return new OptimizeRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture refresh(final RefreshRequest request) { - return execute(RefreshAction.INSTANCE, request); - } - - @Override - public void refresh(final RefreshRequest request, final ActionListener listener) { - execute(RefreshAction.INSTANCE, request, listener); - } - - @Override - public RefreshRequestBuilder prepareRefresh(String... 
indices) { - return new RefreshRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture stats(final IndicesStatsRequest request) { - return execute(IndicesStatsAction.INSTANCE, request); - } - - @Override - public void stats(final IndicesStatsRequest request, final ActionListener listener) { - execute(IndicesStatsAction.INSTANCE, request, listener); - } - - @Override - public IndicesStatsRequestBuilder prepareStats(String... indices) { - return new IndicesStatsRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture recoveries(final RecoveryRequest request) { - return execute(RecoveryAction.INSTANCE, request); - } - - @Override - public void recoveries(final RecoveryRequest request, final ActionListener listener) { - execute(RecoveryAction.INSTANCE, request, listener); - } - - @Override - public RecoveryRequestBuilder prepareRecoveries(String... indices) { - return new RecoveryRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture segments(final IndicesSegmentsRequest request) { - return execute(IndicesSegmentsAction.INSTANCE, request); - } - - @Override - public void segments(final IndicesSegmentsRequest request, final ActionListener listener) { - execute(IndicesSegmentsAction.INSTANCE, request, listener); - } - - @Override - public IndicesSegmentsRequestBuilder prepareSegments(String... indices) { - return new IndicesSegmentsRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture updateSettings(final UpdateSettingsRequest request) { - return execute(UpdateSettingsAction.INSTANCE, request); - } - - @Override - public void updateSettings(final UpdateSettingsRequest request, final ActionListener listener) { - execute(UpdateSettingsAction.INSTANCE, request, listener); - } - - @Override - public UpdateSettingsRequestBuilder prepareUpdateSettings(String... 
indices) { - return new UpdateSettingsRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture analyze(final AnalyzeRequest request) { - return execute(AnalyzeAction.INSTANCE, request); - } - - @Override - public void analyze(final AnalyzeRequest request, final ActionListener listener) { - execute(AnalyzeAction.INSTANCE, request, listener); - } - - @Override - public AnalyzeRequestBuilder prepareAnalyze(@Nullable String index, String text) { - return new AnalyzeRequestBuilder(this, index, text); - } - - @Override - public AnalyzeRequestBuilder prepareAnalyze(String text) { - return new AnalyzeRequestBuilder(this, null, text); - } - - @Override - public ActionFuture putTemplate(final PutIndexTemplateRequest request) { - return execute(PutIndexTemplateAction.INSTANCE, request); - } - - @Override - public void putTemplate(final PutIndexTemplateRequest request, final ActionListener listener) { - execute(PutIndexTemplateAction.INSTANCE, request, listener); - } - - @Override - public PutIndexTemplateRequestBuilder preparePutTemplate(String name) { - return new PutIndexTemplateRequestBuilder(this, name); - } - - @Override - public ActionFuture getTemplates(final GetIndexTemplatesRequest request) { - return execute(GetIndexTemplatesAction.INSTANCE, request); - } - - @Override - public void getTemplates(final GetIndexTemplatesRequest request, final ActionListener listener) { - execute(GetIndexTemplatesAction.INSTANCE, request, listener); - } - - @Override - public GetIndexTemplatesRequestBuilder prepareGetTemplates(String... 
names) { - return new GetIndexTemplatesRequestBuilder(this, names); - } - - @Override - public ActionFuture deleteTemplate(final DeleteIndexTemplateRequest request) { - return execute(DeleteIndexTemplateAction.INSTANCE, request); - } - - @Override - public void deleteTemplate(final DeleteIndexTemplateRequest request, final ActionListener listener) { - execute(DeleteIndexTemplateAction.INSTANCE, request, listener); - } - - @Override - public DeleteIndexTemplateRequestBuilder prepareDeleteTemplate(String name) { - return new DeleteIndexTemplateRequestBuilder(this, name); - } - - @Override - public ActionFuture validateQuery(final ValidateQueryRequest request) { - return execute(ValidateQueryAction.INSTANCE, request); - } - - @Override - public void validateQuery(final ValidateQueryRequest request, final ActionListener listener) { - execute(ValidateQueryAction.INSTANCE, request, listener); - } - - @Override - public ValidateQueryRequestBuilder prepareValidateQuery(String... indices) { - return new ValidateQueryRequestBuilder(this).setIndices(indices); - } - - @Override - public ActionFuture putWarmer(PutWarmerRequest request) { - return execute(PutWarmerAction.INSTANCE, request); - } - - @Override - public void putWarmer(PutWarmerRequest request, ActionListener listener) { - execute(PutWarmerAction.INSTANCE, request, listener); - } - - @Override - public PutWarmerRequestBuilder preparePutWarmer(String name) { - return new PutWarmerRequestBuilder(this, name); - } - - @Override - public ActionFuture deleteWarmer(DeleteWarmerRequest request) { - return execute(DeleteWarmerAction.INSTANCE, request); - } - - @Override - public void deleteWarmer(DeleteWarmerRequest request, ActionListener listener) { - execute(DeleteWarmerAction.INSTANCE, request, listener); - } - - @Override - public DeleteWarmerRequestBuilder prepareDeleteWarmer() { - return new DeleteWarmerRequestBuilder(this); - } - - @Override - public GetWarmersRequestBuilder prepareGetWarmers(String... 
indices) { - return new GetWarmersRequestBuilder(this, indices); - } - - @Override - public ActionFuture getWarmers(GetWarmersRequest request) { - return execute(GetWarmersAction.INSTANCE, request); - } - - @Override - public void getWarmers(GetWarmersRequest request, ActionListener listener) { - execute(GetWarmersAction.INSTANCE, request, listener); - } - - @Override - public GetSettingsRequestBuilder prepareGetSettings(String... indices) { - return new GetSettingsRequestBuilder(this, indices); - } - - @Override - public ActionFuture getSettings(GetSettingsRequest request) { - return execute(GetSettingsAction.INSTANCE, request); - } - - @Override - public void getSettings(GetSettingsRequest request, ActionListener listener) { - execute(GetSettingsAction.INSTANCE, request, listener); - } -} diff --git a/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java index 573715c10f4..895b3d844f6 100644 --- a/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java +++ b/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java @@ -20,10 +20,7 @@ package org.elasticsearch.client.transport; import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.support.InternalTransportAdminClient; -import org.elasticsearch.client.transport.support.InternalTransportClient; -import org.elasticsearch.client.transport.support.InternalTransportClusterAdminClient; -import org.elasticsearch.client.transport.support.InternalTransportIndicesAdminClient; +import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.common.inject.AbstractModule; /** @@ -34,10 +31,7 @@ public class ClientTransportModule extends AbstractModule { @Override protected void configure() { bind(Headers.class).asEagerSingleton(); - bind(InternalTransportClient.class).asEagerSingleton(); - 
bind(InternalTransportAdminClient.class).asEagerSingleton(); - bind(InternalTransportIndicesAdminClient.class).asEagerSingleton(); - bind(InternalTransportClusterAdminClient.class).asEagerSingleton(); + bind(TransportProxyClient.class).asEagerSingleton(); bind(TransportClientNodesService.class).asEagerSingleton(); } } diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 6dd30b02af3..c7866e6b1f9 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -20,40 +20,12 @@ package org.elasticsearch.client.transport; import com.google.common.collect.ImmutableList; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.*; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.count.CountRequest; -import org.elasticsearch.action.count.CountResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.explain.ExplainRequest; -import org.elasticsearch.action.explain.ExplainResponse; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.mlt.MoreLikeThisRequest; -import org.elasticsearch.action.percolate.PercolateRequest; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.action.search.*; -import org.elasticsearch.action.suggest.SuggestRequest; -import org.elasticsearch.action.suggest.SuggestResponse; -import 
org.elasticsearch.action.termvectors.MultiTermVectorsRequest; -import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; -import org.elasticsearch.action.termvectors.TermVectorsRequest; -import org.elasticsearch.action.termvectors.TermVectorsResponse; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.client.AdminClient; -import org.elasticsearch.client.Client; import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.transport.support.InternalTransportClient; +import org.elasticsearch.client.support.Headers; +import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.ClusterNameModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.Tuple; @@ -93,100 +65,99 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilde */ public class TransportClient extends AbstractClient { - private static final String CLIENT_TYPE = "transport"; + /** + * Handy method ot create a {@link org.elasticsearch.client.transport.TransportClient.Builder}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder used to create an instance of the transport client. + */ + public static class Builder { + + private Settings settings = ImmutableSettings.EMPTY; + private boolean loadConfigSettings = true; + + /** + * The settings to configure the transport client with. + */ + public Builder settings(Settings.Builder settings) { + return settings(settings.build()); + } + + /** + * The settings to configure the transport client with. 
+ */ + public Builder settings(Settings settings) { + this.settings = settings; + return this; + } + + /** + * Should the transport client load file based configuration automatically or not (and rely + * only on the provided settings), defaults to true. + */ + public Builder loadConfigSettings(boolean loadConfigSettings) { + this.loadConfigSettings = loadConfigSettings; + return this; + } + + /** + * Builds a new instance of the transport client. + */ + public TransportClient build() { + Tuple tuple = InternalSettingsPreparer.prepareSettings(settings, loadConfigSettings); + Settings settings = settingsBuilder() + .put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval + .put(tuple.v1()) + .put("network.server", false) + .put("node.client", true) + .put(CLIENT_TYPE_SETTING, CLIENT_TYPE) + .build(); + Environment environment = tuple.v2(); + + PluginsService pluginsService = new PluginsService(settings, tuple.v2()); + this.settings = pluginsService.updatedSettings(); + + Version version = Version.CURRENT; + + CompressorFactory.configure(this.settings); + + ModulesBuilder modules = new ModulesBuilder(); + modules.add(new Version.Module(version)); + modules.add(new PluginsModule(this.settings, pluginsService)); + modules.add(new EnvironmentModule(environment)); + modules.add(new SettingsModule(this.settings)); + modules.add(new NetworkModule()); + modules.add(new ClusterNameModule(this.settings)); + modules.add(new ThreadPoolModule(this.settings)); + modules.add(new TransportSearchModule()); + modules.add(new TransportModule(this.settings)); + modules.add(new ActionModule(true)); + modules.add(new ClientTransportModule()); + modules.add(new CircuitBreakerModule(this.settings)); + + Injector injector = modules.createInjector(); + injector.getInstance(TransportService.class).start(); + + return new TransportClient(injector); + } + } + + public static final String CLIENT_TYPE = "transport"; final Injector injector; - private final 
Settings settings; - private final Environment environment; - private final PluginsService pluginsService; private final TransportClientNodesService nodesService; - private final InternalTransportClient internalClient; - - /** - * Constructs a new transport client with settings loaded either from the classpath or the file system (the - * elasticsearch.(yml|json) files optionally prefixed with config/). - */ - public TransportClient() { - this(ImmutableSettings.Builder.EMPTY_SETTINGS, true); - } - - /** - * Constructs a new transport client with explicit settings and settings loaded either from the classpath or the file - * system (the elasticsearch.(yml|json) files optionally prefixed with config/). - */ - public TransportClient(Settings settings) { - this(settings, true); - } - - /** - * Constructs a new transport client with explicit settings and settings loaded either from the classpath or the file - * system (the elasticsearch.(yml|json) files optionally prefixed with config/). - */ - public TransportClient(Settings.Builder settings) { - this(settings.build(), true); - } - - /** - * Constructs a new transport client with the provided settings and the ability to control if settings will - * be loaded from the classpath / file system (the elasticsearch.(yml|json) files optionally prefixed with - * config/). - * - * @param settings The explicit settings. - * @param loadConfigSettings true if settings should be loaded from the classpath/file system. - * @throws org.elasticsearch.ElasticsearchException - */ - public TransportClient(Settings.Builder settings, boolean loadConfigSettings) { - this(settings.build(), loadConfigSettings); - } - - /** - * Constructs a new transport client with the provided settings and the ability to control if settings will - * be loaded from the classpath / file system (the elasticsearch.(yml|json) files optionally prefixed with - * config/). - * - * @param pSettings The explicit settings. 
- * @param loadConfigSettings true if settings should be loaded from the classpath/file system. - * @throws org.elasticsearch.ElasticsearchException - */ - public TransportClient(Settings pSettings, boolean loadConfigSettings) { - Tuple tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings); - Settings settings = settingsBuilder() - .put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval - .put(tuple.v1()) - .put("network.server", false) - .put("node.client", true) - .put(CLIENT_TYPE_SETTING, CLIENT_TYPE) - .build(); - this.environment = tuple.v2(); - - this.pluginsService = new PluginsService(settings, tuple.v2()); - this.settings = pluginsService.updatedSettings(); - - Version version = Version.CURRENT; - - CompressorFactory.configure(this.settings); - - ModulesBuilder modules = new ModulesBuilder(); - modules.add(new Version.Module(version)); - modules.add(new PluginsModule(this.settings, pluginsService)); - modules.add(new EnvironmentModule(environment)); - modules.add(new SettingsModule(this.settings)); - modules.add(new NetworkModule()); - modules.add(new ClusterNameModule(this.settings)); - modules.add(new ThreadPoolModule(this.settings)); - modules.add(new TransportSearchModule()); - modules.add(new TransportModule(this.settings)); - modules.add(new ActionModule(true)); - modules.add(new ClientTransportModule()); - modules.add(new CircuitBreakerModule(this.settings)); - - injector = modules.createInjector(); - - injector.getInstance(TransportService.class).start(); + private final TransportProxyClient proxy; + private TransportClient(Injector injector) { + super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class), injector.getInstance(Headers.class)); + this.injector = injector; nodesService = injector.getInstance(TransportClientNodesService.class); - internalClient = injector.getInstance(InternalTransportClient.class); + proxy = 
injector.getInstance(TransportProxyClient.class); } TransportClientNodesService nodeService() { @@ -273,7 +244,7 @@ public class TransportClient extends AbstractClient { // ignore, might not be bounded } - for (Class plugin : pluginsService.services()) { + for (Class plugin : injector.getInstance(PluginsService.class).services()) { injector.getInstance(plugin).close(); } try { @@ -286,187 +257,7 @@ public class TransportClient extends AbstractClient { } @Override - public Settings settings() { - return this.settings; - } - - @Override - public ThreadPool threadPool() { - return internalClient.threadPool(); - } - - @Override - public AdminClient admin() { - return internalClient.admin(); - } - - @Override - public > ActionFuture execute(Action action, Request request) { - return internalClient.execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - internalClient.execute(action, request, listener); - } - - @Override - public ActionFuture index(IndexRequest request) { - return internalClient.index(request); - } - - @Override - public void index(IndexRequest request, ActionListener listener) { - internalClient.index(request, listener); - } - - @Override - public ActionFuture update(UpdateRequest request) { - return internalClient.update(request); - } - - @Override - public void update(UpdateRequest request, ActionListener listener) { - internalClient.update(request, listener); - } - - @Override - public ActionFuture delete(DeleteRequest request) { - return internalClient.delete(request); - } - - @Override - public void delete(DeleteRequest request, ActionListener listener) { - internalClient.delete(request, listener); - } - - @Override - public ActionFuture bulk(BulkRequest request) { - return internalClient.bulk(request); - } - - @Override - public void bulk(BulkRequest request, ActionListener listener) { - internalClient.bulk(request, listener); - } - - @Override - public ActionFuture 
get(GetRequest request) { - return internalClient.get(request); - } - - @Override - public void get(GetRequest request, ActionListener listener) { - internalClient.get(request, listener); - } - - @Override - public ActionFuture multiGet(MultiGetRequest request) { - return internalClient.multiGet(request); - } - - @Override - public void multiGet(MultiGetRequest request, ActionListener listener) { - internalClient.multiGet(request, listener); - } - - @Override - public ActionFuture count(CountRequest request) { - return internalClient.count(request); - } - - @Override - public void count(CountRequest request, ActionListener listener) { - internalClient.count(request, listener); - } - - @Override - public ActionFuture suggest(SuggestRequest request) { - return internalClient.suggest(request); - } - - @Override - public void suggest(SuggestRequest request, ActionListener listener) { - internalClient.suggest(request, listener); - } - - @Override - public ActionFuture search(SearchRequest request) { - return internalClient.search(request); - } - - @Override - public void search(SearchRequest request, ActionListener listener) { - internalClient.search(request, listener); - } - - @Override - public ActionFuture searchScroll(SearchScrollRequest request) { - return internalClient.searchScroll(request); - } - - @Override - public void searchScroll(SearchScrollRequest request, ActionListener listener) { - internalClient.searchScroll(request, listener); - } - - @Override - public ActionFuture multiSearch(MultiSearchRequest request) { - return internalClient.multiSearch(request); - } - - @Override - public void multiSearch(MultiSearchRequest request, ActionListener listener) { - internalClient.multiSearch(request, listener); - } - - @Override - public ActionFuture moreLikeThis(MoreLikeThisRequest request) { - return internalClient.moreLikeThis(request); - } - - @Override - public void moreLikeThis(MoreLikeThisRequest request, ActionListener listener) { - 
internalClient.moreLikeThis(request, listener); - } - - @Override - public ActionFuture termVectors(TermVectorsRequest request) { - return internalClient.termVectors(request); - } - - @Override - public void termVectors(TermVectorsRequest request, ActionListener listener) { - internalClient.termVectors(request, listener); - } - - @Override - public ActionFuture multiTermVectors(final MultiTermVectorsRequest request) { - return internalClient.multiTermVectors(request); - } - - @Override - public void multiTermVectors(final MultiTermVectorsRequest request, final ActionListener listener) { - internalClient.multiTermVectors(request, listener); - } - - @Override - public ActionFuture percolate(PercolateRequest request) { - return internalClient.percolate(request); - } - - @Override - public void percolate(PercolateRequest request, ActionListener listener) { - internalClient.percolate(request, listener); - } - - @Override - public ActionFuture explain(ExplainRequest request) { - return internalClient.explain(request); - } - - @Override - public void explain(ExplainRequest request, ActionListener listener) { - internalClient.explain(request, listener); + protected > void doExecute(Action action, Request request, ActionListener listener) { + proxy.execute(action, request, listener); } } diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index e2cf962f65a..44d6e0d7851 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import 
org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -38,11 +37,9 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -199,7 +196,7 @@ public class TransportClientNodesService extends AbstractComponent { ImmutableList nodes = this.nodes; ensureNodesAreAvailable(nodes); int index = getNodeNumber(); - RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index, threadPool, logger); + RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index); DiscoveryNode node = nodes.get((index) % nodes.size()); try { callback.doWithNode(node, retryListener); @@ -213,20 +210,15 @@ public class TransportClientNodesService extends AbstractComponent { private final NodeListenerCallback callback; private final ActionListener listener; private final ImmutableList nodes; - private final ESLogger logger; private final int index; - private ThreadPool threadPool; private volatile int i; - public RetryListener(NodeListenerCallback callback, ActionListener listener, ImmutableList nodes, - int index, ThreadPool threadPool, ESLogger logger) { + public RetryListener(NodeListenerCallback callback, ActionListener listener, ImmutableList nodes, int index) { this.callback = callback; this.listener = listener; this.nodes = nodes; this.index = index; - this.threadPool = threadPool; - this.logger = logger; } @Override @@ 
-239,38 +231,21 @@ public class TransportClientNodesService extends AbstractComponent { if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { int i = ++this.i; if (i >= nodes.size()) { - runFailureInListenerThreadPool(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); + listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); } else { try { callback.doWithNode(nodes.get((index + i) % nodes.size()), this); } catch(final Throwable t) { // this exception can't come from the TransportService as it doesn't throw exceptions at all - runFailureInListenerThreadPool(t); + listener.onFailure(t); } } } else { - runFailureInListenerThreadPool(e); + listener.onFailure(e); } } - // need to ensure to not block the netty I/O thread, in case of retry due to the node sampling - private void runFailureInListenerThreadPool(final Throwable t) { - threadPool.executor(ThreadPool.Names.LISTENER).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - listener.onFailure(t); - } - @Override - public void onFailure(Throwable t) { - if (logger.isDebugEnabled()) { - logger.debug("Could not execute failure listener: [{}]", t, t.getMessage()); - } else { - logger.error("Could not execute failure listener: [{}]", t.getMessage()); - } - } - }); - } } public void close() { @@ -505,7 +480,7 @@ public class TransportClientNodesService extends AbstractComponent { } } - public static interface NodeListenerCallback { + public interface NodeListenerCallback { void doWithNode(DiscoveryNode node, ActionListener listener); } diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java deleted file mode 100644 index d2f51160d8b..00000000000 --- 
a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportAdminClient.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport.support; - -import org.elasticsearch.client.AdminClient; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; - -/** - * - */ -public class InternalTransportAdminClient extends AbstractComponent implements AdminClient { - - private final InternalTransportIndicesAdminClient indicesAdminClient; - - private final InternalTransportClusterAdminClient clusterAdminClient; - - @Inject - public InternalTransportAdminClient(Settings settings, InternalTransportIndicesAdminClient indicesAdminClient, InternalTransportClusterAdminClient clusterAdminClient) { - super(settings); - this.indicesAdminClient = indicesAdminClient; - this.clusterAdminClient = clusterAdminClient; - } - - @Override - public IndicesAdminClient indices() { - return indicesAdminClient; - } - - @Override - public ClusterAdminClient cluster() { 
- return clusterAdminClient; - } -} diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java deleted file mode 100644 index 11a9959019f..00000000000 --- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClient.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.transport.support; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.action.*; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.AdminClient; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.TransportClientNodesService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; - -/** - * - */ -public class InternalTransportClient extends AbstractClient { - - private final Settings settings; - private final ThreadPool threadPool; - - private final TransportClientNodesService nodesService; - - private final InternalTransportAdminClient adminClient; - - private final ImmutableMap actions; - - private final Headers headers; - - @Inject - public InternalTransportClient(Settings settings, ThreadPool threadPool, TransportService transportService, - TransportClientNodesService nodesService, InternalTransportAdminClient adminClient, - Map actions, Headers headers) { - this.settings = settings; - this.threadPool = threadPool; - this.nodesService = nodesService; - this.adminClient = adminClient; - this.headers = headers; - MapBuilder actionsBuilder = new MapBuilder<>(); - for (GenericAction action : actions.values()) { - if (action instanceof Action) { - actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService)); - } - } - this.actions = actionsBuilder.immutableMap(); - } - - @Override - public void close() { - // nothing to do here - } - - @Override - public Settings settings() { - return this.settings; - } - - 
@Override - public ThreadPool threadPool() { - return this.threadPool; - } - - @Override - public AdminClient admin() { - return adminClient; - } - - @SuppressWarnings("unchecked") - @Override - public > ActionFuture execute(final Action action, final Request request) { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); - execute(action, request, actionFuture); - return actionFuture; - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(final Action action, final Request request, ActionListener listener) { - headers.applyTo(request); - final TransportActionNodeProxy proxy = actions.get(action); - nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { - @Override - public void doWithNode(DiscoveryNode node, ActionListener listener) { - proxy.execute(node, request, listener); - } - }, listener); - } -} diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java deleted file mode 100644 index 74be67d0009..00000000000 --- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportClusterAdminClient.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport.support; - -import com.google.common.collect.ImmutableMap; -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.support.AbstractClusterAdminClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.TransportClientNodesService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; - -/** - * - */ -@SuppressWarnings("unchecked") -public class InternalTransportClusterAdminClient extends AbstractClusterAdminClient implements ClusterAdminClient { - - private final TransportClientNodesService nodesService; - - private final ThreadPool threadPool; - - private final ImmutableMap actions; - - private final Headers headers; - - @Inject - public InternalTransportClusterAdminClient(Settings settings, TransportClientNodesService nodesService, ThreadPool threadPool, TransportService transportService, - Map actions, Headers headers) { - this.nodesService = nodesService; - this.threadPool = threadPool; - this.headers = headers; - MapBuilder actionsBuilder = new MapBuilder<>(); - for (GenericAction action : actions.values()) { - if (action instanceof ClusterAction) { - actionsBuilder.put((ClusterAction) action, new TransportActionNodeProxy(settings, action, transportService)); - } - } - this.actions = actionsBuilder.immutableMap(); - } - - @Override - public ThreadPool threadPool() { - return this.threadPool; - } - - 
@SuppressWarnings("unchecked") - @Override - public > ActionFuture execute(final Action action, final Request request) { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); - execute(action, request, actionFuture); - return actionFuture; - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(final Action action, final Request request, final ActionListener listener) { - headers.applyTo(request); - final TransportActionNodeProxy proxy = actions.get(action); - nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { - @Override - public void doWithNode(DiscoveryNode node, ActionListener listener) { - proxy.execute(node, request, listener); - } - }, listener); - } - -} diff --git a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java b/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java similarity index 53% rename from src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java rename to src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java index 8cb63a17c7d..89b3a042450 100644 --- a/src/main/java/org/elasticsearch/client/transport/support/InternalTransportIndicesAdminClient.java +++ b/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java @@ -21,17 +21,11 @@ package org.elasticsearch.client.transport.support; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.client.support.AbstractIndicesAdminClient; -import org.elasticsearch.client.support.Headers; import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.Map; @@ -39,50 +33,25 @@ import java.util.Map; /** * */ -@SuppressWarnings("unchecked") -public class InternalTransportIndicesAdminClient extends AbstractIndicesAdminClient implements IndicesAdminClient { +public class TransportProxyClient { private final TransportClientNodesService nodesService; - - private final ThreadPool threadPool; - - private final ImmutableMap actions; - - private final Headers headers; + private final ImmutableMap proxies; @Inject - public InternalTransportIndicesAdminClient(Settings settings, TransportClientNodesService nodesService, TransportService transportService, ThreadPool threadPool, - Map actions, Headers headers) { + public TransportProxyClient(Settings settings, TransportService transportService, TransportClientNodesService nodesService, Map actions) { this.nodesService = nodesService; - this.threadPool = threadPool; - this.headers = headers; MapBuilder actionsBuilder = new MapBuilder<>(); for (GenericAction action : actions.values()) { - if (action instanceof IndicesAction) { + if (action instanceof Action) { actionsBuilder.put((Action) action, new TransportActionNodeProxy(settings, action, transportService)); } } - this.actions = actionsBuilder.immutableMap(); + this.proxies = actionsBuilder.immutableMap(); } - @Override - public ThreadPool threadPool() { - return this.threadPool; - } - - @SuppressWarnings("unchecked") - @Override - public > ActionFuture execute(final Action action, final Request request) { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); - execute(action, request, actionFuture); - return actionFuture; - } - - @SuppressWarnings("unchecked") - @Override - public > void execute(final Action action, final Request request, ActionListener 
listener) { - headers.applyTo(request); - final TransportActionNodeProxy proxy = actions.get(action); + public > void execute(final Action action, final Request request, ActionListener listener) { + final TransportActionNodeProxy proxy = proxies.get(action); nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { @Override public void doWithNode(DiscoveryNode node, ActionListener listener) { diff --git a/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 858108ccff5..e3469abde3e 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -105,7 +105,14 @@ public class ClusterChangedEvent { * Returns the indices deleted in this event */ public List indicesDeleted() { - if (previousState == null) { + + // if the new cluster state has a new master then we cannot know if an index which is not in the cluster state + // is actually supposed to be deleted or imported as dangling instead. for example a new master might not have + // the index in its cluster state because it was started with an empty data folder and in this case we want to + // import as dangling. we check here for new master too to be on the safe side in this case. + // norelease because we are not sure this is actually a good solution + // See discussion on https://github.com/elastic/elasticsearch/pull/9952 + if (hasNewMaster() || previousState == null) { return ImmutableList.of(); } if (!metaDataChanged()) { @@ -165,4 +172,23 @@ public class ClusterChangedEvent { public boolean nodesChanged() { return nodesRemoved() || nodesAdded(); } + + /** + * Checks if this cluster state comes from a different master than the previous one. + * This is a workaround for the scenario where a node misses a cluster state that has either + * no master block or state not recovered flag set. 
In this case we must make sure that + * if an index is missing from the cluster state is not deleted immediately but instead imported + * as dangling. See discussion on https://github.com/elastic/elasticsearch/pull/9952 + */ + private boolean hasNewMaster() { + String oldMaster = previousState().getNodes().masterNodeId(); + String newMaster = state().getNodes().masterNodeId(); + if (oldMaster == null && newMaster == null) { + return false; + } + if (oldMaster == null && newMaster != null) { + return true; + } + return oldMaster.equals(newMaster) == false; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index 4f63d9e00e3..355b3f19875 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -46,6 +47,10 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryService; +import org.elasticsearch.discovery.local.LocalDiscovery; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import java.io.IOException; import java.util.EnumSet; @@ -54,7 +59,29 @@ import java.util.Locale; import java.util.Map; /** + * Represents the current state of the cluster. 
* + * The cluster state object is immutable with an + * exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable}, + * and cluster state {@link #status}, which is updated during cluster state publishing and applying + * processing. The cluster state can be updated only on the master node. All updates are performed by on a + * single thread and controlled by the {@link InternalClusterService}. After every update the + * {@link DiscoveryService#publish} method publishes new version of the cluster state to all other nodes in the + * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on + * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish} + * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The + * publishing mechanism can be overridden by other discovery. + * + * The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state + * differences instead of the entire state on each change. The publishing mechanism should only send differences + * to a node if this node was present in the previous version of the cluster state. If a node is not present was + * not present in the previous version of the cluster state, such node is unlikely to have the previous cluster + * state version and should be sent a complete version. In order to make sure that the differences are applied to + * correct version of the cluster state, each cluster state version update generates {@link #uuid} that uniquely + * identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to + * makes sure that the correct diffs are applied. 
If uuids don’t match, the {@link ClusterStateDiff#apply} method + * throws the {@link IncompatibleClusterStateVersionException}, which should cause the publishing mechanism to send + * a full version of the cluster state to the node on which this exception was thrown. */ public class ClusterState implements ToXContent, Diffable { diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index fe76d0f3f2b..69bdd2808dc 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -165,6 +165,7 @@ public class IndexMetaData implements Diffable { public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type"; public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type"; public static final String SETTING_DATA_PATH = "index.data_path"; + public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; public static final String INDEX_UUID_NA_VALUE = "_na_"; // hard-coded hash function as of 2.0 diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java deleted file mode 100644 index 96b1a8e4de5..00000000000 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/EvenShardsCountAllocator.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.routing.allocation.allocator; - -import com.carrotsearch.hppc.ObjectIntOpenHashMap; -import org.elasticsearch.cluster.routing.MutableShardRouting; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; - -import java.util.Arrays; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; - -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; - -/** - * A {@link ShardsAllocator} that tries to balance shards across nodes in the - * cluster such that each node holds approximatly the same number of shards. The - * allocations algorithm operates on a cluster ie. is index-agnostic. 
While the - * number of shards per node might be balanced across the cluster a single node - * can hold mulitple shards from a single index such that the shard of an index - * are not necessarily balanced across nodes. Yet, due to high-level - * {@link AllocationDecider decisions} multiple instances of the same shard - * won't be allocated on the same node. - *

- * During {@link #rebalance(RoutingAllocation) re-balancing} the allocator takes - * shards from the most busy nodes and tries to relocate the shards to - * the least busy node until the number of shards per node are equal for all - * nodes in the cluster or until no shards can be relocated anymore. - *

- */ -public class EvenShardsCountAllocator extends AbstractComponent implements ShardsAllocator { - - @Inject - public EvenShardsCountAllocator(Settings settings) { - super(settings); - } - - @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { - } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { - } - - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { - boolean changed = false; - RoutingNodes routingNodes = allocation.routingNodes(); - /* - * 1. order nodes by the number of shards allocated on them least one first (this takes relocation into account) - * ie. if a shard is relocating the target nodes shard count is incremented. - * 2. iterate over the unassigned shards - * 2a. find the least busy node in the cluster that allows allocation for the current unassigned shard - * 2b. if a node is found add the shard to the node and remove it from the unassigned shards - * 3. iterate over the remaining unassigned shards and try to allocate them on next possible node - */ - // order nodes by number of shards (asc) - RoutingNode[] nodes = sortedNodesLeastToHigh(allocation); - - Iterator unassignedIterator = routingNodes.unassigned().iterator(); - int lastNode = 0; - - while (unassignedIterator.hasNext()) { - MutableShardRouting shard = unassignedIterator.next(); - // do the allocation, finding the least "busy" node - for (int i = 0; i < nodes.length; i++) { - RoutingNode node = nodes[lastNode]; - lastNode++; - if (lastNode == nodes.length) { - lastNode = 0; - } - - Decision decision = allocation.deciders().canAllocate(shard, node, allocation); - if (decision.type() == Decision.Type.YES) { - int numberOfShardsToAllocate = routingNodes.requiredAverageNumberOfShardsPerNode() - node.size(); - if (numberOfShardsToAllocate <= 0) { - continue; - } - - changed = true; - allocation.routingNodes().assign(shard, node.nodeId()); - unassignedIterator.remove(); - break; - } - } - } - - // 
allocate all the unassigned shards above the average per node. - for (Iterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) { - MutableShardRouting shard = it.next(); - // go over the nodes and try and allocate the remaining ones - for (RoutingNode routingNode : sortedNodesLeastToHigh(allocation)) { - Decision decision = allocation.deciders().canAllocate(shard, routingNode, allocation); - if (decision.type() == Decision.Type.YES) { - changed = true; - allocation.routingNodes().assign(shard, routingNode.nodeId()); - it.remove(); - break; - } - } - } - return changed; - } - - @Override - public boolean rebalance(RoutingAllocation allocation) { - // take shards form busy nodes and move them to less busy nodes - boolean changed = false; - RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation); - if (sortedNodesLeastToHigh.length == 0) { - return false; - } - int lowIndex = 0; - int highIndex = sortedNodesLeastToHigh.length - 1; - boolean relocationPerformed; - do { - relocationPerformed = false; - while (lowIndex != highIndex) { - RoutingNode lowRoutingNode = sortedNodesLeastToHigh[lowIndex]; - RoutingNode highRoutingNode = sortedNodesLeastToHigh[highIndex]; - int averageNumOfShards = allocation.routingNodes().requiredAverageNumberOfShardsPerNode(); - - // only active shards can be removed so must count only active ones. 
- if (highRoutingNode.numberOfOwningShards() <= averageNumOfShards) { - highIndex--; - continue; - } - - if (lowRoutingNode.size() >= averageNumOfShards) { - lowIndex++; - continue; - } - - // Take a started shard from a "busy" node and move it to less busy node and go on - boolean relocated = false; - List startedShards = highRoutingNode.shardsWithState(STARTED); - for (MutableShardRouting startedShard : startedShards) { - Decision rebalanceDecision = allocation.deciders().canRebalance(startedShard, allocation); - if (rebalanceDecision.type() == Decision.Type.NO) { - continue; - } - - Decision allocateDecision = allocation.deciders().canAllocate(startedShard, lowRoutingNode, allocation); - if (allocateDecision.type() == Decision.Type.YES) { - changed = true; - allocation.routingNodes().assign(new MutableShardRouting(startedShard.index(), startedShard.id(), - lowRoutingNode.nodeId(), startedShard.currentNodeId(), startedShard.restoreSource(), - startedShard.primary(), INITIALIZING, startedShard.version() + 1), lowRoutingNode.nodeId()); - - allocation.routingNodes().relocate(startedShard, lowRoutingNode.nodeId()); - relocated = true; - relocationPerformed = true; - break; - } - } - - if (!relocated) { - highIndex--; - } - } - } while (relocationPerformed); - return changed; - } - - @Override - public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - if (!shardRouting.started()) { - return false; - } - boolean changed = false; - RoutingNode[] sortedNodesLeastToHigh = sortedNodesLeastToHigh(allocation); - if (sortedNodesLeastToHigh.length == 0) { - return false; - } - - for (RoutingNode nodeToCheck : sortedNodesLeastToHigh) { - // check if its the node we are moving from, no sense to check on it - if (nodeToCheck.nodeId().equals(node.nodeId())) { - continue; - } - Decision decision = allocation.deciders().canAllocate(shardRouting, nodeToCheck, allocation); - if (decision.type() == Decision.Type.YES) { - 
allocation.routingNodes().assign(new MutableShardRouting(shardRouting.index(), shardRouting.id(), - nodeToCheck.nodeId(), shardRouting.currentNodeId(), shardRouting.restoreSource(), - shardRouting.primary(), INITIALIZING, shardRouting.version() + 1), nodeToCheck.nodeId()); - - allocation.routingNodes().relocate(shardRouting, nodeToCheck.nodeId()); - changed = true; - break; - } - } - - return changed; - } - - private RoutingNode[] sortedNodesLeastToHigh(RoutingAllocation allocation) { - // create count per node id, taking into account relocations - final ObjectIntOpenHashMap nodeCounts = new ObjectIntOpenHashMap<>(); - for (RoutingNode node : allocation.routingNodes()) { - for (int i = 0; i < node.size(); i++) { - ShardRouting shardRouting = node.get(i); - String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId(); - nodeCounts.addTo(nodeId, 1); - } - } - RoutingNode[] nodes = allocation.routingNodes().toArray(); - Arrays.sort(nodes, new Comparator() { - @Override - public int compare(RoutingNode o1, RoutingNode o2) { - return nodeCounts.get(o1.nodeId()) - nodeCounts.get(o2.nodeId()); - } - }); - return nodes; - } -} diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java index 38f1e39f8d6..38d8c0a2d7b 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocatorModule.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; @@ -27,7 +29,7 @@ import 
org.elasticsearch.gateway.GatewayAllocator; */ public class ShardsAllocatorModule extends AbstractModule { - public static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard"; + private static final String EVEN_SHARD_COUNT_ALLOCATOR_KEY = "even_shard"; public static final String BALANCED_ALLOCATOR_KEY = "balanced"; // default @@ -37,13 +39,11 @@ public class ShardsAllocatorModule extends AbstractModule { private Class shardsAllocator; - public ShardsAllocatorModule(Settings settings) { this.settings = settings; shardsAllocator = loadShardsAllocator(settings); } - @Override protected void configure() { if (shardsAllocator == null) { @@ -56,10 +56,13 @@ public class ShardsAllocatorModule extends AbstractModule { private Class loadShardsAllocator(Settings settings) { final Class shardsAllocator; final String type = settings.get(TYPE_KEY, BALANCED_ALLOCATOR_KEY); + if (BALANCED_ALLOCATOR_KEY.equals(type)) { shardsAllocator = BalancedShardsAllocator.class; } else if (EVEN_SHARD_COUNT_ALLOCATOR_KEY.equals(type)) { - shardsAllocator = EvenShardsCountAllocator.class; + final ESLogger logger = Loggers.getLogger(getClass(), settings); + logger.warn("{} allocator has been removed in 2.0 using {} instead", EVEN_SHARD_COUNT_ALLOCATOR_KEY, BALANCED_ALLOCATOR_KEY); + shardsAllocator = BalancedShardsAllocator.class; } else { shardsAllocator = settings.getAsClass(TYPE_KEY, BalancedShardsAllocator.class, "org.elasticsearch.cluster.routing.allocation.allocator.", "Allocator"); diff --git a/src/main/java/org/elasticsearch/common/io/Channels.java b/src/main/java/org/elasticsearch/common/io/Channels.java index a192825e9f8..79e89fcce50 100644 --- a/src/main/java/org/elasticsearch/common/io/Channels.java +++ b/src/main/java/org/elasticsearch/common/io/Channels.java @@ -87,6 +87,22 @@ public final class Channels { return readFromFileChannel(channel, channelPosition, buffer); } + + /** + * read from a file channel into a byte buffer, starting at a certain position. 
An EOFException will be thrown if you + * attempt to read beyond the end of file. + * + * @param channel channel to read from + * @param channelPosition position to read from + * @param dest destination {@link java.nio.ByteBuffer} to put data in + */ + public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, ByteBuffer dest) throws IOException { + int read = readFromFileChannel(channel, channelPosition, dest); + if (read < 0) { + throw new EOFException("read past EOF. pos [" + channelPosition + "] length: [" + dest.limit() + "] end: [" + channel.size() + "]"); + } + } + /** * read from a file channel into a byte buffer, starting at a certain position. * diff --git a/src/main/java/org/elasticsearch/common/jna/Kernel32Library.java b/src/main/java/org/elasticsearch/common/jna/Kernel32Library.java index 386da4a5401..04549d78f1d 100644 --- a/src/main/java/org/elasticsearch/common/jna/Kernel32Library.java +++ b/src/main/java/org/elasticsearch/common/jna/Kernel32Library.java @@ -22,6 +22,8 @@ package org.elasticsearch.common.jna; import com.google.common.collect.ImmutableList; import com.sun.jna.Native; import com.sun.jna.win32.StdCallLibrary; + +import org.apache.lucene.util.Constants; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -46,13 +48,15 @@ public class Kernel32Library { } private Kernel32Library() { - try { - Native.register("kernel32"); - logger.debug("windows/Kernel32 library loaded"); - } catch (NoClassDefFoundError e) { - logger.warn("JNA not found. native methods and handlers will be disabled."); - } catch (UnsatisfiedLinkError e) { - logger.warn("unable to link Windows/Kernel32 library. native methods and handlers will be disabled."); + if (Constants.WINDOWS) { + try { + Native.register("kernel32"); + logger.debug("windows/Kernel32 library loaded"); + } catch (NoClassDefFoundError e) { + logger.warn("JNA not found. 
native methods and handlers will be disabled."); + } catch (UnsatisfiedLinkError e) { + logger.warn("unable to link Windows/Kernel32 library. native methods and handlers will be disabled."); + } } } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java b/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java index 6b74aec0da1..d5b44ed4dfb 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java @@ -38,7 +38,7 @@ public abstract class AbstractRefCounted implements RefCounted { @Override public final void incRef() { if (tryIncRef() == false) { - throw new AlreadyClosedException(name + " is already closed can't increment refCount current count [" + refCount.get() + "]"); + alreadyClosed(); } } @@ -66,6 +66,10 @@ public abstract class AbstractRefCounted implements RefCounted { } + protected void alreadyClosed() { + throw new AlreadyClosedException(name + " is already closed can't increment refCount current count [" + refCount.get() + "]"); + } + /** * Returns the current reference count. */ @@ -73,5 +77,11 @@ public abstract class AbstractRefCounted implements RefCounted { return this.refCount.get(); } + + /** gets the name of this instance */ + public String getName() { + return name; + } + protected abstract void closeInternal(); } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java index 501adc20403..1a90c6992fc 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java @@ -30,17 +30,48 @@ import java.util.concurrent.locks.Lock; public class ReleasableLock implements Releasable { private final Lock lock; + /* a per thread boolean indicating the lock is held by it. 
only works when assertions are enabled */ + private final ThreadLocal holdingThreads; + public ReleasableLock(Lock lock) { this.lock = lock; + boolean useHoldingThreads = false; + assert (useHoldingThreads = true); + if (useHoldingThreads) { + holdingThreads = new ThreadLocal<>(); + } else { + holdingThreads = null; + } } @Override public void close() { lock.unlock(); + assert removeCurrentThread(); } + public ReleasableLock acquire() throws EngineException { lock.lock(); + assert addCurrentThread(); return this; } + + private boolean addCurrentThread() { + holdingThreads.set(true); + return true; + } + + private boolean removeCurrentThread() { + holdingThreads.remove(); + return true; + } + + public Boolean isHeldByCurrentThread() { + if (holdingThreads == null) { + throw new UnsupportedOperationException("asserts must be enabled"); + } + Boolean b = holdingThreads.get(); + return b != null && b.booleanValue(); + } } diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index cab04792b5d..46947c5667c 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -32,7 +32,6 @@ import java.nio.file.Path; import java.util.ArrayList; import static org.elasticsearch.common.Strings.cleanPath; -import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; /** * The environment of where things exists. 
@@ -69,16 +68,12 @@ public class Environment { fileStores = allStores.toArray(new ESFileStore[allStores.size()]); } - public Environment() { - this(EMPTY_SETTINGS); - } - public Environment(Settings settings) { this.settings = settings; if (settings.get("path.home") != null) { homeFile = PathUtils.get(cleanPath(settings.get("path.home"))); } else { - homeFile = PathUtils.get(System.getProperty("user.dir")); + throw new IllegalStateException("path.home is not configured"); } if (settings.get("path.conf") != null) { @@ -175,26 +170,13 @@ public class Environment { } public URL resolveConfig(String path) throws FailedToResolveConfigException { - String origPath = path; - // first, try it as a path on the file system - Path f1 = PathUtils.get(path); - if (Files.exists(f1)) { + // first, try it as a path in the config directory + Path f = configFile.resolve(path); + if (Files.exists(f)) { try { - return f1.toUri().toURL(); + return f.toUri().toURL(); } catch (MalformedURLException e) { - throw new FailedToResolveConfigException("Failed to resolve path [" + f1 + "]", e); - } - } - if (path.startsWith("/")) { - path = path.substring(1); - } - // next, try it relative to the config location - Path f2 = configFile.resolve(path); - if (Files.exists(f2)) { - try { - return f2.toUri().toURL(); - } catch (MalformedURLException e) { - throw new FailedToResolveConfigException("Failed to resolve path [" + f1 + "]", e); + throw new FailedToResolveConfigException("Failed to resolve path [" + f + "]", e); } } // try and load it from the classpath directly @@ -209,6 +191,6 @@ public class Environment { return resource; } } - throw new FailedToResolveConfigException("Failed to resolve config path [" + origPath + "], tried file path [" + f1 + "], path file [" + f2 + "], and classpath"); + throw new FailedToResolveConfigException("Failed to resolve config path [" + path + "], tried config path [" + f + "] and classpath"); } } diff --git 
a/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 84d63ed3322..78afe6c11bc 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -382,6 +382,14 @@ public class GatewayAllocator extends AbstractComponent { return changed; } + /** + * Build a map of DiscoveryNodes to shard state number for the given shard. + * A state of -1 means the shard does not exist on the node, whereas any + * shard state >= 0 is the state version of the shard on that node's disk. + * + * A shard on shared storage will return at least shard state 0 for all + * nodes, indicating that the shard can be allocated to any node. + */ private ObjectLongOpenHashMap buildShardStates(final DiscoveryNodes nodes, MutableShardRouting shard, IndexMetaData indexMetaData) { ObjectLongOpenHashMap shardStates = cachedShardsState.get(shard.shardId()); ObjectOpenHashSet nodeIds; @@ -415,10 +423,18 @@ public class GatewayAllocator extends AbstractComponent { logListActionFailures(shard, "state", response.failures()); for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : response) { + long version = nodeShardState.version(); + Settings idxSettings = indexMetaData.settings(); + if (IndexMetaData.isOnSharedFilesystem(idxSettings) && + idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false)) { + // Shared filesystems use 0 as a minimum shard state, which + // means that the shard can be allocated to any node + version = Math.max(0, version); + } // -1 version means it does not exists, which is what the API returns, and what we expect to logger.trace("[{}] on node [{}] has version [{}] of shard", - shard, nodeShardState.getNode(), nodeShardState.version()); - shardStates.put(nodeShardState.getNode(), nodeShardState.version()); + shard, nodeShardState.getNode(), version); + 
shardStates.put(nodeShardState.getNode(), version); } return shardStates; } diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 158a3df5d91..2bb940806a9 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -27,9 +28,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.DjbHashFunction; -import org.elasticsearch.cluster.routing.HashFunction; -import org.elasticsearch.cluster.routing.SimpleHashFunction; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -43,6 +42,7 @@ import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.*; /** * @@ -57,7 +57,9 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL private final DanglingIndicesState danglingIndicesState; @Nullable - private volatile MetaData currentMetaData; + private volatile MetaData previousMetaData; + + private volatile ImmutableSet previouslyWrittenIndices = ImmutableSet.of(); @Inject public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, @@ -76,7 +78,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (DiscoveryNode.masterNode(settings) || 
DiscoveryNode.dataNode(settings)) { nodeEnv.ensureAtomicMoveSupported(); } - if (DiscoveryNode.masterNode(settings)) { + if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { try { ensureNoPre019State(); pre20Upgrade(); @@ -96,10 +98,12 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Override public void clusterChanged(ClusterChangedEvent event) { + Set relevantIndices = new HashSet<>(); final ClusterState state = event.state(); if (state.blocks().disableStatePersistence()) { // reset the current metadata, we need to start fresh... - this.currentMetaData = null; + this.previousMetaData = null; + previouslyWrittenIndices = ImmutableSet.of(); return; } @@ -107,10 +111,35 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // we don't check if metaData changed, since we might be called several times and we need to check dangling... boolean success = true; - // only applied to master node, writing the global and index level states - if (state.nodes().localNode().masterNode()) { + // write the state if this node is a master eligible node or if it is a data node and has shards allocated on it + if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) { + if (previousMetaData == null) { + try { + // we determine whether or not we write meta data on data only nodes by looking at the shard routing + // and only write if a shard of this index is allocated on this node + // however, closed indices do not appear in the shard routing. if the meta data for a closed index is + // updated it will therefore not be written in case the list of previouslyWrittenIndices is empty (because state + // persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode(). 
+ // we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list + if (isDataOnlyNode(state)) { + ImmutableSet.Builder previouslyWrittenIndicesBuilder = ImmutableSet.builder(); + for (IndexMetaData indexMetaData : newMetaData) { + IndexMetaData indexMetaDataOnDisk = null; + if (indexMetaData.state().equals(IndexMetaData.State.CLOSE)) { + indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.index()); + } + if (indexMetaDataOnDisk != null) { + previouslyWrittenIndicesBuilder.add(indexMetaDataOnDisk.index()); + } + } + previouslyWrittenIndices = previouslyWrittenIndicesBuilder.addAll(previouslyWrittenIndices).build(); + } + } catch (Throwable e) { + success = false; + } + } // check if the global state changed? - if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) { + if (previousMetaData == null || !MetaData.isGlobalStateEquals(previousMetaData, newMetaData)) { try { metaStateService.writeGlobalState("changed", newMetaData); } catch (Throwable e) { @@ -118,33 +147,13 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } + Iterable writeInfo; + relevantIndices = getRelevantIndices(event.state(), previouslyWrittenIndices); + writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); // check and write changes in indices - for (IndexMetaData indexMetaData : newMetaData) { - String writeReason = null; - IndexMetaData currentIndexMetaData; - if (currentMetaData == null) { - // a new event..., check from the state stored - try { - currentIndexMetaData = metaStateService.loadIndexState(indexMetaData.index()); - } catch (IOException ex) { - throw new ElasticsearchException("failed to load index state", ex); - } - } else { - currentIndexMetaData = currentMetaData.index(indexMetaData.index()); - } - if (currentIndexMetaData == null) { - writeReason = "freshly created"; 
- } else if (currentIndexMetaData.version() != indexMetaData.version()) { - writeReason = "version changed from [" + currentIndexMetaData.version() + "] to [" + indexMetaData.version() + "]"; - } - - // we update the writeReason only if we really need to write it - if (writeReason == null) { - continue; - } - + for (IndexMetaWriteInfo indexMetaWrite : writeInfo) { try { - metaStateService.writeIndex(writeReason, indexMetaData, currentIndexMetaData); + metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData); } catch (Throwable e) { success = false; } @@ -154,10 +163,29 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL danglingIndicesState.processDanglingIndices(newMetaData); if (success) { - currentMetaData = newMetaData; + previousMetaData = newMetaData; + ImmutableSet.Builder builder = ImmutableSet.builder(); + previouslyWrittenIndices = builder.addAll(relevantIndices).build(); } } + public static Set getRelevantIndices(ClusterState state, ImmutableSet previouslyWrittenIndices) { + Set relevantIndices; + if (isDataOnlyNode(state)) { + relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previouslyWrittenIndices); + } else if (state.nodes().localNode().masterNode() == true) { + relevantIndices = getRelevantIndicesForMasterEligibleNode(state); + } else { + relevantIndices = Collections.emptySet(); + } + return relevantIndices; + } + + + protected static boolean isDataOnlyNode(ClusterState state) { + return ((state.nodes().localNode().masterNode() == false) && state.nodes().localNode().dataNode()); + } + /** * Throws an IAE if a pre 0.19 state is detected */ @@ -229,7 +257,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } - if (hasCustomPre20HashFunction|| pre20UseType != null) { + if (hasCustomPre20HashFunction || pre20UseType != null) { logger.warn("Settings [{}] and [{}] are deprecated. 
Index settings from your old indices have been updated to record the fact that they " + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); } @@ -251,4 +279,82 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } } + + /** + * Loads the current meta state for each index in the new cluster state and checks if it has to be persisted. + * Each index state that should be written to disk will be returned. This is only run for data only nodes. + * It will return only the states for indices that actually have a shard allocated on the current node. + * + * @param previouslyWrittenIndices A list of indices for which the state was already written before + * @param potentiallyUnwrittenIndices The list of indices for which state should potentially be written + * @param previousMetaData The last meta data we know of. meta data for all indices in previouslyWrittenIndices list is persisted now + * @param newMetaData The new metadata + * @return iterable over all indices states that should be written to disk + */ + public static Iterable resolveStatesToBeWritten(ImmutableSet previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { + List indicesToWrite = new ArrayList<>(); + for (String index : potentiallyUnwrittenIndices) { + IndexMetaData newIndexMetaData = newMetaData.index(index); + IndexMetaData previousIndexMetaData = previousMetaData == null ? 
null : previousMetaData.index(index); + String writeReason = null; + if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) { + writeReason = "freshly created"; + } else if (previousIndexMetaData.version() != newIndexMetaData.version()) { + writeReason = "version changed from [" + previousIndexMetaData.version() + "] to [" + newIndexMetaData.version() + "]"; + } + if (writeReason != null) { + indicesToWrite.add(new GatewayMetaState.IndexMetaWriteInfo(newIndexMetaData, previousIndexMetaData, writeReason)); + } + } + return indicesToWrite; + } + + public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ImmutableSet previouslyWrittenIndices) { + RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId()); + if (newRoutingNode == null) { + throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); + } + Set indices = new HashSet<>(); + for (MutableShardRouting routing : newRoutingNode) { + indices.add(routing.index()); + } + // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously + for (IndexMetaData indexMetaData : state.metaData()) { + if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && state.metaData().getIndices().get(indexMetaData.getIndex()).state().equals(IndexMetaData.State.CLOSE)) { + indices.add(indexMetaData.getIndex()); + } + } + return indices; + } + + public static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { + Set relevantIndices; + relevantIndices = new HashSet<>(); + // we have to iterate over the metadata to make sure we also capture closed indices + for (IndexMetaData indexMetaData : state.metaData()) { + relevantIndices.add(indexMetaData.getIndex()); + } + return relevantIndices; + } + + + public static class IndexMetaWriteInfo { + final IndexMetaData newMetaData; + final String 
reason; + final IndexMetaData previousMetaData; + + public IndexMetaWriteInfo(IndexMetaData newMetaData, IndexMetaData previousMetaData, String reason) { + this.newMetaData = newMetaData; + this.reason = reason; + this.previousMetaData = previousMetaData; + } + + public IndexMetaData getNewMetaData() { + return newMetaData; + } + + public String getReason() { + return reason; + } + } } diff --git a/src/main/java/org/elasticsearch/index/IndexService.java b/src/main/java/org/elasticsearch/index/IndexService.java index 6b192981dca..fa575e8e884 100644 --- a/src/main/java/org/elasticsearch/index/IndexService.java +++ b/src/main/java/org/elasticsearch/index/IndexService.java @@ -66,8 +66,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreModule; import org.elasticsearch.index.suggest.SuggestShardModule; import org.elasticsearch.index.termvectors.ShardTermVectorsModule; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogModule; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; @@ -187,6 +185,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone } return null; } + /** * Return the shard with the provided id, or throw an exception if it doesn't exist. 
*/ @@ -320,7 +319,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone modules.add(new ShardQueryCacheModule()); modules.add(new ShardBitsetFilterCacheModule()); modules.add(new ShardFieldDataModule()); - modules.add(new TranslogModule(indexSettings)); modules.add(new IndexShardGatewayModule()); modules.add(new PercolatorShardModule()); modules.add(new ShardTermVectorsModule()); @@ -386,7 +384,8 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone } } // now we can close the translog service, we need to close it before the we close the shard - closeInjectorResource(sId, shardInjector, TranslogService.class); + // note that the translog service is not there for shadow replicas + closeInjectorOptionalResource(sId, shardInjector, TranslogService.class); // this logic is tricky, we want to close the engine so we rollback the changes done to it // and close the shard so no operations are allowed to it if (indexShard != null) { @@ -402,7 +401,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone MergeSchedulerProvider.class, MergePolicyProvider.class, IndexShardGatewayService.class, - Translog.class, PercolatorQueriesRegistry.class); // call this before we close the store, so we can release resources for it @@ -423,18 +421,30 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone */ private void closeInjectorResource(ShardId shardId, Injector shardInjector, Class... 
toClose) { for (Class closeable : toClose) { - try { - final Closeable instance = shardInjector.getInstance(closeable); - if (instance == null) { - throw new NullPointerException("No instance available for " + closeable.getName()); - } - IOUtils.close(instance); - } catch (Throwable t) { - logger.debug("{} failed to close {}", t, shardId, Strings.toUnderscoreCase(closeable.getSimpleName())); + if (closeInjectorOptionalResource(shardId, shardInjector, closeable) == false) { + logger.warn("[{}] no instance available for [{}], ignoring... ", shardId, closeable.getSimpleName()); } } } + /** + * Closes an optional resource. Returns true if the resource was found; + * NOTE: this method swallows all exceptions thrown from the close method of the injector and logs them as debug log + */ + private boolean closeInjectorOptionalResource(ShardId shardId, Injector shardInjector, Class toClose) { + try { + final Closeable instance = shardInjector.getInstance(toClose); + if (instance == null) { + return false; + } + IOUtils.close(instance); + } catch (Throwable t) { + logger.debug("{} failed to close {}", t, shardId, Strings.toUnderscoreCase(toClose.getSimpleName())); + } + return true; + } + + private void onShardClose(ShardLock lock, boolean ownsShard) { if (deleted.get()) { // we remove that shards content if this index has been deleted try { @@ -448,7 +458,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone } } catch (IOException e) { indicesServices.addPendingDelete(lock.getShardId(), indexSettings); - logger.debug("{} failed to delete shard content - scheduled a retry", e, lock.getShardId().id()); + logger.debug("[{}] failed to delete shard content - scheduled a retry", e, lock.getShardId().id()); } } } @@ -464,7 +474,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone @Override public void handle(ShardLock lock) { - assert lock.getShardId().equals(shardId) : "shard Id mismatch, expected: " + shardId + " 
but got: " + lock.getShardId(); + assert lock.getShardId().equals(shardId) : "shard id mismatch, expected: " + shardId + " but got: " + lock.getShardId(); onShardClose(lock, ownsShard); } } diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index dc17c856031..52002ecb34f 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -84,7 +84,6 @@ public abstract class Engine implements Closeable { protected Engine(EngineConfig engineConfig) { Preconditions.checkNotNull(engineConfig.getStore(), "Store must be provided to the engine"); Preconditions.checkNotNull(engineConfig.getDeletionPolicy(), "Snapshot deletion policy must be provided to the engine"); - Preconditions.checkNotNull(engineConfig.getTranslog(), "Translog must be provided to the engine"); this.engineConfig = engineConfig; this.shardId = engineConfig.getShardId(); @@ -295,6 +294,9 @@ public abstract class Engine implements Closeable { } } + /** returns the translog for this engine */ + public abstract Translog getTranslog(); + protected void ensureOpen() { if (isClosed.get()) { throw new EngineClosedException(shardId, failedEngine); @@ -469,12 +471,12 @@ public abstract class Engine implements Closeable { public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException; /** - * Snapshots the index and returns a handle to it. Will always try and "commit" the + * Snapshots the index and returns a handle to it. If needed will try and "commit" the * lucene index to make sure we have a "fresh" copy of the files to snapshot. 
+ * + * @param flushFirst indicates whether the engine should flush before returning the snapshot */ - public abstract SnapshotIndexCommit snapshotIndex() throws EngineException; - - public abstract void recover(RecoveryHandler recoveryHandler) throws EngineException; + public abstract SnapshotIndexCommit snapshotIndex(boolean flushFirst) throws EngineException; /** fail engine due to some error. the engine will also be closed. */ public void failEngine(String reason, Throwable failure) { @@ -1063,12 +1065,19 @@ public abstract class Engine implements Closeable { protected abstract SearcherManager getSearcherManager(); + /** + * Method to close the engine while the write lock is held. + */ protected abstract void closeNoLock(String reason); + /** + * Flush the engine (committing segments to disk and truncating the + * translog) and close it. + */ public void flushAndClose() throws IOException { if (isClosed.get() == false) { logger.trace("flushAndClose now acquire writeLock"); - try (ReleasableLock _ = writeLock.acquire()) { + try (ReleasableLock lock = writeLock.acquire()) { logger.trace("flushAndClose now acquired writeLock"); try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); @@ -1090,7 +1099,7 @@ public abstract class Engine implements Closeable { public void close() throws IOException { if (isClosed.get() == false) { // don't acquire the write lock if we are already closed logger.debug("close now acquiring writeLock"); - try (ReleasableLock _ = writeLock.acquire()) { + try (ReleasableLock lock = writeLock.acquire()) { logger.debug("close acquired writeLock"); closeNoLock("api"); } diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 9c069139173..bb8006ad6b9 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -29,6 +29,8 @@ 
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArray; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; @@ -39,10 +41,12 @@ import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; +import java.nio.file.Path; import java.util.concurrent.TimeUnit; /* @@ -69,15 +73,17 @@ public final class EngineConfig { private final IndicesWarmer warmer; private final Store store; private final SnapshotDeletionPolicy deletionPolicy; - private final Translog translog; private final MergePolicyProvider mergePolicyProvider; private final MergeSchedulerProvider mergeScheduler; private final Analyzer analyzer; private final Similarity similarity; private final CodecService codecService; private final Engine.FailedEngineListener failedEngineListener; + private final boolean ignoreUnknownTranslog; private final QueryCache filterCache; private final QueryCachingPolicy filterCachingPolicy; + private final BigArrays bigArrays; + private final Path translogPath; /** * Index setting for index concurrency / number of threadstates in the indexwriter. 
@@ -121,6 +127,11 @@ public final class EngineConfig { */ public static final String INDEX_VERSION_MAP_SIZE = "index.version_map_size"; + + /** if set to true the engine will start even if the translog id in the commit point can not be found */ + public static final String INDEX_IGNORE_UNKNOWN_TRANSLOG = "index.engine.ignore_unknown_translog"; + + public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); public static final ByteSizeValue DEFAUTL_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); @@ -135,10 +146,10 @@ public final class EngineConfig { * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, - IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, - Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, - Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, - TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy) { + IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, + MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, + Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, BigArrays bigArrays, Path translogPath) { this.shardId = shardId; this.threadPool = threadPool; this.indexingService = indexingService; @@ -146,13 +157,14 @@ public final class EngineConfig { this.warmer = warmer; this.store = store; 
this.deletionPolicy = deletionPolicy; - this.translog = translog; this.mergePolicyProvider = mergePolicyProvider; this.mergeScheduler = mergeScheduler; this.analyzer = analyzer; this.similarity = similarity; this.codecService = codecService; this.failedEngineListener = failedEngineListener; + this.bigArrays = bigArrays; + this.translogPath = translogPath; Settings indexSettings = indexSettingsService.getSettings(); this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false); this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); @@ -163,6 +175,7 @@ public final class EngineConfig { versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE); updateVersionMapSize(); this.translogRecoveryPerformer = translogRecoveryPerformer; + this.ignoreUnknownTranslog = indexSettings.getAsBoolean(INDEX_IGNORE_UNKNOWN_TRANSLOG, false); this.filterCache = filterCache; this.filterCachingPolicy = filterCachingPolicy; } @@ -192,6 +205,10 @@ public final class EngineConfig { return versionMapSizeSetting; } + /** if true the engine will start even if the translog id in the commit point can not be found */ + public boolean getIgnoreUnknownTranslog() { + return ignoreUnknownTranslog; + } /** * returns the size of the version map that should trigger a refresh @@ -328,13 +345,6 @@ public final class EngineConfig { return deletionPolicy; } - /** - * Returns a {@link Translog instance} - */ - public Translog getTranslog() { - return translog; - } - /** * Returns the {@link org.elasticsearch.index.merge.policy.MergePolicyProvider} used to obtain * a {@link org.apache.lucene.index.MergePolicy} for the engines {@link org.apache.lucene.index.IndexWriter} @@ -420,4 +430,25 @@ public final class EngineConfig { public QueryCachingPolicy getFilterCachingPolicy() { return filterCachingPolicy; } + + /** + * Returns a BigArrays instance for this engine + */ + public 
BigArrays getBigArrays() { + return bigArrays; + } + + /** + * Returns the translog path for this engine + */ + public Path getTranslogPath() { + return translogPath; + } + + /** + * Returns the {@link org.elasticsearch.index.settings.IndexSettingsService} for this engine. + */ + public IndexSettingsService getIndesSettingService() { + return indexSettingsService; + } } diff --git a/src/main/java/org/elasticsearch/index/engine/EngineFactory.java b/src/main/java/org/elasticsearch/index/engine/EngineFactory.java index 77bcc3b28e4..b29148edff5 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineFactory.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.index.engine; +import org.elasticsearch.index.translog.fs.FsTranslog; + /** * Simple Engine Factory */ diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d038509c77e..44fe5df2643 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriter.IndexReaderWarmer; @@ -31,9 +32,7 @@ import org.apache.lucene.util.InfoStream; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.routing.DjbHashFunction; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -51,7 +50,7 @@ import 
org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider; import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TruncatedTranslogException; +import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -61,7 +60,6 @@ import java.io.IOException; import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -81,7 +79,7 @@ public class InternalEngine extends Engine { private final ShardIndexingService indexingService; @Nullable private final IndicesWarmer warmer; - private final Translog translog; + private final FsTranslog translog; private final MergePolicyProvider mergePolicyProvider; private final MergeSchedulerProvider mergeScheduler; @@ -96,14 +94,12 @@ public class InternalEngine extends Engine { private final Lock flushLock = new ReentrantLock(); private final ReentrantLock optimizeLock = new ReentrantLock(); - protected final FlushingRecoveryCounter onGoingRecoveries; // A uid (in the form of BytesRef) to the version map // we use the hashed variant since we iterate over it and check removal and additions on existing keys private final LiveVersionMap versionMap; private final Object[] dirtyLocks; - private final AtomicLong translogIdGenerator = new AtomicLong(); private final AtomicBoolean versionMapRefreshPending = new AtomicBoolean(); private volatile SegmentInfos lastCommittedSegmentInfos; @@ -115,14 +111,13 @@ public class InternalEngine extends Engine { this.versionMap = new LiveVersionMap(); store.incRef(); IndexWriter writer = null; + FsTranslog 
translog = null; SearcherManager manager = null; boolean success = false; try { - this.onGoingRecoveries = new FlushingRecoveryCounter(this, store, logger); this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); this.indexingService = engineConfig.getIndexingService(); this.warmer = engineConfig.getWarmer(); - this.translog = engineConfig.getTranslog(); this.mergePolicyProvider = engineConfig.getMergePolicyProvider(); this.mergeScheduler = engineConfig.getMergeScheduler(); this.dirtyLocks = new Object[engineConfig.getIndexConcurrency() * 50]; // we multiply it to have enough... @@ -132,14 +127,16 @@ public class InternalEngine extends Engine { throttle = new IndexThrottle(); this.searcherFactory = new SearchFactory(engineConfig); - final Tuple translogId; // nextTranslogId, currentTranslogId + final Long committedTranslogId; try { writer = createWriter(); indexWriter = writer; - translogId = loadTranslogIds(writer, translog); + translog = new FsTranslog(engineConfig.getShardId(), engineConfig.getIndesSettingService(), engineConfig.getBigArrays(), engineConfig.getTranslogPath(), engineConfig.getThreadPool()); + committedTranslogId = loadCommittedTranslogId(writer, translog); } catch (IOException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); } + this.translog = translog; manager = createSearcherManager(); this.searcherManager = manager; this.versionMap.setManager(searcherManager); @@ -147,26 +144,12 @@ public class InternalEngine extends Engine { this.mergeSchedulerListener = new MergeSchedulerListener(); this.mergeScheduler.addListener(mergeSchedulerListener); this.mergeScheduler.addFailureListener(mergeSchedulerFailureListener); - final TranslogRecoveryPerformer transformer = engineConfig.getTranslogRecoveryPerformer(); try { - long nextTranslogID = translogId.v2(); - translog.newTranslog(nextTranslogID); - translogIdGenerator.set(nextTranslogID); - - if (translogId.v1() != null && 
skipInitialTranslogRecovery == false) { - // recovering from local store - recoverFromTranslog(translogId.v1(), transformer); + if (skipInitialTranslogRecovery) { + // make sure we point at the latest translog from now on.. + commitIndexWriter(writer, translog.currentId(), lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID)); } else { - // recovering from a different source - // nocommit - // when we create the Engine on a target shard after recovery we must make sure that - // if a sync id is there then it is not overwritten by a forced flush - if (lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID) == null) { - flush(true, true); - } else { - SyncedFlushResult syncedFlushResult = syncFlushIfNoPendingChanges(lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID), lastCommittedSegmentInfos.getId()); - assert syncedFlushResult.equals(SyncedFlushResult.SUCCESS) : "skipped translog recovery but synced flush failed"; - } + recoverFromTranslog(engineConfig, committedTranslogId); } } catch (IOException | EngineException ex) { throw new EngineCreationFailureException(shardId, "failed to recover from translog", ex); @@ -174,7 +157,7 @@ public class InternalEngine extends Engine { success = true; } finally { if (success == false) { - IOUtils.closeWhileHandlingException(writer, manager); + IOUtils.closeWhileHandlingException(writer, translog, manager); versionMap.clear(); if (isClosed.get() == false) { // failure we need to dec the store reference @@ -185,24 +168,72 @@ public class InternalEngine extends Engine { logger.trace("created new InternalEngine"); } + @Override + public Translog getTranslog() { + ensureOpen(); + return translog; + } + + protected void recoverFromTranslog(EngineConfig engineConfig, Long committedTranslogId) throws IOException { + if (committedTranslogId != null) { + try { + // trim unneeded files + translog.markCommitted(committedTranslogId); + } catch (FileNotFoundException ex) { + if (engineConfig.getIgnoreUnknownTranslog()) { 
+ logger.warn("ignoring committed translog id [{}] ([{}] set to true)", committedTranslogId, + EngineConfig.INDEX_IGNORE_UNKNOWN_TRANSLOG); + } else { + throw ex; + } + } + } + int opsRecovered = 0; + final TranslogRecoveryPerformer handler = engineConfig.getTranslogRecoveryPerformer(); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + try { + handler.performRecoveryOperation(this, operation); + opsRecovered++; + } catch (ElasticsearchException e) { + if (e.status() == RestStatus.BAD_REQUEST) { + // mainly for MapperParsingException and Failure to detect xcontent + logger.info("ignoring recovery of a corrupt translog entry", e); + } else { + throw e; + } + } + } + } catch (Throwable e) { + throw new EngineException(shardId, "failed to recover from translog", e); + } + // flush if we recovered something or if we have references to older translogs + // note: if opsRecovered == 0 and we have older translogs it means they are corrupted or 0 length. + if (opsRecovered > 0) { + logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]", + opsRecovered, committedTranslogId, translog.currentId()); + flush(true, true); + } else if (committedTranslogId != null && translog.currentId() != committedTranslogId){ + commitIndexWriter(indexWriter, translog.currentId(), lastCommittedSegmentInfos.getUserData().get(Engine.SYNC_COMMIT_ID)); + } + } + /** - * Reads the current stored translog ID (v1) from the IW commit data and generates a new/next translog ID (v2) - * from the largest present translog ID. If there is no stored translog ID v1 is null + * Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current + * translog id into lucene and returns null. 
*/ - private Tuple loadTranslogIds(IndexWriter writer, Translog translog) throws IOException { + @Nullable + private Long loadCommittedTranslogId(IndexWriter writer, Translog translog) throws IOException { // commit on a just opened writer will commit even if there are no changes done to it // we rely on that for the commit data translog id key - final long nextTranslogId = Math.max(0, translog.findLargestPresentTranslogId()) + 1; final Map commitUserData = writer.getCommitData(); if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) { - final long currentTranslogId = Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)); - return new Tuple<>(currentTranslogId, nextTranslogId); + return Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)); } - // translog id is not in the metadata - fix this inconsistency some code relies on this and old indices might not have it. - writer.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(nextTranslogId))); - commitIndexWriter(writer); logger.debug("no translog ID present in the current commit - creating one"); - return new Tuple<>(null, nextTranslogId); + commitIndexWriter(writer, translog.currentId()); + return null; } private SearcherManager createSearcherManager() throws EngineException { @@ -641,12 +672,8 @@ public class InternalEngine extends Engine { return SyncedFlushResult.FAILED_COMMIT_MISMATCH; } logger.trace("starting sync commit [{}]", syncId); - long translogId = translog.currentId(); - Map commitData = new HashMap<>(2); - commitData.put(SYNC_COMMIT_ID, syncId); - commitData.put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)); - indexWriter.setCommitData(commitData); - commitIndexWriter(indexWriter); + final long translogId = translog.currentId(); + commitIndexWriter(indexWriter, translogId, syncId); logger.debug("successfully sync committed. 
sync id [{}].", syncId); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); return SyncedFlushResult.SUCCESS; @@ -668,12 +695,6 @@ public class InternalEngine extends Engine { private byte[] flush(boolean commitTranslog, boolean force, boolean waitIfOngoing) throws EngineException { ensureOpen(); - if (commitTranslog) { - // check outside the lock as well so we can check without blocking on the write lock - if (onGoingRecoveries.get() > 0) { - throw new FlushNotAllowedEngineException(shardId, "recovery is in progress, flush with committing translog is not allowed"); - } - } final byte[] newCommitId; /* * Unfortunately the lock order is important here. We have to acquire the readlock first otherwise @@ -687,7 +708,7 @@ public class InternalEngine extends Engine { if (flushLock.tryLock() == false) { // if we can't get the lock right away we block if needed otherwise barf if (waitIfOngoing) { - logger.trace("waiting fore in-flight flush to finish"); + logger.trace("waiting for in-flight flush to finish"); flushLock.lock(); logger.trace("acquired flush lock after blocking"); } else { @@ -698,32 +719,19 @@ public class InternalEngine extends Engine { } try { if (commitTranslog) { - if (onGoingRecoveries.get() > 0) { - throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed"); - } - if (flushNeeded || force) { flushNeeded = false; + final long translogId; try { - long translogId = translogIdGenerator.incrementAndGet(); - translog.newTransientTranslog(translogId); - indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId))); + translogId = translog.newTranslog(); logger.trace("starting commit for flush; commitTranslog=true"); - commitIndexWriter(indexWriter); + commitIndexWriter(indexWriter, translogId); logger.trace("finished commit for flush"); // we need to refresh in order to clear older version values refresh("version_table_flush"); - // we need to move transient to 
current only after we refresh - // so items added to current will still be around for realtime get - // when tans overrides it - translog.makeTransientCurrent(); + translog.markCommitted(translogId); } catch (Throwable e) { - try { - translog.revertTransient(); - } catch (IOException ex) { - e.addSuppressed(ex); - } throw new FlushFailedEngineException(shardId, e); } } @@ -735,10 +743,8 @@ public class InternalEngine extends Engine { // its ok to use this, only a flush will cause a new translogId, and we are locked here from // other flushes use flushLock try { - long translogId = translog.currentId(); - indexWriter.setCommitData(Collections.singletonMap(Translog.TRANSLOG_ID_KEY, Long.toString(translogId))); logger.trace("starting commit for flush; commitTranslog=false"); - commitIndexWriter(indexWriter); + commitIndexWriter(indexWriter, translog.currentId()); logger.trace("finished commit for flush"); } catch (Throwable e) { throw new FlushFailedEngineException(shardId, e); @@ -861,12 +867,14 @@ public class InternalEngine extends Engine { } @Override - public SnapshotIndexCommit snapshotIndex() throws EngineException { + public SnapshotIndexCommit snapshotIndex(final boolean flushFirst) throws EngineException { // we have to flush outside of the readlock otherwise we might have a problem upgrading // the to a write lock when we fail the engine in this operation - logger.trace("start flush for snapshot"); - flush(false, false, true); - logger.trace("finish flush for snapshot"); + if (flushFirst) { + logger.trace("start flush for snapshot"); + flush(false, false, true); + logger.trace("finish flush for snapshot"); + } try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); logger.trace("pulling snapshot"); @@ -876,65 +884,6 @@ public class InternalEngine extends Engine { } } - @Override - public void recover(RecoveryHandler recoveryHandler) throws EngineException { - // take a write lock here so it won't happen while a flush is in progress - // this means 
that next commits will not be allowed once the lock is released - try (ReleasableLock lock = writeLock.acquire()) { - ensureOpen(); - onGoingRecoveries.startRecovery(); - } - - SnapshotIndexCommit phase1Snapshot; - try { - phase1Snapshot = deletionPolicy.snapshot(); - } catch (Throwable e) { - maybeFailEngine("recovery", e); - Releasables.closeWhileHandlingException(onGoingRecoveries); - throw new RecoveryEngineException(shardId, 1, "Snapshot failed", e); - } - - try { - recoveryHandler.phase1(phase1Snapshot); - } catch (Throwable e) { - maybeFailEngine("recovery phase 1", e); - Releasables.closeWhileHandlingException(phase1Snapshot, onGoingRecoveries); - throw new RecoveryEngineException(shardId, 1, "Execution failed", wrapIfClosed(e)); - } - - Translog.Snapshot phase2Snapshot; - try { - phase2Snapshot = translog.snapshot(); - } catch (Throwable e) { - maybeFailEngine("snapshot recovery", e); - Releasables.closeWhileHandlingException(phase1Snapshot, onGoingRecoveries); - throw new RecoveryEngineException(shardId, 2, "Snapshot failed", wrapIfClosed(e)); - } - try { - recoveryHandler.phase2(phase2Snapshot); - } catch (Throwable e) { - maybeFailEngine("recovery phase 2", e); - Releasables.closeWhileHandlingException(phase1Snapshot, phase2Snapshot, onGoingRecoveries); - throw new RecoveryEngineException(shardId, 2, "Execution failed", wrapIfClosed(e)); - } - - writeLock.acquire(); - Translog.Snapshot phase3Snapshot = null; - boolean success = false; - try { - ensureOpen(); - phase3Snapshot = translog.snapshot(phase2Snapshot); - recoveryHandler.phase3(phase3Snapshot); - success = true; - } catch (Throwable e) { - maybeFailEngine("recovery phase 3", e); - throw new RecoveryEngineException(shardId, 3, "Execution failed", wrapIfClosed(e)); - } finally { - Releasables.close(success, phase1Snapshot, phase2Snapshot, phase3Snapshot, - onGoingRecoveries, writeLock); // hmm why can't we use try-with here? 
- } - } - @Override protected boolean maybeFailEngine(String source, Throwable t) { boolean shouldFail = super.maybeFailEngine(source, t); @@ -1004,18 +953,17 @@ public class InternalEngine extends Engine { if (isClosed.compareAndSet(false, true)) { assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; try { - try { - IOUtils.close(this.translog); - } catch (IOException ex) { - logger.warn("failed to close translog", ex); - } this.versionMap.clear(); - logger.trace("close searcherManager"); try { IOUtils.close(searcherManager); } catch (Throwable t) { logger.warn("Failed to close SearcherManager", t); } + try { + IOUtils.close(translog); + } catch (Throwable t) { + logger.warn("Failed to close translog", t); + } // no need to commit in this case!, we snapshot before we close the shard, so translog and all sync'ed logger.trace("rollback indexWriter"); try { @@ -1250,9 +1198,15 @@ public class InternalEngine extends Engine { } } - - private void commitIndexWriter(IndexWriter writer) throws IOException { + private void commitIndexWriter(IndexWriter writer, long translogId, String syncId) throws IOException { try { + logger.trace("committing writer with translog id [{}] and sync id [{}]", translogId, syncId); + Map commitData = new HashMap<>(); + commitData.put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)); + if (syncId != null) { + commitData.put(Engine.SYNC_COMMIT_ID, syncId); + } + indexWriter.setCommitData(commitData); writer.commit(); } catch (Throwable ex) { failEngine("lucene commit failed", ex); @@ -1260,49 +1214,7 @@ public class InternalEngine extends Engine { } } - protected void recoverFromTranslog(long translogId, TranslogRecoveryPerformer handler) throws IOException { - final Translog translog = engineConfig.getTranslog(); - int operationsRecovered = 0; - try (Translog.OperationIterator in = translog.openIterator(translogId)) { - 
Translog.Operation operation; - while ((operation = in.next()) != null) { - try { - handler.performRecoveryOperation(this, operation); - operationsRecovered++; - } catch (ElasticsearchException e) { - if (e.status() == RestStatus.BAD_REQUEST) { - // mainly for MapperParsingException and Failure to detect xcontent - logger.info("ignoring recovery of a corrupt translog entry", e); - } else { - throw e; - } - } - } - } catch (FileNotFoundException ex) { - logger.debug("no translog file found for ID: " + translogId); - } catch (TruncatedTranslogException e) { - // file is empty or header has been half-written and should be ignored - logger.trace("ignoring truncation exception, the translog is either empty or half-written", e); - } catch (Throwable e) { - IOUtils.closeWhileHandlingException(translog); - throw new EngineException(shardId, "failed to recover from translog", e); - } - - // nocommit: when we recover from gateway we recover ops from the translog we found and then create a new translog with new id. - // we flush here because we need to write a new translog id after recovery. - // we need to make sure here that an existing sync id is not overwritten by this flush if one exists. - // so, in case the old translog did not contain any ops, we should use the old sync id for flushing. - // nocommit because not sure if this here is the best solution for this... 
- if (operationsRecovered > 0) { - flush(true, true); - refresh("translog recovery"); - } else if (lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID) == null) { - flush(true, true); - } else { - SyncedFlushResult syncedFlushResult = syncFlushIfNoPendingChanges(lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID), lastCommittedSegmentInfos.getId()); - assert syncedFlushResult.equals(SyncedFlushResult.SUCCESS) : "no operations during translog recovery but synced flush failed"; - } - translog.clearUnreferenced(); + private void commitIndexWriter(IndexWriter writer, long translogId) throws IOException { + commitIndexWriter(writer, translogId, null); } - } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java b/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java index fdf708cfd51..c9c13e3d879 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.index.engine; +import org.elasticsearch.index.translog.fs.FsTranslog; + public class InternalEngineFactory implements EngineFactory { @Override public Engine newReadWriteEngine(EngineConfig config, boolean skipTranslogRecovery) { diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 1d783af7460..303426d16b3 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -25,13 +25,13 @@ import org.apache.lucene.search.SearcherFactory; import org.apache.lucene.search.SearcherManager; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.Lucene; import 
org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; import org.elasticsearch.index.shard.IndexShardException; +import org.elasticsearch.index.translog.Translog; import java.io.IOException; import java.util.Arrays; @@ -174,6 +174,11 @@ public class ShadowEngine extends Engine { return getFromSearcher(get); } + @Override + public Translog getTranslog() { + throw new UnsupportedOperationException("shadow engines don't have translogs"); + } + @Override public List segments(boolean verbose) { try (ReleasableLock lock = readLock.acquire()) { @@ -205,15 +210,10 @@ public class ShadowEngine extends Engine { } @Override - public SnapshotIndexCommit snapshotIndex() throws EngineException { + public SnapshotIndexCommit snapshotIndex(boolean flushFirst) throws EngineException { throw new UnsupportedOperationException("Can not take snapshot from a shadow engine"); } - @Override - public void recover(RecoveryHandler recoveryHandler) throws EngineException { - throw new UnsupportedOperationException("Can not recover from a shadow engine"); - } - @Override protected SearcherManager getSearcherManager() { return searcherManager; diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java index 1cbfaab0672..a69272b2e43 100644 --- a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java +++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java @@ -30,14 +30,12 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.index.IndexService; 
import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.threadpool.ThreadPool; @@ -47,7 +45,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -61,9 +58,7 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl private final IndexService indexService; private final IndexShard indexShard; private final TimeValue waitForMappingUpdatePostRecovery; - private final TimeValue syncInterval; - private volatile ScheduledFuture flushScheduler; private final CancellableThreads cancellableThreads = new CancellableThreads(); @@ -76,17 +71,7 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl this.indexService = indexService; this.indexShard = indexShard; - this.waitForMappingUpdatePostRecovery = indexSettings.getAsTime("index.gateway.wait_for_mapping_update_post_recovery", TimeValue.timeValueMinutes(15)); - syncInterval = indexSettings.getAsTime("index.gateway.sync", TimeValue.timeValueSeconds(5)); - if (syncInterval.millis() > 0) { - this.indexShard.translog().syncOnEachOperation(false); - flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync()); - } else if (syncInterval.millis() == 0) { - flushScheduler = null; - this.indexShard.translog().syncOnEachOperation(true); - } else { - flushScheduler = null; - } + this.waitForMappingUpdatePostRecovery = 
indexSettings.getAsTime("index.gateway.wait_for_mapping_update_post_recovery", TimeValue.timeValueSeconds(15)); } /** @@ -198,39 +183,9 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl @Override public void close() { - FutureUtils.cancel(flushScheduler); cancellableThreads.cancel("closed"); } - class Sync implements Runnable { - @Override - public void run() { - // don't re-schedule if its closed..., we are done - if (indexShard.state() == IndexShardState.CLOSED) { - return; - } - if (indexShard.state() == IndexShardState.STARTED && indexShard.translog().syncNeeded()) { - threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() { - @Override - public void run() { - try { - indexShard.translog().sync(); - } catch (Exception e) { - if (indexShard.state() == IndexShardState.STARTED) { - logger.warn("failed to sync translog", e); - } - } - if (indexShard.state() != IndexShardState.CLOSED) { - flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); - } - } - }); - } else { - flushScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); - } - } - } - @Override public String toString() { return "shard_gateway"; diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index 3fa975a31ed..ff502ea27bc 100644 --- a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -71,6 +71,7 @@ public class IndexDynamicSettingsModule extends AbstractModule { indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_READ); indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_WRITE); indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_BLOCKS_METADATA); + 
indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE); indexDynamicSettings.addDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE); indexDynamicSettings.addDynamicSetting(IndexShard.INDEX_REFRESH_INTERVAL, Validator.TIME); indexDynamicSettings.addDynamicSetting(GatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS); diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index a59d938d5a9..fad982e1437 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.shard; import com.google.common.base.Charsets; - import com.google.common.base.Preconditions; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; @@ -48,12 +47,15 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.MetaDataStateFormat; @@ -69,14 +71,7 @@ import org.elasticsearch.index.cache.query.ShardQueryCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import 
org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; -import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineClosedException; -import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.engine.RefreshFailedEngineException; -import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.engine.*; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ShardFieldData; @@ -85,12 +80,7 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperAnalyzer; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.policy.MergePolicyProvider; @@ -145,7 +135,6 @@ public class IndexShard extends AbstractIndexShardComponent { private final InternalIndicesLifecycle indicesLifecycle; private final Store store; private final MergeSchedulerProvider mergeScheduler; - private final Translog translog; private final IndexAliasesService indexAliasesService; private final ShardIndexingService indexingService; private final ShardSearchService searchService; @@ -171,6 +160,7 @@ public class IndexShard extends AbstractIndexShardComponent { 
private final SnapshotDeletionPolicy deletionPolicy; private final SimilarityService similarityService; private final MergePolicyProvider mergePolicyProvider; + private final BigArrays bigArrays; private final EngineConfig engineConfig; private TimeValue refreshInterval; @@ -204,28 +194,30 @@ public class IndexShard extends AbstractIndexShardComponent { public static final String INDEX_FLUSH_ON_CLOSE = "index.flush_on_close"; private final ShardPath path; + private final IndexShardOperationCounter indexShardOperationCounter; + @Inject - public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, MergeSchedulerProvider mergeScheduler, Translog translog, + public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, MergeSchedulerProvider mergeScheduler, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService, ShardFilterCache shardFilterCache, ShardFieldData shardFieldData, PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService, - ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService, ShardSuggestService shardSuggestService, ShardQueryCache shardQueryCache, ShardBitsetFilterCache shardBitsetFilterCache, + ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService, ShardSuggestService shardSuggestService, + ShardQueryCache shardQueryCache, ShardBitsetFilterCache shardBitsetFilterCache, @Nullable IndicesWarmer warmer, SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService, MergePolicyProvider 
mergePolicyProvider, EngineFactory factory, - ClusterService clusterService, NodeEnvironment nodeEnv, ShardPath path) { + ClusterService clusterService, NodeEnvironment nodeEnv, ShardPath path, BigArrays bigArrays) { super(shardId, indexSettingsService.getSettings()); this.codecService = codecService; this.warmer = warmer; this.deletionPolicy = deletionPolicy; this.similarityService = similarityService; this.mergePolicyProvider = mergePolicyProvider; + this.bigArrays = bigArrays; Preconditions.checkNotNull(store, "Store must be provided to the index shard"); Preconditions.checkNotNull(deletionPolicy, "Snapshot deletion policy must be provided to the index shard"); - Preconditions.checkNotNull(translog, "Translog must be provided to the index shard"); this.engineFactory = factory; this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle; this.indexSettingsService = indexSettingsService; this.store = store; this.mergeScheduler = mergeScheduler; - this.translog = translog; this.threadPool = threadPool; this.mapperService = mapperService; this.queryParserService = queryParserService; @@ -260,14 +252,20 @@ public class IndexShard extends AbstractIndexShardComponent { this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false"); this.engineConfig = newEngineConfig(); + this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); } public Store store() { return this.store; } - public Translog translog() { - return translog; + /** returns true if this shard supports indexing (i.e., write) operations. 
*/ + public boolean canIndex() { + return true; + } + + public Translog.View newTranslogView() { + return engine().getTranslog().newView(); } public ShardIndexingService indexingService() { @@ -662,7 +660,7 @@ public class IndexShard extends AbstractIndexShardComponent { } public TranslogStats translogStats() { - return translog.stats(); + return engine().getTranslog().stats(); } public SuggestStats suggestStats() { @@ -717,21 +715,16 @@ public class IndexShard extends AbstractIndexShardComponent { optimize.upgrade(), optimize.upgradeOnlyAncientSegments()); } - public SnapshotIndexCommit snapshotIndex() throws EngineException { + public SnapshotIndexCommit snapshotIndex(boolean flushFirst) throws EngineException { IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { - return engine().snapshotIndex(); + return engine().snapshotIndex(flushFirst); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); } } - public void recover(Engine.RecoveryHandler recoveryHandler) throws EngineException { - verifyStarted(); - engine().recover(recoveryHandler); - } - public void failShard(String reason, Throwable e) { // fail the engine. This will cause this shard to also be removed from the node's index service. 
engine().failEngine(reason, e); @@ -757,6 +750,7 @@ public class IndexShard extends AbstractIndexShardComponent { mergeScheduleFuture = null; } changeState(IndexShardState.CLOSED, reason); + indexShardOperationCounter.decRef(); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); try { @@ -788,7 +782,9 @@ public class IndexShard extends AbstractIndexShardComponent { return this; } - /** called before starting to copy index files over */ + /** + * called before starting to copy index files over + */ public void prepareForIndexRecovery() { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -828,7 +824,6 @@ public class IndexShard extends AbstractIndexShardComponent { if (Booleans.parseBoolean(checkIndexOnStartup, false)) { checkIndex(); } - recoveryState.setStage(RecoveryState.Stage.TRANSLOG); // we disable deletes since we allow for operations to be executed against the shard while recovering // but we need to make sure we don't loose deletes until we are done recovering @@ -841,15 +836,23 @@ public class IndexShard extends AbstractIndexShardComponent { * After the store has been recovered, we need to start the engine. This method starts a new engine but skips * the replay of the transaction log which is required in cases where we restore a previous index or recover from * a remote peer. + * + * @param wipeTranslogs if set to true all skipped / uncommitted translogs are removed. 
*/ - public void skipTranslogRecovery() { - assert engineUnsafe() == null : "engine was already created"; - Map recoveredTypes = internalPerformTranslogRecovery(true); - assert recoveredTypes.isEmpty(); - assert recoveryState.getTranslog().recoveredOperations() == 0; + public void skipTranslogRecovery(boolean wipeTranslogs) throws IOException { + assert engineUnsafe() == null : "engine was already created"; + Map recoveredTypes = internalPerformTranslogRecovery(true); + assert recoveredTypes.isEmpty(); + assert recoveryState.getTranslog().recoveredOperations() == 0; + if (wipeTranslogs) { + final Translog translog = engine().getTranslog(); + translog.markCommitted(translog.currentId()); + } } - /** called if recovery has to be restarted after network error / delay ** */ + /** + * called if recovery has to be restarted after network error / delay ** + */ public void performRecoveryRestart() throws IOException { synchronized (mutex) { if (state != IndexShardState.RECOVERING) { @@ -861,7 +864,9 @@ public class IndexShard extends AbstractIndexShardComponent { } } - /** returns stats about ongoing recoveries, both source and target */ + /** + * returns stats about ongoing recoveries, both source and target + */ public RecoveryStats recoveryStats() { return recoveryStats; } @@ -880,8 +885,6 @@ public class IndexShard extends AbstractIndexShardComponent { */ public void finalizeRecovery() { recoveryState().setStage(RecoveryState.Stage.FINALIZE); - // clear unreferenced files - translog.clearUnreferenced(); engine().refresh("recovery_finalization"); startScheduledTasksIfNeeded(); engineConfig.setEnableGcDeletes(true); @@ -988,7 +991,10 @@ public class IndexShard extends AbstractIndexShardComponent { logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, shardIndexingBufferSize); } } - translog().updateBuffer(shardTranslogBufferSize); + Engine engine = engineUnsafe(); + if (engine != null) { + engine.getTranslog().updateBuffer(shardTranslogBufferSize); + } } 
public void markAsInactive() { @@ -1010,6 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent { } MetaDataStateFormat.deleteMetaState(shardPath().getDataPath()); } + public ShardPath shardPath() { return path; } @@ -1109,7 +1116,9 @@ public class IndexShard extends AbstractIndexShardComponent { }); } - /** Schedules another (future) refresh, if refresh_interval is still enabled. */ + /** + * Schedules another (future) refresh, if refresh_interval is still enabled. + */ private void reschedule() { synchronized (mutex) { if (state != IndexShardState.CLOSED && refreshInterval.millis() > 0) { @@ -1243,7 +1252,6 @@ public class IndexShard extends AbstractIndexShardComponent { return engineFactory.newReadWriteEngine(config, skipTranslogRecovery); } - /** * Returns true iff this shard allows primary promotion, otherwise false */ @@ -1301,7 +1309,40 @@ public class IndexShard extends AbstractIndexShardComponent { } }; return new EngineConfig(shardId, - threadPool, indexingService, indexSettingsService, warmer, store, deletionPolicy, translog, mergePolicyProvider, mergeScheduler, - mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy()); + threadPool, indexingService, indexSettingsService, warmer, store, deletionPolicy, mergePolicyProvider, mergeScheduler, + mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy(), bigArrays, shardPath().resolveTranslog()); + } + + private static class IndexShardOperationCounter extends AbstractRefCounted { + final private ESLogger logger; + private final ShardId shardId; + + public IndexShardOperationCounter(ESLogger logger, ShardId shardId) { + super("index-shard-operations-counter"); + this.logger = logger; + this.shardId = shardId; + } + + @Override + protected void closeInternal() { + logger.debug("operations counter 
reached 0, will not accept any further writes"); + } + + @Override + protected void alreadyClosed() { + throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed."); + } + } + + public void incrementOperationCounter() { + indexShardOperationCounter.incRef(); + } + + public void decrementOperationCounter() { + indexShardOperationCounter.decRef(); + } + + public int getOperationsCount() { + return indexShardOperationCounter.refCount(); } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java index 019b4d13bb4..28812b08805 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java @@ -46,7 +46,7 @@ public class IndexShardException extends IndexException { @Override public String toString() { - return (shardId == null ? "_na" : shardId) + getMessage(); + return (shardId == null ? 
"_na" : shardId) + " " + getMessage(); } @Override diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java b/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java index 2ba09533eae..fc44f11eab9 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardModule.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.warmer.ShardIndexWarmerService; /** @@ -47,6 +48,9 @@ public class IndexShardModule extends AbstractModule { this.settings = settings; this.shardId = shardId; this.primary = primary; + if (settings.get("index.translog.type") != null) { + throw new IllegalStateException("a custom translog type is no longer supported. 
got [" + settings.get("index.translog.type") + "]"); + } } /** Return true if a shadow engine should be used */ @@ -61,6 +65,7 @@ public class IndexShardModule extends AbstractModule { bind(IndexShard.class).to(ShadowIndexShard.class).asEagerSingleton(); } else { bind(IndexShard.class).asEagerSingleton(); + bind(TranslogService.class).asEagerSingleton(); } bind(EngineFactory.class).to(settings.getAsClass(ENGINE_FACTORY, DEFAULT_ENGINE_FACTORY_CLASS, ENGINE_PREFIX, ENGINE_SUFFIX)); diff --git a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index ba7dcdd3976..9f5537856e6 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.aliases.IndexAliasesService; @@ -56,6 +57,8 @@ import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; + /** * ShadowIndexShard extends {@link IndexShard} to add file synchronization * from the primary when a flush happens. 
It also ensures that a replica being @@ -67,7 +70,7 @@ public final class ShadowIndexShard extends IndexShard { @Inject public ShadowIndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, MergeSchedulerProvider mergeScheduler, - Translog translog, ThreadPool threadPool, MapperService mapperService, + ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, @@ -78,14 +81,15 @@ public final class ShadowIndexShard extends IndexShard { IndexService indexService, ShardSuggestService shardSuggestService, ShardQueryCache shardQueryCache, ShardBitsetFilterCache shardBitsetFilterCache, @Nullable IndicesWarmer warmer, SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService, - MergePolicyProvider mergePolicyProvider, EngineFactory factory, ClusterService clusterService, NodeEnvironment nodeEnv, ShardPath path) { + MergePolicyProvider mergePolicyProvider, EngineFactory factory, ClusterService clusterService, + NodeEnvironment nodeEnv, ShardPath path, BigArrays bigArrays) throws IOException { super(shardId, indexSettingsService, indicesLifecycle, store, mergeScheduler, - translog, threadPool, mapperService, queryParserService, indexCache, indexAliasesService, + threadPool, mapperService, queryParserService, indexCache, indexAliasesService, indexingService, getService, searchService, shardWarmerService, shardFilterCache, shardFieldData, percolatorQueriesRegistry, shardPercolateService, codecService, termVectorsService, indexFieldDataService, indexService, shardSuggestService, shardQueryCache, shardBitsetFilterCache, warmer, deletionPolicy, similarityService, - mergePolicyProvider, factory, clusterService, nodeEnv, path); + mergePolicyProvider, factory, clusterService, nodeEnv, path, bigArrays); } 
/** @@ -102,6 +106,11 @@ public final class ShadowIndexShard extends IndexShard { super.updateRoutingEntry(newRouting, persistState); } + @Override + public boolean canIndex() { + return false; + } + @Override protected Engine newEngine(boolean skipInitialTranslogRecovery, EngineConfig config) { assert this.shardRouting.primary() == false; diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java index e64684ed669..de183c02e9f 100644 --- a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java +++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java @@ -80,7 +80,8 @@ public class IndexShardSnapshotAndRestoreService extends AbstractIndexShardCompo } try { - SnapshotIndexCommit snapshotIndexCommit = indexShard.snapshotIndex(); + // we flush first to make sure we get the latest writes snapshotted + SnapshotIndexCommit snapshotIndexCommit = indexShard.snapshotIndex(true); try { indexShardRepository.snapshot(snapshotId, shardId, snapshotIndexCommit, snapshotStatus); if (logger.isDebugEnabled()) { @@ -124,7 +125,7 @@ public class IndexShardSnapshotAndRestoreService extends AbstractIndexShardCompo snapshotShardId = new ShardId(restoreSource.index(), shardId.id()); } indexShardRepository.restore(restoreSource.snapshotId(), shardId, snapshotShardId, recoveryState); - indexShard.skipTranslogRecovery(); + indexShard.skipTranslogRecovery(true); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), shardId); diff --git a/src/main/java/org/elasticsearch/index/translog/ChecksummedTranslogStream.java b/src/main/java/org/elasticsearch/index/translog/ChecksummedTranslogStream.java index e1802c5d5e2..e62436522c2 100644 --- 
a/src/main/java/org/elasticsearch/index/translog/ChecksummedTranslogStream.java +++ b/src/main/java/org/elasticsearch/index/translog/ChecksummedTranslogStream.java @@ -23,9 +23,14 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.InputStreamDataInput; import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.NoopStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.*; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.file.Files; @@ -65,12 +70,12 @@ public class ChecksummedTranslogStream implements TranslogStream { Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte()); operation = TranslogStreams.newOperationFromType(type); operation.readFrom(in); + verifyChecksum(in); } catch (EOFException e) { throw new TruncatedTranslogException("reached premature end of file, translog is truncated", e); } catch (AssertionError|Exception e) { throw new TranslogCorruptedException("translog corruption while reading from stream", e); } - verifyChecksum(in); return operation; } @@ -103,6 +108,11 @@ public class ChecksummedTranslogStream implements TranslogStream { // closing it will close the FileChannel OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(channel)); CodecUtil.writeHeader(out, TranslogStreams.TRANSLOG_CODEC, VERSION); + return headerLength(); + } + + @Override + public int headerLength() { return CodecUtil.headerLength(TranslogStreams.TRANSLOG_CODEC); } diff --git a/src/main/java/org/elasticsearch/index/translog/LegacyTranslogStream.java 
b/src/main/java/org/elasticsearch/index/translog/LegacyTranslogStream.java index e859065d791..4611524861e 100644 --- a/src/main/java/org/elasticsearch/index/translog/LegacyTranslogStream.java +++ b/src/main/java/org/elasticsearch/index/translog/LegacyTranslogStream.java @@ -23,9 +23,6 @@ import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.file.Files; @@ -41,6 +38,10 @@ public class LegacyTranslogStream implements TranslogStream { @Override public Translog.Operation read(StreamInput in) throws IOException { + // read the opsize before an operation. + // Note that this was written & read outside of the stream when this class was used, but it makes things more consistent + // to read this here + in.readInt(); Translog.Operation.Type type = Translog.Operation.Type.fromId(in.readByte()); Translog.Operation operation = TranslogStreams.newOperationFromType(type); operation.readFrom(in); @@ -49,8 +50,7 @@ @Override public void write(StreamOutput out, Translog.Operation op) throws IOException { - out.writeByte(op.opType().id()); - op.writeTo(out); + throw new UnsupportedOperationException("LegacyTranslogStream is deprecated. 
Use TranslogStreams.LATEST"); } @@ -59,6 +59,11 @@ return 0; } + @Override + public int headerLength() { + return 0; + } + @Override public StreamInput openInput(Path translogFile) throws IOException { // nothing to do, legacy translogs have no header diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java index 63970ab3aaf..94afe9a4877 100644 --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -37,17 +37,17 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShardComponent; -import java.io.Closeable; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Path; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; - /** * */ -public interface Translog extends IndexShardComponent, Closeable, Accountable { +public interface Translog extends IndexShardComponent { static ByteSizeValue INACTIVE_SHARD_TRANSLOG_BUFFER = ByteSizeValue.parseBytesSizeValue("1kb"); @@ -61,42 +61,21 @@ long currentId(); /** - * Returns the number of operations in the transaction log. + * Returns the number of operations in the transaction files that aren't committed to lucene. + * Note: may return -1 if unknown */ - int estimatedNumberOfOperations(); + int totalOperations(); /** - * Returns the size in bytes of the translog. + * Returns the size in bytes of the translog files that aren't committed to lucene. */ - long translogSizeInBytes(); + long sizeInBytes(); /** - * Creates a new transaction log internally. - *

- *

Can only be called by one thread. - * @param id the translog id for the new translog + * Creates a new transaction log file internally. That new file will be visible to all outstanding views. + * The id of the new translog file is returned. */ - void newTranslog(long id) throws TranslogException, IOException; - - /** - * Creates a new transient translog, where added ops will be added to the current one, and to - * it. - *

- *

Can only be called by one thread. - */ - void newTransientTranslog(long id) throws TranslogException; - - /** - * Swaps the transient translog to be the current one. - *

- *

Can only be called by one thread. - */ - void makeTransientCurrent() throws IOException; - - /** - * Reverts back to not have a transient translog. - */ - void revertTransient() throws IOException; + long newTranslog() throws TranslogException, IOException; /** * Adds a create operation to the transaction log. @@ -107,22 +86,15 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { /** * Snapshots the current transaction log allowing to safely iterate over the snapshot. + * Snapshots are fixed in time and will not be updated with future operations. */ - Snapshot snapshot() throws TranslogException; + Snapshot newSnapshot() throws TranslogException; /** - * Snapshots the delta between the current state of the translog, and the state defined - * by the provided snapshot. If a new translog has been created after the provided snapshot - * has been take, will return a snapshot on the current trasnlog. + * Returns a view into the current translog that is guaranteed to retain all current operations + * while receiving future ones as well */ - Snapshot snapshot(Snapshot snapshot); - - /** - * Clears unreferenced transaction logs. - * - * @return the number of clean up files - */ - int clearUnreferenced(); + View newView(); /** * Sync's the translog. @@ -140,36 +112,19 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { */ public Path location(); - /** - * Returns the translog filename for the given id. - */ - String getFilename(long translogId); - /** * return stats */ TranslogStats stats(); /** - * Returns the largest translog id present in all locations or -1 if no translog is present. + * notifies the translog that translogId was committed as part of the commit data in lucene, together + * with all operations from previous translogs. This allows releasing all previous translogs. + * + * @throws FileNotFoundException if the given translog id can not be found. 
*/ - long findLargestPresentTranslogId() throws IOException; + void markCommitted(long translogId) throws FileNotFoundException; - /** - * Returns an OperationIterator to iterate over all translog entries in the given translog ID. - * @throws java.io.FileNotFoundException if the file for the translog ID can not be found - */ - OperationIterator openIterator(long translogId) throws IOException; - - /** - * Iterator for translog operations. - */ - public static interface OperationIterator extends Releasable { - /** - * Returns the next operation in the translog or null if we reached the end of the stream. - */ - public Translog.Operation next() throws IOException; - } static class Location implements Accountable { @@ -185,7 +140,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { @Override public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_INT; + return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_INT; } @Override @@ -202,22 +157,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { /** * A snapshot of the transaction log, allows to iterate over all the transaction log operations. */ - static interface Snapshot extends OperationIterator { - - /** - * The id of the translog the snapshot was taken with. - */ - long translogId(); - - /** - * Returns the current position in the translog stream - */ - long position(); - - /** - * Returns the internal length (*not* number of operations) of this snapshot. - */ - long length(); + static interface Snapshot extends Releasable { /** * The total number of operations in the translog. 
@@ -225,14 +165,31 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { int estimatedTotalOperations(); /** - * Seek to the specified position in the translog stream + * Returns the next operation in the snapshot or null if we reached the end. */ - void seekTo(long position); + public Translog.Operation next() throws IOException; + + } + + /** a view into the current translog that receives all operations from the moment created */ + interface View extends Releasable { /** - * The length in bytes of this stream. + * The total number of operations in the view. */ - long lengthInBytes(); + int totalOperations(); + + /** + * Returns the size in bytes of the files behind the view. + */ + long sizeInBytes(); + + /** create a snapshot from this view */ + Snapshot snapshot(); + + /** the smallest translog id in this view */ + long minTranslogId(); + } /** @@ -277,6 +234,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { long estimateSize(); Source getSource(); + } static class Source { @@ -435,6 +393,57 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { out.writeLong(ttl); out.writeByte(versionType.getValue()); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Create create = (Create) o; + + if (timestamp != create.timestamp || + ttl != create.ttl || + version != create.version || + id.equals(create.id) == false || + type.equals(create.type) == false || + source.equals(create.source) == false) { + return false; + } + if (routing != null ? !routing.equals(create.routing) : create.routing != null) { + return false; + } + if (parent != null ? 
!parent.equals(create.parent) : create.parent != null) { + return false; + } + return versionType == create.versionType; + + } + + @Override + public int hashCode() { + int result = id.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + source.hashCode(); + result = 31 * result + (routing != null ? routing.hashCode() : 0); + result = 31 * result + (parent != null ? parent.hashCode() : 0); + result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); + result = 31 * result + (int) (ttl ^ (ttl >>> 32)); + result = 31 * result + (int) (version ^ (version >>> 32)); + result = 31 * result + versionType.hashCode(); + return result; + } + + @Override + public String toString() { + return "Create{" + + "id='" + id + '\'' + + ", type='" + type + '\'' + + '}'; + } } static class Index implements Operation { @@ -581,6 +590,55 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { out.writeLong(ttl); out.writeByte(versionType.getValue()); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Index index = (Index) o; + + if (version != index.version || + timestamp != index.timestamp || + ttl != index.ttl || + id.equals(index.id) == false || + type.equals(index.type) == false || + versionType != index.versionType || + source.equals(index.source) == false) { + return false; + } + if (routing != null ? !routing.equals(index.routing) : index.routing != null) { + return false; + } + return !(parent != null ? !parent.equals(index.parent) : index.parent != null); + + } + + @Override + public int hashCode() { + int result = id.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + (int) (version ^ (version >>> 32)); + result = 31 * result + versionType.hashCode(); + result = 31 * result + source.hashCode(); + result = 31 * result + (routing != null ? 
routing.hashCode() : 0); + result = 31 * result + (parent != null ? parent.hashCode() : 0); + result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); + result = 31 * result + (int) (ttl ^ (ttl >>> 32)); + return result; + } + + @Override + public String toString() { + return "Index{" + + "id='" + id + '\'' + + ", type='" + type + '\'' + + '}'; + } } static class Delete implements Operation { @@ -658,6 +716,37 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { out.writeLong(version); out.writeByte(versionType.getValue()); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Delete delete = (Delete) o; + + return version == delete.version && + uid.equals(delete.uid) && + versionType == delete.versionType; + } + + @Override + public int hashCode() { + int result = uid.hashCode(); + result = 31 * result + (int) (version ^ (version >>> 32)); + result = 31 * result + versionType.hashCode(); + return result; + } + + @Override + public String toString() { + return "Delete{" + + "uid=" + uid + + '}'; + } } /** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */ @@ -755,5 +844,40 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { out.writeVInt(0); } } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DeleteByQuery that = (DeleteByQuery) o; + + if (!Arrays.equals(filteringAliases, that.filteringAliases)) { + return false; + } + if (!Arrays.equals(types, that.types)) { + return false; + } + return source.equals(that.source); + } + + @Override + public int hashCode() { + int result = source.hashCode(); + result = 31 * result + (filteringAliases != null ? 
Arrays.hashCode(filteringAliases) : 0); + result = 31 * result + Arrays.hashCode(types); + return result; + } + + @Override + public String toString() { + return "DeleteByQuery{" + + "types=" + Arrays.toString(types) + + '}'; + } } } diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogModule.java b/src/main/java/org/elasticsearch/index/translog/TranslogModule.java deleted file mode 100644 index aeaff131bf3..00000000000 --- a/src/main/java/org/elasticsearch/index/translog/TranslogModule.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.translog; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Scopes; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.translog.fs.FsTranslog; - -/** - * - */ -public class TranslogModule extends AbstractModule { - - public static class TranslogSettings { - public static final String TYPE = "index.translog.type"; - } - - private final Settings settings; - - public TranslogModule(Settings settings) { - this.settings = settings; - } - - @Override - protected void configure() { - bind(Translog.class) - .to(settings.getAsClass(TranslogSettings.TYPE, FsTranslog.class)) - .in(Scopes.SINGLETON); - bind(TranslogService.class).asEagerSingleton(); - } -} diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogService.java b/src/main/java/org/elasticsearch/index/translog/TranslogService.java index 9bdc29a21f8..633aeae2e83 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogService.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogService.java @@ -29,11 +29,7 @@ import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.index.engine.FlushNotAllowedEngineException; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettingsService; -import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.IllegalIndexShardStateException; -import org.elasticsearch.index.shard.IndexShardState; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.*; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; @@ -57,7 +53,7 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos private final ThreadPool threadPool; private final IndexSettingsService indexSettingsService; private final 
IndexShard indexShard; - private final Translog translog; + private volatile Translog translog; private volatile TimeValue interval; private volatile int flushThresholdOperations; @@ -69,12 +65,11 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos private final ApplySettings applySettings = new ApplySettings(); @Inject - public TranslogService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard, Translog translog) { + public TranslogService(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, ThreadPool threadPool, IndexShard indexShard) { super(shardId, indexSettings); this.threadPool = threadPool; this.indexSettingsService = indexSettingsService; this.indexShard = indexShard; - this.translog = translog; this.flushThresholdOperations = indexSettings.getAsInt(INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, indexSettings.getAsInt("index.translog.flush_threshold", Integer.MAX_VALUE)); this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)); this.flushThresholdPeriod = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(30)); @@ -88,7 +83,6 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos indexSettingsService.addListener(applySettings); } - @Override public void close() { indexSettingsService.removeListener(applySettings); @@ -148,12 +142,12 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos return; } - if (indexShard.state() == IndexShardState.CREATED) { + if (indexShard.engine().getTranslog() == null) { reschedule(); return; } - int currentNumberOfOperations = translog.estimatedNumberOfOperations(); + int currentNumberOfOperations = translog.totalOperations(); if (currentNumberOfOperations == 0) { reschedule(); return; @@ -168,7 
+162,7 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos } if (flushThresholdSize.bytes() > 0) { - long sizeInBytes = translog.translogSizeInBytes(); + long sizeInBytes = translog.sizeInBytes(); if (sizeInBytes > flushThresholdSize.bytes()) { logger.trace("flushing translog, size [{}], breached [{}]", new ByteSizeValue(sizeInBytes), flushThresholdSize); asyncFlushAndReschedule(); diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index d0160ba18d0..1af0a747c27 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -34,14 +34,13 @@ import java.io.IOException; public class TranslogStats implements ToXContent, Streamable { private long translogSizeInBytes = 0; - private int estimatedNumberOfOperations = 0; + private int estimatedNumberOfOperations = -1; public TranslogStats() { } public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) { - assert estimatedNumberOfOperations >= 0 : "estimatedNumberOfOperations must be >=0, got [" + estimatedNumberOfOperations + "]"; - assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >=0, got [" + translogSizeInBytes + "]"; + assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >= 0, got [" + translogSizeInBytes + "]"; this.estimatedNumberOfOperations = estimatedNumberOfOperations; this.translogSizeInBytes = translogSizeInBytes; } diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStream.java b/src/main/java/org/elasticsearch/index/translog/TranslogStream.java index cf3446c6ae9..061a12ff445 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogStream.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogStream.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.translog; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.File; import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.file.Path; @@ -49,6 +48,11 @@ public interface TranslogStream { */ public int writeHeader(FileChannel channel) throws IOException; + /** + * returns the size of the header in bytes + */ + public int headerLength(); + /** * Seek past the header, if any header is present */ diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java b/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java index a9b743b452e..345c027e161 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogStreams.java @@ -28,8 +28,6 @@ import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.File; -import java.io.FileInputStream; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -91,7 +89,6 @@ public class TranslogStreams { * has zero length, returns the latest version. If the header does not * exist, assumes Version 0 of the translog file format. *

- * The caller is responsible for closing the TranslogStream. * * @throws IOException */ diff --git a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java index ebd5e125353..7236d19f654 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java @@ -21,34 +21,21 @@ package org.elasticsearch.index.translog.fs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.TranslogStream; -import org.elasticsearch.index.translog.TranslogStreams; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogException; +import org.elasticsearch.index.translog.TranslogStream; import java.io.IOException; import java.io.OutputStream; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.nio.ByteBuffer; /** */ -public class BufferingFsTranslogFile implements FsTranslogFile { - - private final long id; - private final ShardId shardId; - private final ChannelReference channelReference; - private final TranslogStream translogStream; - private final int headerSize; - - private final ReadWriteLock rwl = new ReentrantReadWriteLock(); - private final AtomicBoolean closed = new AtomicBoolean(); +public final class BufferingFsTranslogFile extends FsTranslogFile { private volatile int operationCounter; - private volatile long lastPosition; private volatile long lastWrittenPosition; @@ -59,36 +46,28 @@ public class BufferingFsTranslogFile implements FsTranslogFile { private WrapperOutputStream 
bufferOs = new WrapperOutputStream(); public BufferingFsTranslogFile(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { - this.shardId = shardId; - this.id = id; - this.channelReference = channelReference; + super(shardId, id, channelReference); this.buffer = new byte[bufferSize]; - this.translogStream = TranslogStreams.translogStreamFor(this.channelReference.file()); - this.headerSize = this.translogStream.writeHeader(channelReference.channel()); + final TranslogStream stream = this.channelReference.stream(); + int headerSize = stream.writeHeader(channelReference.channel()); this.lastPosition += headerSize; this.lastWrittenPosition += headerSize; this.lastSyncPosition += headerSize; } @Override - public long id() { - return this.id; - } - - @Override - public int estimatedNumberOfOperations() { + public int totalOperations() { return operationCounter; } @Override - public long translogSizeInBytes() { + public long sizeInBytes() { return lastWrittenPosition; } @Override public Translog.Location add(BytesReference data) throws IOException { - rwl.writeLock().lock(); - try { + try (ReleasableLock lock = writeLock.acquire()) { operationCounter++; long position = lastPosition; if (data.length() >= buffer.length) { @@ -106,64 +85,49 @@ public class BufferingFsTranslogFile implements FsTranslogFile { data.writeTo(bufferOs); lastPosition += data.length(); return new Translog.Location(id, position, data.length()); - } finally { - rwl.writeLock().unlock(); } } private void flushBuffer() throws IOException { - assert (((ReentrantReadWriteLock.WriteLock) rwl.writeLock()).isHeldByCurrentThread()); + assert writeLock.isHeldByCurrentThread(); if (bufferCount > 0) { // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel Channels.writeToChannel(buffer, 0, bufferCount, channelReference.channel()); - lastWrittenPosition += bufferCount; bufferCount = 0; } } 
@Override - public byte[] read(Translog.Location location) throws IOException { - rwl.readLock().lock(); - try { - if (location.translogLocation >= lastWrittenPosition) { - byte[] data = new byte[location.size]; - System.arraycopy(buffer, (int) (location.translogLocation - lastWrittenPosition), data, 0, location.size); - return data; + protected void readBytes(ByteBuffer targetBuffer, long position) throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + if (position >= lastWrittenPosition) { + System.arraycopy(buffer, (int) (position - lastWrittenPosition), + targetBuffer.array(), targetBuffer.position(), targetBuffer.limit()); + return; } - } finally { - rwl.readLock().unlock(); } // we don't have to have a read lock here because we only write ahead to the file, so all writes has been complete // for the requested location. - return Channels.readFromFileChannel(channelReference.channel(), location.translogLocation, location.size); + Channels.readFromFileChannelWithEofException(channelReference.channel(), position, targetBuffer); } - @Override - public FsChannelSnapshot snapshot() throws TranslogException { + public FsChannelImmutableReader immutableReader() throws TranslogException { if (channelReference.tryIncRef()) { - boolean success = false; - try { - rwl.writeLock().lock(); - try { - flushBuffer(); - FsChannelSnapshot snapshot = new FsChannelSnapshot(this.id, channelReference, lastWrittenPosition, operationCounter); - snapshot.seekTo(this.headerSize); - success = true; - return snapshot; - } catch (Exception e) { - throw new TranslogException(shardId, "exception while creating snapshot", e); - } finally { - rwl.writeLock().unlock(); - } + try (ReleasableLock lock = writeLock.acquire()) { + flushBuffer(); + FsChannelImmutableReader reader = new FsChannelImmutableReader(this.id, channelReference, lastWrittenPosition, operationCounter); + channelReference.incRef(); // for new reader + return reader; + } catch (Exception e) { + throw new 
TranslogException(shardId, "exception while creating an immutable reader", e); } finally { - if (!success) { - channelReference.decRef(); - } + channelReference.decRef(); } + } else { + throw new TranslogException(shardId, "can't increment channel [" + channelReference + "] ref count"); } - return null; } @Override @@ -171,34 +135,24 @@ public class BufferingFsTranslogFile implements FsTranslogFile { return lastPosition != lastSyncPosition; } - @Override - public TranslogStream getStream() { - return this.translogStream; - } - @Override public void sync() throws IOException { if (!syncNeeded()) { return; } - rwl.writeLock().lock(); - try { + try (ReleasableLock lock = writeLock.acquire()) { flushBuffer(); lastSyncPosition = lastPosition; - } finally { - rwl.writeLock().unlock(); } channelReference.channel().force(false); } @Override - public void close() throws IOException { - if (closed.compareAndSet(false, true)) { - try { - sync(); - } finally { - channelReference.decRef(); - } + protected void doClose() throws IOException { + try { + sync(); + } finally { + super.doClose(); } } @@ -207,21 +161,18 @@ public class BufferingFsTranslogFile implements FsTranslogFile { if (!(other instanceof BufferingFsTranslogFile)) { return; } - rwl.writeLock().lock(); - try { - flushBuffer(); - this.buffer = ((BufferingFsTranslogFile) other).buffer; - } catch (IOException e) { - throw new TranslogException(shardId, "failed to flush", e); - } finally { - rwl.writeLock().unlock(); + try (ReleasableLock lock = writeLock.acquire()) { + try { + flushBuffer(); + this.buffer = ((BufferingFsTranslogFile) other).buffer; + } catch (IOException e) { + throw new TranslogException(shardId, "failed to flush", e); + } } } - @Override public void updateBufferSize(int bufferSize) { - rwl.writeLock().lock(); - try { + try (ReleasableLock lock = writeLock.acquire()) { if (this.buffer.length == bufferSize) { return; } @@ -229,21 +180,9 @@ public class BufferingFsTranslogFile implements FsTranslogFile 
{ this.buffer = new byte[bufferSize]; } catch (IOException e) { throw new TranslogException(shardId, "failed to flush", e); - } finally { - rwl.writeLock().unlock(); } } - @Override - public Path getPath() { - return channelReference.file(); - } - - @Override - public boolean closed() { - return this.closed.get(); - } - class WrapperOutputStream extends OutputStream { @Override @@ -258,4 +197,5 @@ public class BufferingFsTranslogFile implements FsTranslogFile { bufferCount += len; } } + } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/ChannelReference.java b/src/main/java/org/elasticsearch/index/translog/fs/ChannelReference.java index 63467725d78..8e62da169af 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/ChannelReference.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/ChannelReference.java @@ -19,33 +19,40 @@ package org.elasticsearch.index.translog.fs; +import com.google.common.collect.Iterables; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.translog.TranslogStream; +import org.elasticsearch.index.translog.TranslogStreams; -import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; -import java.io.RandomAccessFile; import java.nio.channels.FileChannel; -import java.nio.file.Files; import java.nio.file.OpenOption; import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Map; -/** - * - */ -abstract class ChannelReference extends AbstractRefCounted { +class ChannelReference extends AbstractRefCounted { private final Path file; private final FileChannel channel; + private final TranslogStream stream; + public ChannelReference(Path file, 
OpenOption... openOptions) throws IOException { super(file.toString()); this.file = file; this.channel = FileChannel.open(file, openOptions); + try { + this.stream = TranslogStreams.translogStreamFor(file); + } catch (Throwable t) { + IOUtils.closeWhileHandlingException(channel); + throw t; + } } public Path file() { @@ -56,6 +63,15 @@ abstract class ChannelReference extends AbstractRefCounted { return this.channel; } + public TranslogStream stream() { + return this.stream; + } + + @Override + public String toString() { + return "channel: file [" + file + "], ref count [" + refCount() + "]"; + } + @Override protected void closeInternal() { IOUtils.closeWhileHandlingException(channel); diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java new file mode 100644 index 00000000000..7e4bc1172d1 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog.fs; + +import org.elasticsearch.common.io.Channels; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * a channel reader which is fixed in length + */ +public final class FsChannelImmutableReader extends FsChannelReader { + + private final int totalOperations; + private final long length; + + /** + * Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point + * at the end of the last operation in this snapshot. + */ + public FsChannelImmutableReader(long id, ChannelReference channelReference, long length, int totalOperations) { + super(id, channelReference); + this.length = length; + this.totalOperations = totalOperations; + } + + + public FsChannelImmutableReader clone() { + if (channelReference.tryIncRef()) { + try { + FsChannelImmutableReader reader = new FsChannelImmutableReader(id, channelReference, length, totalOperations); + channelReference.incRef(); // for the new object + return reader; + } finally { + channelReference.decRef(); + } + } else { + throw new IllegalStateException("can't increment translog [" + id + "] channel ref count"); + } + } + + public long sizeInBytes() { + return length; + } + + public int totalOperations() { + return totalOperations; + } + + /** + * reads an operation at the given position into the given buffer. + */ + protected void readBytes(ByteBuffer buffer, long position) throws IOException { + if (position >= length) { + throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]"); + } + if (position < firstPosition()) { + throw new IOException("read requested before position of first ops. 
pos [" + position + "] first op on: [" + firstPosition() + "]"); + } + Channels.readFromFileChannelWithEofException(channel, position, buffer); + } + + @Override + public FsChannelSnapshot newSnapshot() { + return new FsChannelSnapshot(clone()); + } +} diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java new file mode 100644 index 00000000000..31ec0a07209 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog.fs; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.index.translog.Translog; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A base class for all classes that allows reading ops from translog files + */ +public abstract class FsChannelReader implements Closeable, Comparable { + + public static final int UNKNOWN_OP_COUNT = -1; + + protected final long id; + protected final ChannelReference channelReference; + protected final FileChannel channel; + protected final AtomicBoolean closed = new AtomicBoolean(false); + + public FsChannelReader(long id, ChannelReference channelReference) { + this.id = id; + this.channelReference = channelReference; + this.channel = channelReference.channel(); + } + + public long translogId() { + return this.id; + } + + abstract public long sizeInBytes(); + + /** the position the first operation is written at */ + public long firstPosition() { + return channelReference.stream().headerLength(); + } + + abstract public int totalOperations(); + + public Translog.Operation read(Translog.Location location) throws IOException { + assert location.translogId == id : "read location's translog id [" + location.translogId + "] is not [" + id + "]"; + ByteBuffer buffer = ByteBuffer.allocate(location.size); + return read(buffer, location.translogLocation, location.size); + } + + /** read the size of the op (i.e., number of bytes, including the op size) written at the given position */ + public int readSize(ByteBuffer reusableBuffer, long position) { + // read op size from disk + assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. 
got [" + reusableBuffer.capacity() + "]"; + try { + reusableBuffer.clear(); + reusableBuffer.limit(4); + readBytes(reusableBuffer, position); + reusableBuffer.flip(); + // Add an extra 4 to account for the operation size integer itself + return reusableBuffer.getInt() + 4; + } catch (IOException e) { + throw new ElasticsearchException("unexpected exception reading from translog snapshot of " + this.channelReference.file(), e); + } + } + + /** + * reads an operation at the given position and returns it. The buffer length is equal to the number + * of bytes reads. + */ + public Translog.Operation read(ByteBuffer reusableBuffer, long position, int opSize) throws IOException { + final ByteBuffer buffer; + if (reusableBuffer.capacity() >= opSize) { + buffer = reusableBuffer; + } else { + buffer = ByteBuffer.allocate(opSize); + } + buffer.clear(); + buffer.limit(opSize); + readBytes(buffer, position); + BytesArray bytesArray = new BytesArray(buffer.array(), 0, buffer.limit()); + return channelReference.stream().read(bytesArray.streamInput()); + } + + /** + * reads bytes at position into the given buffer, filling it. 
+ */ + abstract protected void readBytes(ByteBuffer buffer, long position) throws IOException; + + /** create snapshot for this channel */ + abstract public FsChannelSnapshot newSnapshot(); + + @Override + public void close() throws IOException { + if (closed.compareAndSet(false, true)) { + doClose(); + } + } + + protected void doClose() throws IOException { + channelReference.decRef(); + } + + @Override + public String toString() { + return "translog [" + id + "][" + channelReference.file() + "]"; + } + + @Override + public int compareTo(FsChannelReader o) { + return Long.compare(translogId(), o.translogId()); + } +} diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java index bff79bb5dbf..48285ac0d68 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java @@ -16,138 +16,60 @@ * specific language governing permissions and limitations * under the License. */ - package org.elasticsearch.index.translog.fs; -import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.Channels; -import org.elasticsearch.common.io.stream.BytesStreamInput; -import org.elasticsearch.index.translog.TranslogStreams; import org.elasticsearch.index.translog.Translog; -import java.io.EOFException; -import java.io.FileNotFoundException; +import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; import java.util.concurrent.atomic.AtomicBoolean; /** - * + * an implementation of {@link org.elasticsearch.index.translog.Translog.Snapshot}, wrapping + * a {@link FsChannelReader}. This class is NOT thread-safe. 
*/ -public class FsChannelSnapshot implements Translog.Snapshot { +public class FsChannelSnapshot implements Closeable { - private final long id; + protected final FsChannelReader reader; + protected final AtomicBoolean closed = new AtomicBoolean(false); - private final int totalOperations; + // we use an atomic long to allow passing it by reference :( + protected long position; - private final ChannelReference channelReference; - - private final FileChannel channel; - - private final long length; - - private Translog.Operation lastOperationRead = null; - - private long position = 0; - - private ByteBuffer cacheBuffer; - - private AtomicBoolean closed = new AtomicBoolean(false); - - /** - * Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point - * at the end of the last operation in this snapshot. - */ - public FsChannelSnapshot(long id, ChannelReference channelReference, long length, int totalOperations) throws FileNotFoundException { - this.id = id; - this.channelReference = channelReference; - this.channel = channelReference.channel(); - this.length = length; - this.totalOperations = totalOperations; + public FsChannelSnapshot(FsChannelReader reader) { + this.reader = reader; + this.position = reader.firstPosition(); } - @Override public long translogId() { - return this.id; + return reader.translogId(); } - @Override - public long position() { - return this.position; - } - - @Override - public long length() { - return this.length; - } - - @Override public int estimatedTotalOperations() { - return this.totalOperations; + return reader.totalOperations(); } - @Override - public long lengthInBytes() { - return length - position; - } - - @Override - public Translog.Operation next() { - try { - if (position >= length) { - return null; - } - if (cacheBuffer == null) { - cacheBuffer = ByteBuffer.allocate(1024); - } - cacheBuffer.limit(4); - int bytesRead = Channels.readFromFileChannel(channel, position, 
cacheBuffer); - if (bytesRead < 0) { - // the snapshot is acquired under a write lock. we should never - // read beyond the EOF, must be an abrupt EOF - throw new EOFException("read past EOF. pos [" + position + "] length: [" + cacheBuffer.limit() + "] end: [" + channel.size() + "]"); - } - assert bytesRead == 4; - cacheBuffer.flip(); - // Add an extra 4 to account for the operation size integer itself - int opSize = cacheBuffer.getInt() + 4; - if ((position + opSize) > length) { - // the snapshot is acquired under a write lock. we should never - // read beyond the EOF, must be an abrupt EOF - throw new EOFException("opSize of [" + opSize + "] pointed beyond EOF. position [" + position + "] length [" + length + "]"); - } - if (cacheBuffer.capacity() < opSize) { - cacheBuffer = ByteBuffer.allocate(opSize); - } - cacheBuffer.clear(); - cacheBuffer.limit(opSize); - bytesRead = Channels.readFromFileChannel(channel, position, cacheBuffer); - if (bytesRead < 0) { - // the snapshot is acquired under a write lock. we should never - // read beyond the EOF, must be an abrupt EOF - throw new EOFException("tried to read past EOF. 
opSize [" + opSize + "] position [" + position + "] length [" + length + "]"); - } - cacheBuffer.flip(); - position += opSize; - BytesArray bytesArray = new BytesArray(cacheBuffer.array(), 0, opSize); - return TranslogStreams.readTranslogOperation(new BytesStreamInput(bytesArray.copyBytesArray())); - } catch (IOException e) { - throw new ElasticsearchException("unexpected exception reading from translog snapshot of " + this.channelReference.file(), e); + public Translog.Operation next(ByteBuffer reusableBuffer) throws IOException { + if (position >= reader.sizeInBytes()) { + return null; } - } - - @Override - public void seekTo(long position) { - this.position = position; + final int opSize = reader.readSize(reusableBuffer, position); + Translog.Operation op = reader.read(reusableBuffer, position, opSize); + position += opSize; + return op; } @Override public void close() { if (closed.compareAndSet(false, true)) { - channelReference.decRef(); + try { + IOUtils.close(reader); + } catch (IOException e) { + throw new ElasticsearchException("failed to close translogs", e); + } } } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 15b76333b7a..314e73a97a7 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -19,37 +19,39 @@ package org.elasticsearch.index.translog.fs; -import org.apache.lucene.util.Accountable; +import com.google.common.collect.Iterables; +import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.inject.Inject; -import 
org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.translog.*; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogException; +import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.index.translog.TranslogStreams; +import org.elasticsearch.threadpool.ThreadPool; -import java.io.EOFException; +import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; -import java.nio.channels.ClosedChannelException; import java.nio.file.*; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; +import java.util.*; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -59,11 +61,21 @@ import java.util.regex.Pattern; /** * */ -public class FsTranslog extends 
AbstractIndexShardComponent implements Translog { +public class FsTranslog extends AbstractIndexShardComponent implements Translog, Closeable { public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type"; - private static final String TRANSLOG_FILE_PREFIX = "translog-"; - private static final Pattern PARSE_ID_PATTERN = Pattern.compile(TRANSLOG_FILE_PREFIX + "(\\d+).*"); + public static final String INDEX_TRANSLOG_BUFFER_SIZE = "index.translog.fs.buffer_size"; + public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval"; + public static final String TRANSLOG_FILE_PREFIX = "translog-"; + static final Pattern PARSE_ID_PATTERN = Pattern.compile(TRANSLOG_FILE_PREFIX + "(\\d+)(\\.recovering)?$"); + private final TimeValue syncInterval; + private volatile ScheduledFuture syncScheduler; + + + // this is a concurrent set and is not protected by any of the locks. The main reason + // is that is being accessed by two separate classes (additions & reading are done by FsTranslog, remove by FsView when closed) + private final Set outstandingViews = ConcurrentCollections.newConcurrentSet(); + class ApplySettings implements IndexSettingsService.Listener { @Override @@ -78,75 +90,148 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog private final IndexSettingsService indexSettingsService; private final BigArrays bigArrays; + private final ThreadPool threadPool; + + protected final ReleasableLock readLock; + protected final ReleasableLock writeLock; - private final ReadWriteLock rwl = new ReentrantReadWriteLock(); private final Path location; - private volatile FsTranslogFile current; - private volatile FsTranslogFile trans; + // protected by the write lock + private long idGenerator = 1; + private FsTranslogFile current; + // ordered by age + private final List uncommittedTranslogs = new ArrayList<>(); + private long lastCommittedTranslogId = -1; // -1 is safe as it will not cause an translog deletion. 
private FsTranslogFile.Type type; private boolean syncOnEachOperation = false; private volatile int bufferSize; - private volatile int transientBufferSize; private final ApplySettings applySettings = new ApplySettings(); - @Inject - public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, - BigArrays bigArrays, ShardPath shardPath) throws IOException { - super(shardId, indexSettings); - this.indexSettingsService = indexSettingsService; - this.bigArrays = bigArrays; - this.location = shardPath.resolveTranslog(); - Files.createDirectories(location); - this.type = FsTranslogFile.Type.fromString(indexSettings.get("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name())); - this.bufferSize = (int) indexSettings.getAsBytesSize("index.translog.fs.buffer_size", ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController... - this.transientBufferSize = (int) indexSettings.getAsBytesSize("index.translog.fs.transient_buffer_size", ByteSizeValue.parseBytesSizeValue("8k")).bytes(); - indexSettingsService.addListener(applySettings); + private final AtomicBoolean closed = new AtomicBoolean(); + + public FsTranslog(ShardId shardId, IndexSettingsService indexSettingsService, + BigArrays bigArrays, Path location, ThreadPool threadPool) throws IOException { + this(shardId, indexSettingsService.getSettings(), indexSettingsService, bigArrays, location, threadPool); } - public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, Path location) throws IOException { - super(shardId, indexSettings); - this.indexSettingsService = null; - this.location = location; - Files.createDirectories(location); - this.bigArrays = BigArrays.NON_RECYCLING_INSTANCE; + public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, + BigArrays bigArrays, Path location) throws IOException { + this(shardId, indexSettings, null, bigArrays, location, null); + } - 
this.type = FsTranslogFile.Type.fromString(indexSettings.get("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name())); - this.bufferSize = (int) indexSettings.getAsBytesSize("index.translog.fs.buffer_size", ByteSizeValue.parseBytesSizeValue("64k")).bytes(); + private FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, @Nullable IndexSettingsService indexSettingsService, + BigArrays bigArrays, Path location, @Nullable ThreadPool threadPool) throws IOException { + super(shardId, indexSettings); + ReadWriteLock rwl = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(rwl.readLock()); + writeLock = new ReleasableLock(rwl.writeLock()); + + this.indexSettingsService = indexSettingsService; + this.bigArrays = bigArrays; + this.location = location; + Files.createDirectories(this.location); + this.threadPool = threadPool; + + this.type = FsTranslogFile.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, FsTranslogFile.Type.BUFFERED.name())); + this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController... + + syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5)); + if (syncInterval.millis() > 0 && threadPool != null) { + syncOnEachOperation(false); + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync()); + } else if (syncInterval.millis() == 0) { + syncOnEachOperation(true); + } + + if (indexSettingsService != null) { + indexSettingsService.addListener(applySettings); + } + try { + recoverFromFiles(); + // now that we know which files are there, create a new current one. + current = createTranslogFile(null); + } catch (Throwable t) { + // close the opened translog files if we fail to create a new translog... 
+ IOUtils.closeWhileHandlingException(uncommittedTranslogs); + throw t; + } + } + + /** recover all translog files found on disk */ + private void recoverFromFiles() throws IOException { + boolean success = false; + ArrayList foundTranslogs = new ArrayList<>(); + try (ReleasableLock lock = writeLock.acquire()) { + try (DirectoryStream stream = Files.newDirectoryStream(location, TRANSLOG_FILE_PREFIX + "[0-9]*")) { + for (Path file : stream) { + final long id = parseIdFromFileName(file); + if (id < 0) { + throw new TranslogException(shardId, "failed to parse id from file name matching pattern " + file); + } + idGenerator = Math.max(idGenerator, id + 1); + final ChannelReference raf = new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ); + foundTranslogs.add(new FsChannelImmutableReader(id, raf, raf.channel().size(), FsChannelReader.UNKNOWN_OP_COUNT)); + logger.debug("found local translog with id [{}]", id); + } + } + CollectionUtil.timSort(foundTranslogs); + uncommittedTranslogs.addAll(foundTranslogs); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(foundTranslogs); + } + } + } + + /* extracts the translog id from a file name. 
returns -1 upon failure */ + public static long parseIdFromFileName(Path translogFile) { + final String fileName = translogFile.getFileName().toString(); + final Matcher matcher = PARSE_ID_PATTERN.matcher(fileName); + if (matcher.matches()) { + try { + return Long.parseLong(matcher.group(1)); + } catch (NumberFormatException e) { + throw new ElasticsearchException("number formatting issue in a file that passed PARSE_ID_PATTERN: " + fileName + "]", e); + } + } + return -1; } @Override public void updateBuffer(ByteSizeValue bufferSize) { this.bufferSize = bufferSize.bytesAsInt(); - rwl.writeLock().lock(); - try { - FsTranslogFile current1 = this.current; - if (current1 != null) { - current1.updateBufferSize(this.bufferSize); - } - current1 = this.trans; - if (current1 != null) { - current1.updateBufferSize(this.bufferSize); - } - } finally { - rwl.writeLock().unlock(); + try (ReleasableLock lock = writeLock.acquire()) { + current.updateBufferSize(this.bufferSize); } } + boolean isOpen() { + return closed.get() == false; + } + @Override public void close() throws IOException { - if (indexSettingsService != null) { - indexSettingsService.removeListener(applySettings); - } - rwl.writeLock().lock(); - try { - IOUtils.close(this.trans, this.current); - } finally { - rwl.writeLock().unlock(); + if (closed.compareAndSet(false, true)) { + if (indexSettingsService != null) { + indexSettingsService.removeListener(applySettings); + } + + try (ReleasableLock lock = writeLock.acquire()) { + try { + IOUtils.close(this.current); + } finally { + IOUtils.close(uncommittedTranslogs); + } + } finally { + FutureUtils.cancel(syncScheduler); + logger.debug("translog closed"); + } } } @@ -157,137 +242,120 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public long currentId() { - FsTranslogFile current1 = this.current; - if (current1 == null) { - return -1; + try (ReleasableLock lock = readLock.acquire()) { + return current.translogId(); } - 
return current1.id(); } @Override - public int estimatedNumberOfOperations() { - FsTranslogFile current1 = this.current; - if (current1 == null) { - return 0; + public int totalOperations() { + int ops = 0; + try (ReleasableLock lock = readLock.acquire()) { + ops += current.totalOperations(); + for (FsChannelReader translog : uncommittedTranslogs) { + int tops = translog.totalOperations(); + if (tops == FsChannelReader.UNKNOWN_OP_COUNT) { + return FsChannelReader.UNKNOWN_OP_COUNT; + } + ops += tops; + } } - return current1.estimatedNumberOfOperations(); + return ops; } @Override - public long ramBytesUsed() { - return 0; - } - - @Override - public Collection getChildResources() { - return Collections.emptyList(); - } - - @Override - public long translogSizeInBytes() { - FsTranslogFile current1 = this.current; - if (current1 == null) { - return 0; + public long sizeInBytes() { + long size = 0; + try (ReleasableLock lock = readLock.acquire()) { + size += current.sizeInBytes(); + for (FsChannelReader translog : uncommittedTranslogs) { + size += translog.sizeInBytes(); + } } - return current1.translogSizeInBytes(); + return size; } @Override - public int clearUnreferenced() { - rwl.writeLock().lock(); - int deleted = 0; - try (DirectoryStream stream = Files.newDirectoryStream(location, TRANSLOG_FILE_PREFIX + "[0-9]*")) { - for (Path file : stream) { - if (isReferencedTranslogFile(file) == false) { - try { - logger.trace("delete unreferenced translog file: " + file); - Files.delete(file); - deleted++; - } catch (Exception ex) { - logger.debug("failed to delete " + file, ex); + public void markCommitted(final long translogId) throws FileNotFoundException { + try (ReleasableLock lock = writeLock.acquire()) { + logger.trace("updating translogs on commit of [{}]", translogId); + if (translogId < lastCommittedTranslogId) { + throw new IllegalArgumentException("committed translog id can only go up (current [" + + lastCommittedTranslogId + "], got [" + translogId + "]"); + } + 
boolean found = false; + if (current.translogId() == translogId) { + found = true; + } else { + if (translogId > current.translogId()) { + throw new IllegalArgumentException("committed translog id must be lower or equal to current id (current [" + + current.translogId() + "], got [" + translogId + "]"); + } + } + if (found == false) { + // try to find it in uncommittedTranslogs + for (FsChannelImmutableReader translog : uncommittedTranslogs) { + if (translog.translogId() == translogId) { + found = true; + break; } } } - } catch (IOException ex) { - logger.debug("failed to clear unreferenced files ", ex); - } finally { - rwl.writeLock().unlock(); - } - return deleted; - } - - @Override - public void newTranslog(long id) throws TranslogException, IOException { - rwl.writeLock().lock(); - try { - FsTranslogFile newFile; - try { - newFile = type.create(shardId, id, new InternalChannelReference(location.resolve(getFilename(id)), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), bufferSize); - } catch (IOException e) { - throw new TranslogException(shardId, "failed to create new translog file", e); + if (found == false) { + ArrayList currentIds = new ArrayList<>(); + for (FsChannelReader translog : Iterables.concat(uncommittedTranslogs, Collections.singletonList(current))) { + currentIds.add(translog.translogId()); + } + throw new FileNotFoundException("committed translog id can not be found (current [" + + Strings.collectionToCommaDelimitedString(currentIds) + "], got [" + translogId + "]"); + } + lastCommittedTranslogId = translogId; + while (uncommittedTranslogs.isEmpty() == false && uncommittedTranslogs.get(0).translogId() < translogId) { + FsChannelReader old = uncommittedTranslogs.remove(0); + logger.trace("removed [{}] from uncommitted translog list", old.translogId()); + try { + old.close(); + } catch (IOException e) { + logger.error("failed to closed old translog [{}] (committed id [{}])", e, old, translogId); + } } - 
FsTranslogFile old = current; - current = newFile; - IOUtils.close(old); - } finally { - rwl.writeLock().unlock(); } } @Override - public void newTransientTranslog(long id) throws TranslogException { - rwl.writeLock().lock(); + public long newTranslog() throws TranslogException, IOException { + try (ReleasableLock lock = writeLock.acquire()) { + final FsTranslogFile old = current; + final FsTranslogFile newFile = createTranslogFile(old); + current = newFile; + FsChannelImmutableReader reader = old.immutableReader(); + uncommittedTranslogs.add(reader); + // notify all outstanding views of the new translog (no views are created now as + // we hold a write lock). + for (FsView view : outstandingViews) { + view.onNewTranslog(old.immutableReader(), current.reader()); + } + IOUtils.close(old); + logger.trace("current translog set to [{}]", current.translogId()); + return current.translogId(); + } + } + + protected FsTranslogFile createTranslogFile(@Nullable FsTranslogFile reuse) throws IOException { + FsTranslogFile newFile; + long size = Long.MAX_VALUE; try { - assert this.trans == null; - this.trans = type.create(shardId, id, new InternalChannelReference(location.resolve(getFilename(id)), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), transientBufferSize); + long id = idGenerator++; + newFile = type.create(shardId, id, new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), bufferSize); } catch (IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); - } finally { - rwl.writeLock().unlock(); } + if (reuse != null) { + newFile.reuse(reuse); + } + return newFile; } - @Override - public void makeTransientCurrent() throws IOException { - FsTranslogFile old; - rwl.writeLock().lock(); - try { - assert this.trans != null; - old = current; - this.current = this.trans; - this.trans = null; - } finally { - 
rwl.writeLock().unlock(); - } - old.close(); - current.reuse(old); - } - - @Override - public void revertTransient() throws IOException { - rwl.writeLock().lock(); - try { - final FsTranslogFile toClose = this.trans; - this.trans = null; - IOUtils.close(toClose); - } finally { - rwl.writeLock().unlock(); - } - } - - /** - * Returns the translog that should be read for the specified location. If - * the transient or current translog does not match, returns null - */ - private FsTranslogFile translogForLocation(Location location) { - if (trans != null && trans.id() == location.translogId) { - return this.trans; - } - if (current.id() == location.translogId) { - return this.current; - } - return null; - } /** * Read the Operation object from the given location, returns null if the @@ -295,108 +363,112 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog */ @Override public Translog.Operation read(Location location) { - rwl.readLock().lock(); - try { - FsTranslogFile translog = translogForLocation(location); - if (translog != null) { - byte[] data = translog.read(location); - try (BytesStreamInput in = new BytesStreamInput(data)) { - // Return the Operation using the current version of the - // stream based on which translog is being read - return translog.getStream().read(in); + try (ReleasableLock lock = readLock.acquire()) { + FsChannelReader reader = null; + if (current.translogId() == location.translogId) { + reader = current; + } else { + for (FsChannelReader translog : uncommittedTranslogs) { + if (translog.translogId() == location.translogId) { + reader = translog; + break; + } } } - return null; + return reader == null ? 
null : reader.read(location); } catch (IOException e) { throw new ElasticsearchException("failed to read source from translog location " + location, e); - } finally { - rwl.readLock().unlock(); } } @Override public Location add(Operation operation) throws TranslogException { - rwl.readLock().lock(); - boolean released = false; - ReleasableBytesStreamOutput out = null; + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); try { - out = new ReleasableBytesStreamOutput(bigArrays); TranslogStreams.writeTranslogOperation(out, operation); ReleasablePagedBytesReference bytes = out.bytes(); - Location location = current.add(bytes); - if (syncOnEachOperation) { - current.sync(); - } - - assert new BytesArray(current.read(location)).equals(bytes); - - FsTranslogFile trans = this.trans; - if (trans != null) { - try { - location = trans.add(bytes); - } catch (ClosedChannelException e) { - // ignore + try (ReleasableLock lock = readLock.acquire()) { + Location location = current.add(bytes); + if (syncOnEachOperation) { + current.sync(); } + + assert current.assertBytesAtLocation(location, bytes); + return location; } - Releasables.close(bytes); - released = true; - return location; } catch (Throwable e) { throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); } finally { - rwl.readLock().unlock(); - if (!released && out != null) { - Releasables.close(out.bytes()); + Releasables.close(out.bytes()); + } + } + + @Override + public Snapshot newSnapshot() { + try (ReleasableLock lock = readLock.acquire()) { + // leave one place for current. + final FsChannelReader[] readers = uncommittedTranslogs.toArray(new FsChannelReader[uncommittedTranslogs.size() + 1]); + readers[readers.length - 1] = current; + return createdSnapshot(readers); + } + } + + private Snapshot createdSnapshot(FsChannelReader... 
translogs) { + ArrayList channelSnapshots = new ArrayList<>(); + boolean success = false; + try { + for (FsChannelReader translog : translogs) { + channelSnapshots.add(translog.newSnapshot()); + } + Snapshot snapshot = new FsTranslogSnapshot(channelSnapshots, logger); + success = true; + return snapshot; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(channelSnapshots); } } } @Override - public FsChannelSnapshot snapshot() throws TranslogException { - while (true) { - FsTranslogFile current = this.current; - FsChannelSnapshot snapshot = current.snapshot(); - if (snapshot != null) { - return snapshot; + public Translog.View newView() { + // we need to acquire the read lock to make sure new translog is created + // and will be missed by the view we're making + try (ReleasableLock lock = readLock.acquire()) { + ArrayList translogs = new ArrayList<>(); + try { + for (FsChannelImmutableReader translog : uncommittedTranslogs) { + translogs.add(translog.clone()); + } + translogs.add(current.reader()); + FsView view = new FsView(translogs); + // this is safe as we know that no new translog is being made at the moment + // (we hold a read lock) and the view will be notified of any future one + outstandingViews.add(view); + translogs.clear(); + return view; + } finally { + // close if anything happend and we didn't reach the clear + IOUtils.closeWhileHandlingException(translogs); } - if (current.closed() && this.current == current) { - // check if we are closed and if we are still current - then this translog is closed and we can exit - throw new TranslogException(shardId, "current translog is already closed"); - } - Thread.yield(); } } - @Override - public Snapshot snapshot(Snapshot snapshot) { - FsChannelSnapshot snap = snapshot(); - if (snap.translogId() == snapshot.translogId()) { - snap.seekTo(snapshot.position()); - } - return snap; - } - @Override public void sync() throws IOException { - FsTranslogFile current1 = this.current; - if 
(current1 == null) { - return; - } - try { - current1.sync(); - } catch (IOException e) { - // if we switches translots (!=), then this failure is not relevant - // we are working on a new translog - if (this.current == current1) { - throw e; + try (ReleasableLock lock = readLock.acquire()) { + if (closed.get()) { + return; } + current.sync(); } } @Override public boolean syncNeeded() { - FsTranslogFile current1 = this.current; - return current1 != null && current1.syncNeeded(); + try (ReleasableLock lock = readLock.acquire()) { + return current.syncNeeded(); + } } @Override @@ -409,138 +481,169 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog } } - @Override - public String getFilename(long translogId) { + /** package private for testing */ + String getFilename(long translogId) { return TRANSLOG_FILE_PREFIX + translogId; } @Override public TranslogStats stats() { - FsTranslogFile current = this.current; - if (current == null) { - return new TranslogStats(0, 0); - } - - return new TranslogStats(current.estimatedNumberOfOperations(), current.translogSizeInBytes()); - } - - @Override - public long findLargestPresentTranslogId() throws IOException { - rwl.readLock().lock(); - try { - long maxId = this.currentId(); - try (DirectoryStream stream = Files.newDirectoryStream(location, TRANSLOG_FILE_PREFIX + "[0-9]*")) { - for (Path translogFile : stream) { - try { - final String fileName = translogFile.getFileName().toString(); - final Matcher matcher = PARSE_ID_PATTERN.matcher(fileName); - if (matcher.matches()) { - maxId = Math.max(maxId, Long.parseLong(matcher.group(1))); - } - } catch (NumberFormatException ex) { - logger.warn("Couldn't parse translog id from file " + translogFile + " skipping"); - } - } - } - return maxId; - } finally { - rwl.readLock().unlock(); + // acquire lock to make the two numbers roughly consistent (no file change half way) + try (ReleasableLock lock = readLock.acquire()) { + return new 
TranslogStats(totalOperations(), sizeInBytes()); } } - @Override - public OperationIterator openIterator(long translogId) throws IOException { - final String translogName = getFilename(translogId); - Path recoveringTranslogFile = null; - logger.trace("try open translog file {} locations {}", translogName, location); - // we have to support .recovering since it's a leftover from previous version but might still be on the filesystem - // we used to rename the foo into foo.recovering since foo was reused / overwritten but we fixed that in 2.0 - for (Path recoveryFiles : FileSystemUtils.files(location, translogName + "{.recovering,}")) { - logger.trace("translog file found in {}", recoveryFiles); - recoveringTranslogFile = recoveryFiles; - } - final boolean translogFileExists = recoveringTranslogFile != null && Files.exists(recoveringTranslogFile); - if (translogFileExists) { - if (logger.isTraceEnabled()) { - logger.trace("opening iterator for translog file: {} length: {}", recoveringTranslogFile, Files.size(recoveringTranslogFile)); - } - final TranslogStream translogStream = TranslogStreams.translogStreamFor(recoveringTranslogFile); - return new OperationIteratorImpl(logger, translogStream, translogStream.openInput(recoveringTranslogFile)); - } - logger.trace("translog file NOT found in {}", location); - throw new FileNotFoundException("no translog file found for id: " + translogId); - } - - private boolean isReferencedTranslogFile(Path file) { - final FsTranslogFile theCurrent = this.current; - final FsTranslogFile theTrans = this.trans; - return (theCurrent != null && theCurrent.getPath().equals(file)) || - (theTrans != null && theTrans.getPath().equals(file)); + private boolean isReferencedTranslogId(long translogId) { + return translogId >= lastCommittedTranslogId; } private final class InternalChannelReference extends ChannelReference { + final long translogId; - public InternalChannelReference(Path file, OpenOption... 
openOptions) throws IOException { + public InternalChannelReference(long translogId, Path file, OpenOption... openOptions) throws IOException { super(file, openOptions); + this.translogId = translogId; } @Override protected void closeInternal() { super.closeInternal(); - rwl.writeLock().lock(); - try { - if (isReferencedTranslogFile(file()) == false) { + try (ReleasableLock lock = writeLock.acquire()) { + if (isReferencedTranslogId(translogId) == false) { // if the given path is not the current we can safely delete the file since all references are released logger.trace("delete translog file - not referenced and not current anymore {}", file()); IOUtils.deleteFilesIgnoringExceptions(file()); } - } finally { - rwl.writeLock().unlock(); } } } /** - * Iterator for translog operations. + * a view into the translog, capturing all translog file at the moment of creation + * and updated with any future translog. */ - private static class OperationIteratorImpl implements org.elasticsearch.index.translog.Translog.OperationIterator { + class FsView implements View { - private final TranslogStream translogStream; - private final StreamInput input; - private final ESLogger logger; + boolean closed; + // last in this list is always FsTranslog.current + final List orderedTranslogs; - OperationIteratorImpl(ESLogger logger, TranslogStream translogStream, StreamInput input) { - this.translogStream = translogStream; - this.input = input; - this.logger = logger; + FsView(List orderedTranslogs) { + assert orderedTranslogs.isEmpty() == false; + // clone so we can safely mutate.. + this.orderedTranslogs = new ArrayList<>(orderedTranslogs); } /** - * Returns the next operation in the translog or null if we reached the end of the stream. + * Called by the parent class when ever the current translog changes + * + * @param oldCurrent a new read only reader for the old current (should replace the previous reference) + * @param newCurrent a reader into the new current. 
*/ - public Translog.Operation next() throws IOException { - try { - if (translogStream instanceof LegacyTranslogStream) { - input.readInt(); // ignored opSize + synchronized void onNewTranslog(FsChannelReader oldCurrent, FsChannelReader newCurrent) throws IOException { + // even though the close method removes this view from outstandingViews, there is no synchronisation in place + // between that operation and an ongoing addition of a new translog, already having an iterator. + // As such, this method can be called despite of the fact that we are closed. We need to check and ignore. + if (closed) { + // we have to close the new references created for as as we will not hold them + IOUtils.close(oldCurrent, newCurrent); + return; + } + orderedTranslogs.remove(orderedTranslogs.size() - 1).close(); + orderedTranslogs.add(oldCurrent); + orderedTranslogs.add(newCurrent); + } + + @Override + public synchronized long minTranslogId() { + ensureOpen(); + return orderedTranslogs.get(0).translogId(); + } + + @Override + public synchronized int totalOperations() { + int ops = 0; + for (FsChannelReader translog : orderedTranslogs) { + int tops = translog.totalOperations(); + if (tops == FsChannelReader.UNKNOWN_OP_COUNT) { + return -1; } - return translogStream.read(input); - } catch (TruncatedTranslogException | EOFException e) { - // ignore, not properly written the last op - logger.trace("ignoring translog EOF exception, the last operation was not properly written", e); - return null; - } catch (IOException e) { - // ignore, not properly written last op - logger.trace("ignoring translog IO exception, the last operation was not properly written", e); - return null; + ops += tops; + } + return ops; + } + + @Override + public synchronized long sizeInBytes() { + long size = 0; + for (FsChannelReader translog : orderedTranslogs) { + size += translog.sizeInBytes(); + } + return size; + } + + public synchronized Snapshot snapshot() { + ensureOpen(); + return 
createdSnapshot(orderedTranslogs.toArray(new FsChannelReader[orderedTranslogs.size()])); + } + + + void ensureOpen() { + if (closed) { + throw new ElasticsearchException("View is already closed"); } } @Override public void close() { + List toClose = new ArrayList<>(); try { - input.close(); - } catch (IOException ex) { - throw new ElasticsearchException("failed to close stream input", ex); + synchronized (this) { + if (closed == false) { + logger.trace("closing view starting at translog [{}]", minTranslogId()); + closed = true; + outstandingViews.remove(this); + toClose.addAll(orderedTranslogs); + orderedTranslogs.clear(); + } + } + } finally { + try { + // Close out of lock to prevent deadlocks between channel close which checks for + // references in InternalChannelReference.closeInternal (waiting on a read lock) + // and other FsTranslog#newTranslog calling FsView.onNewTranslog (while having a write lock) + IOUtils.close(toClose); + } catch (Exception e) { + throw new ElasticsearchException("failed to close view", e); + } + } + } + } + + class Sync implements Runnable { + @Override + public void run() { + // don't re-schedule if its closed..., we are done + if (closed.get()) { + return; + } + if (syncNeeded()) { + threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() { + @Override + public void run() { + try { + sync(); + } catch (Exception e) { + logger.warn("failed to sync translog", e); + } + if (closed.get() == false) { + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); + } + } + }); + } else { + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); } } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java index 751bfc3ec5b..1bfe8dae61c 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ 
b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java @@ -19,17 +19,32 @@ package org.elasticsearch.index.translog.fs; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogException; -import org.elasticsearch.index.translog.TranslogStream; -import java.io.Closeable; import java.io.IOException; -import java.nio.file.Path; +import java.nio.ByteBuffer; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public abstract class FsTranslogFile extends FsChannelReader { + + protected final ShardId shardId; + protected final ReleasableLock readLock; + protected final ReleasableLock writeLock; + + public FsTranslogFile(ShardId shardId, long id, ChannelReference channelReference) { + super(id, channelReference); + this.shardId = shardId; + ReadWriteLock rwl = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(rwl.readLock()); + writeLock = new ReleasableLock(rwl.writeLock()); + } -public interface FsTranslogFile extends Closeable { public static enum Type { @@ -58,29 +73,83 @@ public interface FsTranslogFile extends Closeable { } } - long id(); - int estimatedNumberOfOperations(); + /** add the given bytes to the translog and return the location they were written at */ + public abstract Translog.Location add(BytesReference data) throws IOException; - long translogSizeInBytes(); + /** reuse resources from another translog file, which is guaranteed not to be used anymore */ + public abstract void reuse(FsTranslogFile other) throws TranslogException; - Translog.Location add(BytesReference data) throws IOException; + /** change the size of the internal buffer if relevant */ + public abstract void updateBufferSize(int bufferSize) throws 
TranslogException; - byte[] read(Translog.Location location) throws IOException; + /** write all buffered ops to disk and fsync file */ + public abstract void sync() throws IOException; - FsChannelSnapshot snapshot() throws TranslogException; + /** returns true if there are buffered ops */ + public abstract boolean syncNeeded(); - void reuse(FsTranslogFile other) throws TranslogException; + @Override + public FsChannelSnapshot newSnapshot() { + return new FsChannelSnapshot(immutableReader()); + } - void updateBufferSize(int bufferSize) throws TranslogException; + /** + * returns a new reader that follows the current writes (most importantly allows making + * repeated snapshots that includes new content) + */ + public FsChannelReader reader() { + channelReference.incRef(); + boolean success = false; + try { + FsChannelReader reader = new InnerReader(this.id, channelReference); + success = true; + return reader; + } finally { + if (!success) { + channelReference.decRef(); + } + } + } - void sync() throws IOException; - boolean syncNeeded(); + /** returns a new immutable reader which only exposes the current written operation * */ + abstract public FsChannelImmutableReader immutableReader(); - TranslogStream getStream(); + boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(location.size); + readBytes(buffer, location.translogLocation); + return new BytesArray(buffer.array()).equals(expectedBytes); + } - public Path getPath(); + /** + * this class is used when one wants a reference to this file which exposes all recently written operation. 
+ * as such it needs access to the internals of the current reader + */ + final class InnerReader extends FsChannelReader { - public boolean closed(); + public InnerReader(long id, ChannelReference channelReference) { + super(id, channelReference); + } + + @Override + public long sizeInBytes() { + return FsTranslogFile.this.sizeInBytes(); + } + + @Override + public int totalOperations() { + return FsTranslogFile.this.totalOperations(); + } + + @Override + protected void readBytes(ByteBuffer buffer, long position) throws IOException { + FsTranslogFile.this.readBytes(buffer, position); + } + + @Override + public FsChannelSnapshot newSnapshot() { + return FsTranslogFile.this.newSnapshot(); + } + } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java new file mode 100644 index 00000000000..f771bcaef5a --- /dev/null +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog.fs; + +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TruncatedTranslogException; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +public class FsTranslogSnapshot implements Translog.Snapshot { + + private final List orderedTranslogs; + private final ESLogger logger; + private final ByteBuffer cacheBuffer; + private AtomicBoolean closed = new AtomicBoolean(false); + private final int estimatedTotalOperations; + private int currentTranslog; + + /** + * Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point + * at the end of the last operation in this snapshot. 
+ */ + public FsTranslogSnapshot(List orderedTranslogs, ESLogger logger) { + this.orderedTranslogs = orderedTranslogs; + this.logger = logger; + int ops = 0; + for (FsChannelSnapshot translog : orderedTranslogs) { + + final int tops = translog.estimatedTotalOperations(); + if (tops < 0) { + ops = FsChannelReader.UNKNOWN_OP_COUNT; + break; + } + ops += tops; + } + estimatedTotalOperations = ops; + cacheBuffer = ByteBuffer.allocate(1024); + currentTranslog = 0; + } + + + @Override + public int estimatedTotalOperations() { + return estimatedTotalOperations; + } + + @Override + public Translog.Operation next() throws IOException { + ensureOpen(); + for (; currentTranslog < orderedTranslogs.size(); currentTranslog++) { + final FsChannelSnapshot current = orderedTranslogs.get(currentTranslog); + Translog.Operation op = null; + try { + op = current.next(cacheBuffer); + } catch (TruncatedTranslogException e) { + // file is empty or header has been half-written and should be ignored + logger.trace("ignoring truncation exception, the translog [{}] is either empty or half-written", e, current.translogId()); + } + if (op != null) { + return op; + } + } + return null; + } + + protected void ensureOpen() { + if (closed.get()) { + throw new AlreadyClosedException("snapshot already closed"); + } + } + + @Override + public void close() throws ElasticsearchException { + if (closed.compareAndSet(false, true)) { + try { + IOUtils.close(orderedTranslogs); + } catch (IOException e) { + throw new ElasticsearchException("failed to close channel snapshots", e); + } finally { + orderedTranslogs.clear(); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java index 199847d0779..1c4ea31a2b5 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java @@ -19,128 
+19,82 @@ package org.elasticsearch.index.translog.fs; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.TranslogStream; -import org.elasticsearch.index.translog.TranslogStreams; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogException; -import java.io.FileNotFoundException; import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.nio.ByteBuffer; -public class SimpleFsTranslogFile implements FsTranslogFile { - - private final long id; - private final ShardId shardId; - private final ChannelReference channelReference; - private final AtomicBoolean closed = new AtomicBoolean(); - private final ReadWriteLock rwl = new ReentrantReadWriteLock(); - private final TranslogStream translogStream; - private final int headerSize; +public final class SimpleFsTranslogFile extends FsTranslogFile { private volatile int operationCounter = 0; - private volatile long lastPosition = 0; private volatile long lastWrittenPosition = 0; - private volatile long lastSyncPosition = 0; public SimpleFsTranslogFile(ShardId shardId, long id, ChannelReference channelReference) throws IOException { - this.shardId = shardId; - this.id = id; - this.channelReference = channelReference; - this.translogStream = TranslogStreams.translogStreamFor(this.channelReference.file()); - this.headerSize = this.translogStream.writeHeader(channelReference.channel()); + super(shardId, id, channelReference); + int headerSize = this.channelReference.stream().writeHeader(channelReference.channel()); this.lastPosition += headerSize; 
this.lastWrittenPosition += headerSize; this.lastSyncPosition += headerSize; } @Override - public long id() { - return this.id; - } - - @Override - public int estimatedNumberOfOperations() { + public int totalOperations() { return operationCounter; } @Override - public long translogSizeInBytes() { + public long sizeInBytes() { return lastWrittenPosition; } @Override public Translog.Location add(BytesReference data) throws IOException { - rwl.writeLock().lock(); - try { + try (ReleasableLock lock = writeLock.acquire()) { long position = lastPosition; data.writeTo(channelReference.channel()); lastPosition = lastPosition + data.length(); lastWrittenPosition = lastWrittenPosition + data.length(); operationCounter = operationCounter + 1; return new Translog.Location(id, position, data.length()); - } finally { - rwl.writeLock().unlock(); } } @Override - public byte[] read(Translog.Location location) throws IOException { - rwl.readLock().lock(); + protected void readBytes(ByteBuffer buffer, long position) throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + Channels.readFromFileChannelWithEofException(channelReference.channel(), position, buffer); + } + } + + @Override + public void doClose() throws IOException { try { - return Channels.readFromFileChannel(channelReference.channel(), location.translogLocation, location.size); + sync(); } finally { - rwl.readLock().unlock(); + super.doClose(); } } - @Override - public void close() throws IOException { - if (closed.compareAndSet(false, true)) { - try { - sync(); + public FsChannelImmutableReader immutableReader() throws TranslogException { + if (channelReference.tryIncRef()) { + try (ReleasableLock lock = writeLock.acquire()) { + FsChannelImmutableReader reader = new FsChannelImmutableReader(this.id, channelReference, lastWrittenPosition, operationCounter); + channelReference.incRef(); // for the new object + return reader; } finally { channelReference.decRef(); } + } else { + throw new 
TranslogException(shardId, "can't increment channel [" + channelReference + "] channel ref count"); } - } - /** - * Returns a snapshot on this file, null if it failed to snapshot. - */ - @Override - public FsChannelSnapshot snapshot() throws TranslogException { - if (channelReference.tryIncRef()) { - boolean success = false; - try { - rwl.writeLock().lock(); - try { - FsChannelSnapshot snapshot = new FsChannelSnapshot(this.id, channelReference, lastWrittenPosition, operationCounter); - snapshot.seekTo(this.headerSize); - success = true; - return snapshot; - } finally { - rwl.writeLock().unlock(); - } - } catch (FileNotFoundException e) { - throw new TranslogException(shardId, "failed to create snapshot", e); - } finally { - if (!success) { - channelReference.decRef(); - } - } - } - return null; } @Override @@ -148,28 +102,15 @@ public class SimpleFsTranslogFile implements FsTranslogFile { return lastWrittenPosition != lastSyncPosition; } - @Override - public TranslogStream getStream() { - return this.translogStream; - } - - @Override - public Path getPath() { - return channelReference.file(); - } - @Override public void sync() throws IOException { // check if we really need to sync here... if (!syncNeeded()) { return; } - rwl.writeLock().lock(); - try { + try (ReleasableLock lock = writeLock.acquire()) { lastSyncPosition = lastWrittenPosition; channelReference.channel().force(false); - } finally { - rwl.writeLock().unlock(); } } @@ -182,10 +123,4 @@ public class SimpleFsTranslogFile implements FsTranslogFile { public void updateBufferSize(int bufferSize) throws TranslogException { // nothing to do here... 
} - - @Override - public boolean closed() { - return this.closed.get(); - } - } diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index 663fe402729..d932bbb3803 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -96,6 +96,8 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilde */ public class IndicesService extends AbstractLifecycleComponent implements Iterable { + public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; + private final InternalIndicesLifecycle indicesLifecycle; private final IndicesAnalysisService indicesAnalysisService; @@ -104,6 +106,7 @@ public class IndicesService extends AbstractLifecycleComponent i private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; + private final TimeValue shardsClosedTimeout; private volatile Map> indices = ImmutableMap.of(); private final Map> pendingDeletes = new HashMap<>(); @@ -119,6 +122,7 @@ public class IndicesService extends AbstractLifecycleComponent i this.pluginsService = injector.getInstance(PluginsService.class); this.indicesLifecycle.addListener(oldShardsStats); this.nodeEnv = nodeEnv; + this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); } @Override @@ -147,8 +151,8 @@ public class IndicesService extends AbstractLifecycleComponent i }); } try { - if (latch.await(30, TimeUnit.SECONDS) == false) { - logger.warn("Not all shards are closed yet, waited 30sec - stopping service"); + if (latch.await(shardsClosedTimeout.seconds(), TimeUnit.SECONDS) == false) { + logger.warn("Not all shards are closed yet, waited {}sec - stopping service", shardsClosedTimeout.seconds()); } } catch (InterruptedException e) { // ignore diff --git 
a/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index ccb49a6fd2a..83b9a51b0c8 100644 --- a/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -74,7 +74,7 @@ public class HunspellService extends AbstractComponent { public final static String HUNSPELL_LAZY_LOAD = "indices.analysis.hunspell.dictionary.lazy"; public final static String HUNSPELL_IGNORE_CASE = "indices.analysis.hunspell.dictionary.ignore_case"; - public final static String HUNSPELL_LOCATION = "indices.analysis.hunspell.dictionary.location"; + private final static String OLD_HUNSPELL_LOCATION = "indices.analysis.hunspell.dictionary.location"; private final LoadingCache dictionaries; private final Map knownDictionaries; @@ -116,9 +116,9 @@ public class HunspellService extends AbstractComponent { } private Path resolveHunspellDirectory(Settings settings, Environment env) { - String location = settings.get(HUNSPELL_LOCATION, null); + String location = settings.get(OLD_HUNSPELL_LOCATION, null); if (location != null) { - return PathUtils.get(location); + throw new IllegalArgumentException("please, put your hunspell dictionaries under config/hunspell !"); } return env.configFile().resolve("hunspell"); } diff --git a/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java index c2f282eb396..426f4cf89d8 100644 --- a/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java +++ b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.memory; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -191,9 +190,20 @@ public class IndexingMemoryController extends AbstractLifecycleComponent { diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 2535caa7c0b..f7af51628b1 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -22,6 +22,8 @@ package org.elasticsearch.indices.recovery; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; @@ -40,6 +42,7 @@ import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -50,6 +53,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShard; @@ -77,7 +81,7 @@ import java.util.concurrent.atomic.AtomicReference; * 
everything relating to copying the segment files as well as sending translog * operations across the wire once the segments have been copied. */ -public class RecoverySourceHandler implements Engine.RecoveryHandler { +public class RecoverySourceHandler { protected final ESLogger logger; // Shard that is going to be recovered (the "source") @@ -92,7 +96,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { private final IndexService indexService; private final MappingUpdatedAction mappingUpdatedAction; - private final RecoveryResponse response; + protected final RecoveryResponse response; private final CancellableThreads cancellableThreads = new CancellableThreads() { @Override protected void onCancel(String reason, @Nullable Throwable suppressedException) { @@ -127,11 +131,38 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { this.response = new RecoveryResponse(); } - /** - * @return the {@link RecoveryResponse} after the recovery has completed all three phases - */ - public RecoveryResponse getResponse() { - return this.response; + /** performs the recovery from the local engine to the target */ + public RecoveryResponse recoverToTarget() { + final Engine engine = shard.engine(); + assert engine.getTranslog() != null : "translog must not be null"; + try (Translog.View translogView = engine.getTranslog().newView()) { + logger.trace("captured translog id [{}] for recovery", translogView.minTranslogId()); + final SnapshotIndexCommit phase1Snapshot; + try { + phase1Snapshot = shard.snapshotIndex(false); + } catch (Throwable e) { + Releasables.closeWhileHandlingException(translogView); + throw new RecoveryEngineException(shard.shardId(), 1, "Snapshot failed", e); + } + + try { + phase1(phase1Snapshot, translogView); + } catch (Throwable e) { + throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); + } finally { + Releasables.closeWhileHandlingException(phase1Snapshot); + } + + logger.trace("snapshot 
translog for recovery. current size is [{}]", translogView.totalOperations()); + try (Translog.Snapshot phase2Snapshot = translogView.snapshot()) { + phase2(phase2Snapshot); + } catch (Throwable e) { + throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e); + } + + finalizeRecovery(); + } + return response; } /** @@ -142,12 +173,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { * Phase1 examines the segment files on the target node and copies over the * segments that are missing. Only segments that have the same size and * checksum can be reused - *

- * {@code InternalEngine#recover} is responsible for snapshotting the index - * and releasing the snapshot once all 3 phases of recovery are complete */ - @Override - public void phase1(final SnapshotIndexCommit snapshot) { + public void phase1(final SnapshotIndexCommit snapshot, final Translog.View translogView) { cancellableThreads.checkForCancel(); // Total size of segment files that are recovered long totalSize = 0; @@ -157,7 +184,13 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { store.incRef(); try { StopWatch stopWatch = new StopWatch().start(); - final Store.MetadataSnapshot recoverySourceMetadata = store.getMetadata(snapshot); + final Store.MetadataSnapshot recoverySourceMetadata; + try { + recoverySourceMetadata = store.getMetadata(snapshot); + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + shard.engine().failEngine("recovery", ex); + throw ex; + } for (String name : snapshot.getFiles()) { final StoreFileMetaData md = recoverySourceMetadata.get(name); if (md == null) { @@ -220,7 +253,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { public void run() throws InterruptedException { RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, - shard.translog().estimatedNumberOfOperations()); + translogView.totalOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); @@ -315,7 +348,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { readCount += toRead; final boolean lastChunk = readCount == len; final RecoveryFileChunkRequest fileChunkRequest 
= new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, - content, lastChunk, shard.translog().estimatedNumberOfOperations(), throttleTimeInNanos); + content, lastChunk, translogView.totalOperations(), throttleTimeInNanos); cancellableThreads.execute(new Interruptable() { @Override public void run() throws InterruptedException { @@ -364,6 +397,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { }); if (corruptedEngine.get() != null) { + shard.engine().failEngine("recovery", corruptedEngine.get()); throw corruptedEngine.get(); } else { ExceptionsHelper.rethrowAndSuppress(exceptions); @@ -383,7 +417,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { try { final Store.MetadataSnapshot remainingFilesAfterCleanup = recoverWithSyncId? request.metadataSnapshot(): recoverySourceMetadata; transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, - new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), remainingFilesAfterCleanup, shard.translog().estimatedNumberOfOperations()), + new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, translogView.totalOperations()), TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } catch (RemoteTransportException remoteException) { @@ -424,7 +458,9 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { } } }); - stopWatch.stop(); + + prepareTargetForTranslog(translogView); + logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime()); response.phase1Time = stopWatch.totalTime().millis(); } catch (Throwable e) { @@ -434,25 +470,10 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { } } - /** - * Perform phase2 of the recovery process - *

- * Phase2 takes a snapshot of the current translog *without* acquiring the - * write lock (however, the translog snapshot is a point-in-time view of - * the translog). It then sends each translog operation to the target node - * so it can be replayed into the new shard. - *

- * {@code InternalEngine#recover} is responsible for taking the snapshot - * of the translog and releasing it once all 3 phases of recovery are complete - */ - @Override - public void phase2(Translog.Snapshot snapshot) { - if (shard.state() == IndexShardState.CLOSED) { - throw new IndexShardClosedException(request.shardId()); - } - cancellableThreads.checkForCancel(); - logger.trace("{} recovery [phase2] to {}: start", request.shardId(), request.targetNode()); + protected void prepareTargetForTranslog(final Translog.View translogView) { StopWatch stopWatch = new StopWatch().start(); + logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode()); + final long startEngineStart = stopWatch.totalTime().millis(); cancellableThreads.execute(new Interruptable() { @Override public void run() throws InterruptedException { @@ -460,23 +481,38 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { // operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) 
of tombstone deletes transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), shard.translog().estimatedNumberOfOperations()), + new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), translogView.totalOperations()), TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } }); stopWatch.stop(); - response.startTime = stopWatch.totalTime().millis(); - logger.trace("{} recovery [phase2] to {}: start took [{}]", + + response.startTime = stopWatch.totalTime().millis() - startEngineStart; + logger.trace("{} recovery [phase1] to {}: remote engine start took [{}]", request.shardId(), request.targetNode(), stopWatch.totalTime()); + } + /** + * Perform phase2 of the recovery process + *

+ * Phase2 takes a snapshot of the current translog *without* acquiring the + * write lock (however, the translog snapshot is a point-in-time view of + * the translog). It then sends each translog operation to the target node + * so it can be replayed into the new shard. + */ + public void phase2(Translog.Snapshot snapshot) { + if (shard.state() == IndexShardState.CLOSED) { + throw new IndexShardClosedException(request.shardId()); + } + cancellableThreads.checkForCancel(); + StopWatch stopWatch = new StopWatch().start(); logger.trace("{} recovery [phase2] to {}: updating current mapping to master", request.shardId(), request.targetNode()); // Ensure that the mappings are synced with the master node updateMappingOnMaster(); logger.trace("{} recovery [phase2] to {}: sending transaction log operations", request.shardId(), request.targetNode()); - stopWatch = new StopWatch().start(); // Send all the snapshot's translog operations to the target int totalOperations = sendSnapshot(snapshot); stopWatch.stop(); @@ -486,6 +522,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { } /** +<<<<<<< HEAD * Perform phase 3 of the recovery process *

* Phase3 again takes a snapshot of the translog, however this time the @@ -495,19 +532,18 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { * {@code InternalEngine#recover} is responsible for taking the snapshot * of the translog, and after phase 3 completes the snapshots from all * three phases are released. +======= + * finalizes the recovery process +>>>>>>> origin/master */ - @Override - public void phase3(Translog.Snapshot snapshot) { + public void finalizeRecovery() { if (shard.state() == IndexShardState.CLOSED) { throw new IndexShardClosedException(request.shardId()); } cancellableThreads.checkForCancel(); StopWatch stopWatch = new StopWatch().start(); - final int totalOperations; - logger.trace("[{}][{}] recovery [phase3] to {}: sending transaction log operations", indexName, shardId, request.targetNode()); + logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode()); - // Send the translog operations to the target node - totalOperations = sendSnapshot(snapshot); cancellableThreads.execute(new Interruptable() { @Override @@ -536,10 +572,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { } } stopWatch.stop(); - logger.trace("[{}][{}] recovery [phase3] to {}: took [{}]", + logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime()); - response.phase3Time = stopWatch.totalTime().millis(); - response.phase3Operations = totalOperations; } /** @@ -611,7 +645,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { * * @return the total number of translog operations that were sent */ - protected int sendSnapshot(Translog.Snapshot snapshot) { + protected int sendSnapshot(final Translog.Snapshot snapshot) { int ops = 0; long size = 0; int totalOperations = 0; @@ -659,7 +693,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { @Override public void run() throws 
InterruptedException { final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( - request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations()); + request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } @@ -667,7 +701,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { if (logger.isTraceEnabled()) { logger.trace("[{}][{}] sent batch of [{}][{}] (total: [{}]) translog operations to {}", indexName, shardId, ops, new ByteSizeValue(size), - shard.translog().estimatedNumberOfOperations(), + snapshot.estimatedTotalOperations(), request.targetNode()); } @@ -687,7 +721,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { @Override public void run() throws InterruptedException { RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( - request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations()); + request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } @@ -697,7 +731,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { if (logger.isTraceEnabled()) { logger.trace("[{}][{}] sent final batch of [{}][{}] (total: [{}]) translog operations to {}", indexName, shardId, ops, new ByteSizeValue(size), - shard.translog().estimatedNumberOfOperations(), + snapshot.estimatedTotalOperations(), request.targetNode()); } return totalOperations; diff --git 
a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 185f1e8cd07..0a1f6156161 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -43,7 +43,10 @@ import org.elasticsearch.index.IndexShardMissingException; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.*; +import org.elasticsearch.index.shard.IllegalIndexShardStateException; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.IndexMissingException; @@ -204,8 +207,6 @@ public class RecoveryTarget extends AbstractComponent { sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]").append(" transaction log operations") .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") .append("\n"); - sb.append(" phase3: recovered [").append(recoveryResponse.phase3Operations).append("]").append(" transaction log operations") - .append(", took [").append(timeValueMillis(recoveryResponse.phase3Time)).append("]"); logger.trace(sb.toString()); } else { logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryStatus.sourceNode(), recoveryTime); @@ -231,7 +232,7 @@ public class RecoveryTarget extends AbstractComponent { // here, we would add checks against exception that need to be retried (and not removeAndClean in this case) - if (cause instanceof IndexShardNotStartedException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) { 
+ if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexMissingException || cause instanceof IndexShardMissingException) { // if the target is not ready yet, retry retryRecovery(recoveryStatus, "remote shard not ready", recoverySettings.retryDelayStateSync(), request); return; @@ -275,7 +276,7 @@ public class RecoveryTarget extends AbstractComponent { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { final RecoveryStatus recoveryStatus = statusRef.status(); recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); - recoveryStatus.indexShard().skipTranslogRecovery(); + recoveryStatus.indexShard().skipTranslogRecovery(false); } channel.sendResponse(TransportResponse.Empty.INSTANCE); } diff --git a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java index bacd084d058..5aa698e176b 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java @@ -22,13 +22,17 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * A recovery handler that skips phase 1 as well as sending the snapshot. 
During phase 3 the shard is marked * as relocated an closed to ensure that the engine is closed and the target can acquire the IW write lock. @@ -37,6 +41,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { private final IndexShard shard; private final StartRecoveryRequest request; + private static final Translog.View EMPTY_VIEW = new EmptyView(); public SharedFSRecoverySourceHandler(IndexShard shard, StartRecoveryRequest request, RecoverySettings recoverySettings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, MappingUpdatedAction mappingUpdatedAction, ESLogger logger) { super(shard, request, recoverySettings, transportService, clusterService, indicesService, mappingUpdatedAction, logger); @@ -45,23 +50,78 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { } @Override - public void phase1(SnapshotIndexCommit snapshot) { - if (request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary()) { - // here we simply fail the primary shard since we can't move them (have 2 writers open at the same time) - // by failing the shard we play safe and just go through the entire reallocation procedure of the primary - // it would be ideal to make sure we flushed the translog here but that is not possible in the current design. 
- IllegalStateException exception = new IllegalStateException("Can't relocate primary - failing"); - shard.failShard("primary_relocation", exception); - throw exception; + public RecoveryResponse recoverToTarget() { + boolean engineClosed = false; + try { + logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode()); + if (isPrimaryRelocation()) { + logger.debug("[phase1] closing engine on primary for shared filesystem recovery"); + try { + // if we relocate we need to close the engine in order to open a new + // IndexWriter on the other end of the relocation + engineClosed = true; + shard.engine().flushAndClose(); + } catch (IOException e) { + logger.warn("close engine failed", e); + shard.failShard("failed to close engine (phase1)", e); + } + } + prepareTargetForTranslog(EMPTY_VIEW); + finalizeRecovery(); + return response; + } catch (Throwable t) { + if (engineClosed) { + // If the relocation fails then the primary is closed and can't be + // used anymore... 
(because it's closed) that's a problem, so in + // that case, fail the shard to reallocate a new IndexShard and + // create a new IndexWriter + logger.info("recovery failed for primary shadow shard, failing shard"); + shard.failShard("primary relocation failed on shared filesystem", t); + } else { + logger.info("recovery failed on shared filesystem", t); + } + throw t; } - logger.trace("{} recovery [phase2] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode()); } - @Override protected int sendSnapshot(Translog.Snapshot snapshot) { - logger.trace("{} recovery [phase3] to {}: skipping transaction log operations for file sync", shard.shardId(), request.targetNode()); + logger.trace("{} skipping recovery of translog snapshot on shared filesystem to: {}", + shard.shardId(), request.targetNode()); return 0; } + private boolean isPrimaryRelocation() { + return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary(); + } + + /** + * An empty view since we don't recover from translog even in the shared FS case + */ + private static class EmptyView implements Translog.View { + + @Override + public int totalOperations() { + return 0; + } + + @Override + public long sizeInBytes() { + return 0; + } + + @Override + public Translog.Snapshot snapshot() { + return null; + } + + @Override + public long minTranslogId() { + return 0; + } + + @Override + public void close() { + } + } } diff --git a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 9b83c2648b3..b291c4366de 100644 --- a/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -21,9 +21,8 @@ package org.elasticsearch.rest; import org.elasticsearch.action.*; import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.FilterClient; -import 
org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -31,7 +30,7 @@ import java.util.Set; /** * Base handler for REST requests. - * + *

* This handler makes sure that the headers & context of the handled {@link RestRequest requests} are copied over to * the transport requests executed by the associated client. While the context is fully copied over, not all the headers * are copied, but a selected few. It is possible to control what headers are copied over by registering them using @@ -58,15 +57,11 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH static final class HeadersAndContextCopyClient extends FilterClient { private final RestRequest restRequest; - private final IndicesAdmin indicesAdmin; - private final ClusterAdmin clusterAdmin; private final Set headers; HeadersAndContextCopyClient(Client in, RestRequest restRequest, Set headers) { super(in); this.restRequest = restRequest; - this.indicesAdmin = new IndicesAdmin(in.admin().indices(), restRequest, headers); - this.clusterAdmin = new ClusterAdmin(in.admin().cluster(), restRequest, headers); this.headers = headers; } @@ -81,73 +76,9 @@ public abstract class BaseRestHandler extends AbstractComponent implements RestH } @Override - public > ActionFuture execute(Action action, Request request) { + protected > void doExecute(Action action, Request request, ActionListener listener) { copyHeadersAndContext(request, restRequest, headers); - return super.execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - copyHeadersAndContext(request, restRequest, headers); - super.execute(action, request, listener); - } - - @Override - public ClusterAdminClient cluster() { - return clusterAdmin; - } - - @Override - public IndicesAdminClient indices() { - return indicesAdmin; - } - - private static final class ClusterAdmin extends FilterClient.ClusterAdmin { - - private final RestRequest restRequest; - private final Set headers; - - private ClusterAdmin(ClusterAdminClient in, RestRequest restRequest, Set headers) { - super(in); - this.restRequest = 
restRequest; - this.headers = headers; - } - - @Override - public > ActionFuture execute(Action action, Request request) { - copyHeadersAndContext(request, restRequest, headers); - return super.execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - copyHeadersAndContext(request, restRequest, headers); - super.execute(action, request, listener); - } - } - - private final class IndicesAdmin extends FilterClient.IndicesAdmin { - - private final RestRequest restRequest; - private final Set headers; - - private IndicesAdmin(IndicesAdminClient in, RestRequest restRequest, Set headers) { - super(in); - this.restRequest = restRequest; - this.headers = headers; - } - - @Override - public > ActionFuture execute(Action action, Request request) { - copyHeadersAndContext(request, restRequest, headers); - return super.execute(action, request); - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - copyHeadersAndContext(request, restRequest, headers); - super.execute(action, request, listener); - } + super.doExecute(action, request, listener); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java index b080e51055d..dfcb4438d57 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/health/RestClusterHealthAction.java @@ -50,7 +50,6 @@ public class RestClusterHealthAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index"))); 
clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.listenerThreaded(false); clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); String waitForStatus = request.param("wait_for_status"); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java index dbda82ff387..a78c90aca63 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/info/RestNodesInfoAction.java @@ -81,8 +81,6 @@ public class RestNodesInfoAction extends BaseRestHandler { } final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds); - nodesInfoRequest.listenerThreaded(false); - // shortcut, dont do checks if only all is specified if (metrics.size() == 1 && metrics.contains("_all")) { nodesInfoRequest.all(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index 4704d8ee832..d5bb383c33a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -60,7 +60,6 @@ public class RestNodesStatsAction extends BaseRestHandler { Set metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all")); NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds); - nodesStatsRequest.listenerThreaded(false); if (metrics.size() == 1 && metrics.contains("_all")) { nodesStatsRequest.all(); diff --git 
a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java index 46c06ecbe75..758ee34505a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/delete/RestDeleteRepositoryAction.java @@ -45,7 +45,6 @@ public class RestDeleteRepositoryAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteRepositoryRequest deleteRepositoryRequest = deleteRepositoryRequest(request.param("repository")); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); - deleteRepositoryRequest.listenerThreaded(false); deleteRepositoryRequest.timeout(request.paramAsTime("timeout", deleteRepositoryRequest.timeout())); deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); client.admin().cluster().deleteRepository(deleteRepositoryRequest, new AcknowledgedRestListener(channel)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java index 60e68b25f42..b974a9be0fb 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/put/RestPutRepositoryAction.java @@ -47,7 +47,6 @@ public class RestPutRepositoryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutRepositoryRequest 
putRepositoryRequest = putRepositoryRequest(request.param("repository")); - putRepositoryRequest.listenerThreaded(false); putRepositoryRequest.source(request.content().toUtf8()); putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java index cdfe3b7992b..bbc39cbd2f3 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/verify/RestVerifyRepositoryAction.java @@ -50,7 +50,6 @@ public class RestVerifyRepositoryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { VerifyRepositoryRequest verifyRepositoryRequest = verifyRepositoryRequest(request.param("repository")); - verifyRepositoryRequest.listenerThreaded(false); verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); verifyRepositoryRequest.timeout(request.paramAsTime("timeout", verifyRepositoryRequest.timeout())); client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener(channel)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java index 489acf93db1..7d5d2c9d5ff 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/admin/cluster/reroute/RestClusterRerouteAction.java @@ -54,7 +54,6 @@ public class RestClusterRerouteAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { final ClusterRerouteRequest clusterRerouteRequest = Requests.clusterRerouteRequest(); - clusterRerouteRequest.listenerThreaded(false); clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun())); clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); clusterRerouteRequest.timeout(request.paramAsTime("timeout", clusterRerouteRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index 5ef9c403ac2..a1cfdb48ddb 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -42,7 +42,6 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() - .listenerThreaded(false) .routingTable(false) .nodes(false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java index 87363d386ef..8536c037e89 100644 --- 
a/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterUpdateSettingsAction.java @@ -46,7 +46,6 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = Requests.clusterUpdateSettingsRequest(); - clusterUpdateSettingsRequest.listenerThreaded(false); clusterUpdateSettingsRequest.timeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.timeout())); clusterUpdateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout())); Map source = XContentFactory.xContent(request.content()).createParser(request.content()).mapAndClose(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java index 22151e7b0ad..a797a474eb6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/shards/RestClusterSearchShardsAction.java @@ -53,7 +53,6 @@ public class RestClusterSearchShardsAction extends BaseRestHandler { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices); clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local())); - clusterSearchShardsRequest.listenerThreaded(false); clusterSearchShardsRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); clusterSearchShardsRequest.routing(request.param("routing")); 
diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java index ff71f7e60f9..c62be2b3db6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/snapshots/create/RestCreateSnapshotAction.java @@ -46,7 +46,6 @@ public class RestCreateSnapshotAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CreateSnapshotRequest createSnapshotRequest = createSnapshotRequest(request.param("repository"), request.param("snapshot")); - createSnapshotRequest.listenerThreaded(false); createSnapshotRequest.source(request.content().toUtf8()); createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java index bac21dd13e4..4e4dc0826ae 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/state/RestClusterStateAction.java @@ -57,7 +57,6 @@ public class RestClusterStateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); - clusterStateRequest.listenerThreaded(false); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); 
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java index 5dd1c638b83..572a48de633 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/stats/RestClusterStatsAction.java @@ -43,7 +43,6 @@ public class RestClusterStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); - clusterStatsRequest.listenerThreaded(false); client.admin().cluster().clusterStats(clusterStatsRequest, new RestToXContentListener(channel)); } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 2019b71426a..4841500cb66 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -49,7 +49,6 @@ public class RestIndicesAliasesAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.listenerThreaded(false); indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", 
indicesAliasesRequest.masterNodeTimeout())); try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { // { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java index 060ec3f3012..34cdcb27962 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/get/RestGetIndicesAliasesAction.java @@ -61,7 +61,6 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler { .nodes(false) .indices(indices); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.listenerThreaded(false); client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 4a5e47b9664..5ce5eaef4ac 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -59,7 +59,6 @@ public class RestAnalyzeAction extends BaseRestHandler { AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); analyzeRequest.text(text); - analyzeRequest.listenerThreaded(false); analyzeRequest.preferLocal(request.paramAsBoolean("prefer_local", analyzeRequest.preferLocalShard())); analyzeRequest.analyzer(request.param("analyzer")); analyzeRequest.field(request.param("field")); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 54d9948537e..8eb83cba0d1 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -56,7 +56,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(Strings.splitStringByCommaToArray(request.param("index"))); - clearIndicesCacheRequest.listenerThreaded(false); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest); client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java index 03b5a8c542f..940b6c1cefa 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/close/RestCloseIndexAction.java @@ -44,7 +44,6 @@ public class RestCloseIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - closeIndexRequest.listenerThreaded(false); closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); closeIndexRequest.timeout(request.paramAsTime("timeout", 
closeIndexRequest.timeout())); closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java index 2fca3ff6a27..8f3447ff9f8 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/create/RestCreateIndexAction.java @@ -43,7 +43,6 @@ public class RestCreateIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); - createIndexRequest.listenerThreaded(false); if (request.hasContent()) { createIndexRequest.source(request.content()); } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java index 126c471578e..43201592e31 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/delete/RestDeleteIndexAction.java @@ -44,7 +44,6 @@ public class RestDeleteIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - deleteIndexRequest.listenerThreaded(false); deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout())); deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); 
deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java index a0cf436a468..8ea4e633bc1 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/indices/RestIndicesExistsAction.java @@ -49,7 +49,6 @@ public class RestIndicesExistsAction extends BaseRestHandler { IndicesExistsRequest indicesExistsRequest = new IndicesExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesExistsRequest.indicesOptions())); indicesExistsRequest.local(request.paramAsBoolean("local", indicesExistsRequest.local())); - indicesExistsRequest.listenerThreaded(false); client.admin().indices().exists(indicesExistsRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(IndicesExistsResponse response) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java index c5571313a2f..a03a7f0fe1d 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/exists/types/RestTypesExistsAction.java @@ -48,7 +48,6 @@ public class RestTypesExistsAction extends BaseRestHandler { TypesExistsRequest typesExistsRequest = new TypesExistsRequest( Strings.splitStringByCommaToArray(request.param("index")), Strings.splitStringByCommaToArray(request.param("type")) ); - typesExistsRequest.listenerThreaded(false); 
typesExistsRequest.local(request.paramAsBoolean("local", typesExistsRequest.local())); typesExistsRequest.indicesOptions(IndicesOptions.fromRequest(request, typesExistsRequest.indicesOptions())); client.admin().indices().typesExists(typesExistsRequest, new RestResponseListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java index 5bcb775122a..6c95342cf89 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestFlushAction.java @@ -53,7 +53,6 @@ public class RestFlushAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); - flushRequest.listenerThreaded(false); flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java index d067ed96bd1..6df8edc0c82 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/mapping/put/RestPutMappingAction.java @@ -67,7 +67,6 @@ public class RestPutMappingAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutMappingRequest putMappingRequest = 
putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); - putMappingRequest.listenerThreaded(false); putMappingRequest.type(request.param("type")); putMappingRequest.source(request.content().toUtf8()); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java index 7ff9a8fb620..e81bca30f6a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/open/RestOpenIndexAction.java @@ -44,7 +44,6 @@ public class RestOpenIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - openIndexRequest.listenerThreaded(false); openIndexRequest.timeout(request.paramAsTime("timeout", openIndexRequest.timeout())); openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java index c49745db629..74379f632c5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/optimize/RestOptimizeAction.java @@ -53,7 +53,6 @@ public class RestOptimizeAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) 
{ OptimizeRequest optimizeRequest = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index"))); - optimizeRequest.listenerThreaded(false); optimizeRequest.indicesOptions(IndicesOptions.fromRequest(request, optimizeRequest.indicesOptions())); optimizeRequest.maxNumSegments(request.paramAsInt("max_num_segments", optimizeRequest.maxNumSegments())); optimizeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", optimizeRequest.onlyExpungeDeletes())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java index 2abd624a3c4..9d470c4b051 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/recovery/RestRecoveryAction.java @@ -51,7 +51,6 @@ public class RestRecoveryAction extends BaseRestHandler { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); - recoveryRequest.listenerThreaded(false); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java index aaf8933ea40..949b82270ff 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/refresh/RestRefreshAction.java @@ -53,7 +53,6 @@ public class RestRefreshAction extends BaseRestHandler { @Override 
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); - refreshRequest.listenerThreaded(false); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java index 3806d8cbe1c..b5b2ba6e7c4 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/segments/RestIndicesSegmentsAction.java @@ -49,7 +49,6 @@ public class RestIndicesSegmentsAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndicesSegmentsRequest indicesSegmentsRequest = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); - indicesSegmentsRequest.listenerThreaded(false); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index d19b5c9a1f4..718d16c4705 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -55,7 +55,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); - updateSettingsRequest.listenerThreaded(false); updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index 52ac511366d..43fcbd57171 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -53,7 +53,6 @@ public class RestIndicesStatsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); - indicesStatsRequest.listenerThreaded(false); indicesStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesStatsRequest.indicesOptions())); indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types"))); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java index 8ebb8675d76..2b6ebbc6023 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/delete/RestDeleteIndexTemplateAction.java @@ -40,7 +40,6 @@ public class RestDeleteIndexTemplateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); - deleteIndexTemplateRequest.listenerThreaded(false); deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new AcknowledgedRestListener(channel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java index dac342fab95..a4c2539f226 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/get/RestGetIndexTemplateAction.java @@ -58,8 +58,6 @@ public class RestGetIndexTemplateAction extends BaseRestHandler { getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); - getIndexTemplatesRequest.listenerThreaded(false); - final boolean implicitAll = getIndexTemplatesRequest.names().length == 0; client.admin().indices().getTemplates(getIndexTemplatesRequest, new 
RestBuilderListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java index 7875f12259b..e555cfd0fac 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/template/put/RestPutIndexTemplateAction.java @@ -42,7 +42,6 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); - putRequest.listenerThreaded(false); putRequest.template(request.param("template", putRequest.template())); putRequest.order(request.paramAsInt("order", putRequest.order())); putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java index 32a2d24e888..4f237465bd6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java @@ -57,7 +57,6 @@ public class RestValidateQueryAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); - validateQueryRequest.listenerThreaded(false); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, 
validateQueryRequest.indicesOptions())); if (RestActions.hasBodyContent(request)) { validateQueryRequest.source(RestActions.getRestContent(request)); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java index da41e80cc97..1d3fae87616 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/delete/RestDeleteWarmerAction.java @@ -47,7 +47,6 @@ public class RestDeleteWarmerAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteWarmerRequest deleteWarmerRequest = new DeleteWarmerRequest(Strings.splitStringByCommaToArray(request.param("name"))) .indices(Strings.splitStringByCommaToArray(request.param("index"))); - deleteWarmerRequest.listenerThreaded(false); deleteWarmerRequest.timeout(request.paramAsTime("timeout", deleteWarmerRequest.timeout())); deleteWarmerRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteWarmerRequest.masterNodeTimeout())); deleteWarmerRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteWarmerRequest.indicesOptions())); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java index 9a802f1bf3d..62f666364f5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/put/RestPutWarmerAction.java @@ -59,7 +59,6 @@ public class RestPutWarmerAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { 
PutWarmerRequest putWarmerRequest = new PutWarmerRequest(request.param("name")); - putWarmerRequest.listenerThreaded(false); SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index"))) .types(Strings.splitStringByCommaToArray(request.param("type"))) .queryCache(request.paramAsBoolean("query_cache", null)) diff --git a/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java index 9578e078270..1a3a1b38a6e 100644 --- a/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java +++ b/src/main/java/org/elasticsearch/rest/action/bulk/RestBulkAction.java @@ -71,7 +71,6 @@ public class RestBulkAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { BulkRequest bulkRequest = Requests.bulkRequest(); - bulkRequest.listenerThreaded(false); String defaultIndex = request.param("index"); String defaultType = request.param("type"); String defaultRouting = request.param("routing"); diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index a7a7eb53e85..e2d277819e0 100644 --- a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -68,7 +68,6 @@ public class RestRecoveryAction extends AbstractCatAction { final RecoveryRequest recoveryRequest = new RecoveryRequest(Strings.splitStringByCommaToArray(request.param("index"))); recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); - recoveryRequest.listenerThreaded(false); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); 
client.admin().indices().recoveries(recoveryRequest, new RestResponseListener(channel) { diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java index 58dcfa50973..dc38db49181 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -58,7 +58,6 @@ public class RestCountAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { CountRequest countRequest = new CountRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); - countRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { countRequest.source(RestActions.getRestContent(request)); } else { diff --git a/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java index fced1421cb0..69f06cef1f1 100644 --- a/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java +++ b/src/main/java/org/elasticsearch/rest/action/delete/RestDeleteAction.java @@ -51,7 +51,6 @@ public class RestDeleteAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id")); - deleteRequest.listenerThreaded(false); deleteRequest.operationThreaded(true); deleteRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java index 85f73f61ec4..7cfe7caf3fd 100644 --- 
a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java @@ -48,7 +48,6 @@ public class RestExistsAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final ExistsRequest existsRequest = new ExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); existsRequest.indicesOptions(IndicesOptions.fromRequest(request, existsRequest.indicesOptions())); - existsRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { existsRequest.source(RestActions.getRestContent(request)); } else { diff --git a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java index fd45c5a56d4..ca382f3c642 100644 --- a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java @@ -57,7 +57,6 @@ public class RestFieldStatsAction extends BaseRestHandler { fieldStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, fieldStatsRequest.indicesOptions())); fieldStatsRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); fieldStatsRequest.level(request.param("level", FieldStatsRequest.DEFAULT_LEVEL)); - fieldStatsRequest.listenerThreaded(false); client.fieldStats(fieldStatsRequest, new RestBuilderListener(channel) { @Override diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java index ae2e76be690..9ed5c4d5fe9 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetAction.java @@ -50,7 +50,6 @@ public class RestGetAction extends BaseRestHandler { @Override public void 
handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java index 4142e667985..db3954ec5e8 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestGetSourceAction.java @@ -51,7 +51,6 @@ public class RestGetSourceAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java index 9217200a7e6..d0c1433bb47 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestHeadAction.java @@ -47,7 +47,6 @@ public class RestHeadAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { final GetRequest getRequest = new GetRequest(request.param("index"), 
request.param("type"), request.param("id")); - getRequest.listenerThreaded(false); getRequest.operationThreaded(true); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java index b9cdd78063c..14e4496085b 100644 --- a/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java +++ b/src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java @@ -53,7 +53,6 @@ public class RestMultiGetAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.listenerThreaded(false); multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh())); multiGetRequest.preference(request.param("preference")); multiGetRequest.realtime(request.paramAsBoolean("realtime", null)); diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index d95ef3e9498..a0d5b279e71 100644 --- a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -70,7 +70,6 @@ public class RestIndexAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id")); - indexRequest.listenerThreaded(false); indexRequest.operationThreaded(true); indexRequest.routing(request.param("routing")); 
indexRequest.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing diff --git a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java index a9d60e02aa5..41f28574bdf 100644 --- a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java @@ -50,8 +50,6 @@ public class RestMoreLikeThisAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { MoreLikeThisRequest mltRequest = moreLikeThisRequest(request.param("index")).type(request.param("type")).id(request.param("id")); mltRequest.routing(request.param("routing")); - - mltRequest.listenerThreaded(false); //TODO the ParseField class that encapsulates the supported names used for an attribute //needs some work if it is to be used in a REST context like this too // See the MoreLikeThisQueryParser constants that hold the valid syntax diff --git a/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java index fb609d3e14a..4ee543f5362 100644 --- a/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java @@ -94,8 +94,6 @@ public class RestPercolateAction extends BaseRestHandler { } void executePercolate(final PercolateRequest percolateRequest, final RestChannel restChannel, final Client client) { - // we just send a response, no need to fork - percolateRequest.listenerThreaded(false); client.percolate(percolateRequest, new RestToXContentListener(restChannel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java 
b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java index d63a39ac555..35e3f2cc473 100644 --- a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java @@ -75,7 +75,7 @@ public class RestPutIndexedScriptAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, Client client) { - PutIndexedScriptRequest putRequest = new PutIndexedScriptRequest(getScriptLang(request), request.param("id")).listenerThreaded(false); + PutIndexedScriptRequest putRequest = new PutIndexedScriptRequest(getScriptLang(request), request.param("id")); putRequest.version(request.paramAsLong("version", putRequest.version())); putRequest.versionType(VersionType.fromString(request.param("version_type"), putRequest.versionType())); putRequest.source(request.content()); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 92c0ba9b217..6dfe605d96b 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -56,7 +56,6 @@ public class RestMultiSearchAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - multiSearchRequest.listenerThreaded(false); String[] indices = Strings.splitStringByCommaToArray(request.param("index")); String[] types = Strings.splitStringByCommaToArray(request.param("type")); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 
9c3e85a2e94..70060588ded 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -77,7 +77,6 @@ public class RestSearchAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { SearchRequest searchRequest; searchRequest = RestSearchAction.parseSearchRequest(request); - searchRequest.listenerThreaded(false); client.search(searchRequest, new RestStatusToXContentListener(channel)); } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index 15de56265bc..c53331bb496 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -60,7 +60,6 @@ public class RestSearchScrollAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { String scrollId = request.param("scroll_id"); SearchScrollRequest searchScrollRequest = new SearchScrollRequest(); - searchScrollRequest.listenerThreaded(false); searchScrollRequest.scrollId(scrollId); String scroll = request.param("scroll"); if (scroll != null) { diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 9f9c3946b36..184a62244d1 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -59,7 +59,6 @@ public class RestSuggestAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { SuggestRequest suggestRequest = new 
SuggestRequest(Strings.splitStringByCommaToArray(request.param("index"))); suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions())); - suggestRequest.listenerThreaded(false); if (RestActions.hasBodyContent(request)) { suggestRequest.suggest(RestActions.getRestContent(request)); } else { diff --git a/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java b/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java index b0cd0bd63a7..400869fff0e 100644 --- a/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/termvectors/RestMultiTermVectorsAction.java @@ -49,7 +49,6 @@ public class RestMultiTermVectorsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest(); - multiTermVectorsRequest.listenerThreaded(false); TermVectorsRequest template = new TermVectorsRequest(); template.index(request.param("index")); template.type(request.param("type")); diff --git a/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java index c884fe4267e..d019e598cac 100644 --- a/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/update/RestUpdateAction.java @@ -55,7 +55,6 @@ public class RestUpdateAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); - updateRequest.listenerThreaded(false); updateRequest.routing(request.param("routing")); 
updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); diff --git a/src/main/java/org/elasticsearch/river/RiversService.java b/src/main/java/org/elasticsearch/river/RiversService.java index ed7369d8ad0..fdb2589a540 100644 --- a/src/main/java/org/elasticsearch/river/RiversService.java +++ b/src/main/java/org/elasticsearch/river/RiversService.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -231,7 +232,7 @@ public class RiversService extends AbstractLifecycleComponent { logger.trace("river {} is already allocated", routing.riverName().getName()); continue; } - prepareGetMetaDocument(routing.riverName().name()).execute(new ActionListener() { + prepareGetMetaDocument(routing.riverName().name()).execute(new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, new ActionListener() { @Override public void onResponse(GetResponse getResponse) { if (!rivers.containsKey(routing.riverName())) { @@ -255,7 +256,7 @@ public class RiversService extends AbstractLifecycleComponent { logger.debug("failed to get _meta from [{}]/[{}], retrying...", e, routing.riverName().type(), routing.riverName().name()); final ActionListener listener = this; try { - threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.SAME, new Runnable() { + threadPool.schedule(TimeValue.timeValueSeconds(5), ThreadPool.Names.LISTENER, new Runnable() { @Override public void run() { prepareGetMetaDocument(routing.riverName().name()).execute(listener); @@ -268,12 +269,12 @@ public class RiversService extends 
AbstractLifecycleComponent { logger.warn("failed to get _meta from [{}]/[{}]", e, routing.riverName().type(), routing.riverName().name()); } } - }); + })); } } private GetRequestBuilder prepareGetMetaDocument(String riverName) { - return client.prepareGet(riverIndexName, riverName, "_meta").setPreference("_primary").setListenerThreaded(true); + return client.prepareGet(riverIndexName, riverName, "_meta").setPreference("_primary"); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java index 464d0a339a8..df356b5b8a3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java @@ -90,13 +90,13 @@ public final class GeoBoundsAggregator extends MetricsAggregator { tops = bigArrays.grow(tops, bucket + 1); tops.fill(from, tops.size(), Double.NEGATIVE_INFINITY); bottoms = bigArrays.resize(bottoms, tops.size()); - bottoms.fill(from, bottoms.size(), Double.NEGATIVE_INFINITY); + bottoms.fill(from, bottoms.size(), Double.POSITIVE_INFINITY); posLefts = bigArrays.resize(posLefts, tops.size()); - posLefts.fill(from, posLefts.size(), Double.NEGATIVE_INFINITY); + posLefts.fill(from, posLefts.size(), Double.POSITIVE_INFINITY); posRights = bigArrays.resize(posRights, tops.size()); posRights.fill(from, posRights.size(), Double.NEGATIVE_INFINITY); negLefts = bigArrays.resize(negLefts, tops.size()); - negLefts.fill(from, negLefts.size(), Double.NEGATIVE_INFINITY); + negLefts.fill(from, negLefts.size(), Double.POSITIVE_INFINITY); negRights = bigArrays.resize(negRights, tops.size()); negRights.fill(from, negRights.size(), Double.NEGATIVE_INFINITY); } diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java index 
51cb9d0ecab..d0cfe765817 100644 --- a/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -127,6 +127,7 @@ public class TribeService extends AbstractLifecycleComponent { for (Map.Entry entry : nodesSettings.entrySet()) { ImmutableSettings.Builder sb = ImmutableSettings.builder().put(entry.getValue()); sb.put("node.name", settings.get("name") + "/" + entry.getKey()); + sb.put("path.home", settings.get("path.home")); // pass through ES home dir sb.put(TRIBE_NAME, entry.getKey()); sb.put("config.ignore_system_properties", true); if (sb.get("http.enabled") == null) { diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy index ffc0032d4a0..e6500109dc7 100644 --- a/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -21,20 +21,26 @@ // On startup, BootStrap reads environment and adds additional permissions // for configured paths to these. 
+//// System code permissions: +//// These permissions apply to the JDK itself: + +grant codeBase "file:${{java.ext.dirs}}/*" { + permission java.security.AllPermission; +}; + +//// Everything else: + grant { // system jar resources permission java.io.FilePermission "${java.home}${/}-", "read"; - // temporary files - permission java.io.FilePermission "${java.io.tmpdir}", "read,write"; - permission java.io.FilePermission "${java.io.tmpdir}${/}-", "read,write,delete"; - // paths used for running tests - // project base directory - permission java.io.FilePermission "${project.basedir}${/}target${/}-", "read"; + // compiled classes + permission java.io.FilePermission "${project.basedir}${/}target${/}classes${/}-", "read"; + permission java.io.FilePermission "${project.basedir}${/}target${/}test-classes${/}-", "read"; // read permission for lib sigar - permission java.io.FilePermission "${project.basedir}${/}lib/sigar{/}-", "read"; + permission java.io.FilePermission "${project.basedir}${/}lib${/}sigar${/}-", "read"; // mvn custom ./m2/repository for dependency jars permission java.io.FilePermission "${m2.repository}${/}-", "read"; @@ -62,12 +68,6 @@ grant { // needed by ImmutableSettings permission java.lang.RuntimePermission "getenv.*"; - // needed by BootStrap, etc - permission java.lang.RuntimePermission "exitVM.*"; - - // needed by RandomizedTest.globalTempDir() - permission java.lang.RuntimePermission "shutdownHooks"; - // needed by PluginManager permission java.lang.RuntimePermission "setFactory"; @@ -81,13 +81,6 @@ grant { // needed by groovy scripting permission java.lang.RuntimePermission "getProtectionDomain"; - // needed for natives calls - permission java.lang.RuntimePermission "loadLibrary.*"; - - // needed for testing access rules etc - permission java.lang.RuntimePermission "createSecurityManager"; - permission java.security.SecurityPermission "createPolicy.JavaPolicy"; - // reflection hacks: // needed for Striped64 (what is this doing), also enables 
unmap hack permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; diff --git a/src/test/java/org/elasticsearch/action/ListenerActionTests.java b/src/test/java/org/elasticsearch/action/ListenerActionTests.java new file mode 100644 index 00000000000..50cde99fcdb --- /dev/null +++ b/src/test/java/org/elasticsearch/action/ListenerActionTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +/** + */ +public class ListenerActionTests extends ElasticsearchIntegrationTest { + + @Test + public void verifyThreadedListeners() throws Throwable { + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference failure = new AtomicReference<>(); + final AtomicReference threadName = new AtomicReference<>(); + Client client = client(); + + IndexRequest request = new IndexRequest("test", "type", "1"); + if (randomBoolean()) { + // set the source, without it, we will have a verification failure + request.source("field1", "value1"); + } + + client.index(request, new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + threadName.set(Thread.currentThread().getName()); + latch.countDown(); + } + + @Override + public void onFailure(Throwable e) { + threadName.set(Thread.currentThread().getName()); + failure.set(e); + latch.countDown(); + } + }); + + latch.await(); + + boolean shouldBeThreaded = DiscoveryNode.clientNode(client.settings()) || TransportClient.CLIENT_TYPE.equals(client.settings().get(Client.CLIENT_TYPE_SETTING)); + if (shouldBeThreaded) { + assertTrue(threadName.get().contains("listener")); + } else { + assertFalse(threadName.get().contains("listener")); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java index bace2472a3c..ea25cfb767f 100644 --- 
a/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java +++ b/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java @@ -33,7 +33,7 @@ public class BulkIntegrationTests extends ElasticsearchIntegrationTest { @Test public void testBulkIndexCreatesMapping() throws Exception { String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json"); - BulkRequestBuilder bulkBuilder = new BulkRequestBuilder(client()); + BulkRequestBuilder bulkBuilder = client().prepareBulk(); bulkBuilder.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null); bulkBuilder.get(); assertBusy(new Runnable() { diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index e357206bc38..920ae42333a 100644 --- a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -157,7 +158,10 @@ public class BulkProcessorTests extends ElasticsearchIntegrationTest { //https://github.com/elasticsearch/elasticsearch/issues/5038 public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception { //we create a transport client with no nodes to make sure it throws NoNodeAvailableException - Client transportClient = new TransportClient(); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + Client transportClient = 
TransportClient.builder().settings(settings).build(); int bulkActions = randomIntBetween(10, 100); int numDocs = randomIntBetween(bulkActions, bulkActions + 100); diff --git a/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java index 22387d277e8..ceabdd58965 100644 --- a/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java +++ b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -46,7 +48,10 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { public static void initClient() { //this client will not be hit by any request, but it needs to be a non null proper client //that is why we create it but we don't add any transport address to it - client = new TransportClient(); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + client = TransportClient.builder().settings(settings).build(); } @AfterClass @@ -57,20 +62,20 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { @Test public void testEmptySourceToString() { - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().toString())); } @Test public void testQueryBuilderQueryToString() { - CountRequestBuilder 
countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); countRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery()).toString())); } @Test public void testStringQueryToString() { - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); String query = "{ \"match_all\" : {} }"; countRequestBuilder.setQuery(new BytesArray(query)); assertThat(countRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); @@ -78,7 +83,7 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { @Test public void testXContentBuilderQueryToString() throws IOException { - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); xContentBuilder.startObject(); xContentBuilder.startObject("match_all"); @@ -90,7 +95,7 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { @Test public void testStringSourceToString() { - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); String query = "{ \"query\": { \"match_all\" : {} } }"; countRequestBuilder.setSource(new BytesArray(query)); assertThat(countRequestBuilder.toString(), equalTo("{ \"query\": { \"match_all\" : {} } }")); @@ -98,7 +103,7 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { @Test public void testXContentBuilderSourceToString() throws IOException { - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + CountRequestBuilder countRequestBuilder = client.prepareCount(); XContentBuilder xContentBuilder = 
XContentFactory.contentBuilder(randomFrom(XContentType.values())); xContentBuilder.startObject(); xContentBuilder.startObject("match_all"); @@ -119,7 +124,7 @@ public class CountRequestBuilderTests extends ElasticsearchTestCase { " }\n" + " }\n" + " }"; - CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client).setSource(new BytesArray(source)); + CountRequestBuilder countRequestBuilder = client.prepareCount().setSource(new BytesArray(source)); String preToString = countRequestBuilder.request().source().toUtf8(); assertThat(countRequestBuilder.toString(), equalTo(source)); String postToString = countRequestBuilder.request().source().toUtf8(); diff --git a/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index 57a48bbbcc0..dec0e8238ea 100644 --- a/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.action.search; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -45,7 +47,10 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { public static void initClient() { //this client will not be hit by any request, but it needs to be a non null proper client //that is why we create it but we don't add any transport address to it - client = new TransportClient(); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + client = TransportClient.builder().settings(settings).build(); } @AfterClass @@ 
-56,20 +61,20 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { @Test public void testEmptySourceToString() { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().toString())); } @Test public void testQueryBuilderQueryToString() { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); searchRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString())); } @Test public void testXContentBuilderQueryToString() throws IOException { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); xContentBuilder.startObject(); xContentBuilder.startObject("match_all"); @@ -81,7 +86,7 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { @Test public void testStringQueryToString() { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); String query = "{ \"match_all\" : {} }"; searchRequestBuilder.setQuery(query); assertThat(searchRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); @@ -89,7 +94,7 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { @Test public void testStringSourceToString() { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); String source = "{ \"query\" : { \"match_all\" : {} } }"; 
searchRequestBuilder.setSource(source); assertThat(searchRequestBuilder.toString(), equalTo(source)); @@ -97,7 +102,7 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { @Test public void testXContentBuilderSourceToString() throws IOException { - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); xContentBuilder.startObject(); xContentBuilder.startObject("query"); @@ -120,7 +125,7 @@ public class SearchRequestBuilderTests extends ElasticsearchTestCase { " }\n" + " }\n" + " }"; - SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client).setSource(source); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch().setSource(source); String preToString = searchRequestBuilder.request().source().toUtf8(); assertThat(searchRequestBuilder.toString(), equalTo(source)); String postToString = searchRequestBuilder.request().source().toUtf8(); diff --git a/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 1eccf3521f0..e32eb6cdb7c 100644 --- a/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -92,7 +92,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { } } - PlainListenableActionFuture future = new PlainListenableActionFuture<>(false, null); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(null); transportAction.execute(new TestRequest(), future); try { assertThat(future.get(), notNullValue()); @@ -174,7 +174,7 @@ public class TransportActionFilterChainTests extends ElasticsearchTestCase { } } - PlainListenableActionFuture future = 
new PlainListenableActionFuture<>(false, null); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(null); transportAction.execute(new TestRequest(), future); try { assertThat(future.get(), notNullValue()); diff --git a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java index a28c78ccd34..7bd0cf373a5 100644 --- a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java +++ b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.support.replication; +import com.google.common.base.Predicate; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -41,9 +42,11 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; @@ -55,7 +58,9 @@ import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseOptions; import 
org.elasticsearch.transport.TransportService; import org.junit.AfterClass; import org.junit.Before; @@ -66,6 +71,7 @@ import java.io.IOException; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -82,7 +88,10 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { private TransportService transportService; private CapturingTransport transport; private Action action; - + /* * + * TransportShardReplicationOperationAction needs an instance of IndexShard to count operations. + * indexShards is reset to null before each test and will be initialized upon request in the tests. + */ @BeforeClass public static void beforeClass() { @@ -97,6 +106,7 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { transportService = new TransportService(transport, threadPool); transportService.start(); action = new Action(ImmutableSettings.EMPTY, "testAction", transportService, clusterService, threadPool); + count.set(1); } @AfterClass @@ -105,7 +115,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { threadPool = null; } - void assertListenerThrows(String msg, PlainActionFuture listener, Class klass) throws InterruptedException { try { listener.get(); @@ -113,7 +122,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { } catch (ExecutionException ex) { assertThat(ex.getCause(), instanceOf(klass)); } - } @Test @@ -145,7 +153,12 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { block = ClusterBlocks.builder() .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); - assertListenerThrows("primary phase should 
fail operation when moving from a retryable block a non-retryable one", listener, ClusterBlockException.class); + assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, ClusterBlockException.class); + assertIndexShardUninitialized(); + } + + public void assertIndexShardUninitialized() { + assertEquals(1, count.get()); } ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) { @@ -163,7 +176,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { replicaStates[i] = ShardRoutingState.UNASSIGNED; } return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); - } ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { @@ -225,7 +237,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { } indexShardRoutingBuilder.addShard( new ImmutableShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState, 0)); - } ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); @@ -268,6 +279,7 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { listener.get(); assertTrue("request wasn't processed on primary, despite of it being assigned", request.processedOnPrimary.get()); + assertIndexShardCounter(1); } @Test @@ -290,17 +302,23 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { if (primaryNodeId.equals(clusterService.localNode().id())) { logger.info("--> primary is assigned locally, testing for execution"); assertTrue("request failed to be processed on a local primary", request.processedOnPrimary.get()); + if (transport.capturedRequests().length > 0) { + assertIndexShardCounter(2); + } else { + assertIndexShardCounter(1); + } } else { logger.info("--> primary is assigned to [{}], checking request 
forwarded", primaryNodeId); final List capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); assertThat(capturedRequests.get(0).action, equalTo("testAction")); + assertIndexShardUninitialized(); } } @Test - public void testWriteConsistency() { + public void testWriteConsistency() throws ExecutionException, InterruptedException { action = new ActionWithConsistency(ImmutableSettings.EMPTY, "testActionWithConsistency", transportService, clusterService, threadPool); final String index = "test"; final ShardId shardId = new ShardId(index, 0); @@ -348,17 +366,23 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), nullValue()); primaryPhase.run(); assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get()); + if (assignedReplicas > 0) { + assertIndexShardCounter(2); + } else { + assertIndexShardCounter(1); + } } else { assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), notNullValue()); primaryPhase.run(); assertFalse("operations should not have been perform, consistency level is *NOT* met", request.processedOnPrimary.get()); + assertIndexShardUninitialized(); for (int i = 0; i < replicaStates.length; i++) { replicaStates[i] = ShardRoutingState.STARTED; } clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get()); + assertIndexShardCounter(2); } - } @Test @@ -407,7 +431,6 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { totalShards++; } } - runReplicateTest(shardRoutingTable, assignedReplicas, totalShards); } @@ -421,13 +444,14 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { 
logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint()); - final TransportShardReplicationOperationAction.InternalRequest internalRequest = action.new InternalRequest(request); internalRequest.concreteIndex(shardId.index().name()); + Releasable reference = getOrCreateIndexShardOperationsCounter(); + assertIndexShardCounter(2); TransportShardReplicationOperationAction.ReplicationPhase replicationPhase = action.new ReplicationPhase(shardIt, request, new Response(), new ClusterStateObserver(clusterService, logger), - primaryShard, internalRequest, listener); + primaryShard, internalRequest, listener, reference); assertThat(replicationPhase.totalShards(), equalTo(totalShards)); assertThat(replicationPhase.pending(), equalTo(assignedReplicas)); @@ -472,8 +496,158 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { for (CapturingTransport.CapturedRequest capturedRequest : transport.capturedRequests()) { assertThat(capturedRequest.action, equalTo(ShardStateAction.SHARD_FAILED_ACTION_NAME)); } + // all replicas have responded so the counter should be decreased again + assertIndexShardCounter(1); } + @Test + public void testCounterOnPrimary() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + // no replica, we only want to test on primary + clusterService.setState(state(index, true, + ShardRoutingState.STARTED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Request request = new Request(shardId).timeout("100ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + + /** + * Execute an action that is stuck in shard operation until a latch is counted down. 
+ * That way we can start the operation, check if the counter was incremented and then unblock the operation + * again to see if the counter is decremented afterwards. + * TODO: I could also write an action that asserts that the counter is 2 in the shard operation. + * However, this failure would only become apparent once listener.get is called. Seems a little implicit. + * */ + action = new ActionWithDelay(ImmutableSettings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); + final TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + Thread t = new Thread() { + public void run() { + primaryPhase.run(); + } + }; + t.start(); + // shard operation should be ongoing, so the counter is at 2 + // we have to wait here because increment happens in thread + awaitBusy(new Predicate() { + @Override + public boolean apply(@Nullable Object input) { + return (count.get() == 2); + } + }); + + assertIndexShardCounter(2); + assertThat(transport.capturedRequests().length, equalTo(0)); + ((ActionWithDelay) action).countDownLatch.countDown(); + t.join(); + listener.get(); + // operation finished, counter back to 1 + assertIndexShardCounter(1); + assertThat(transport.capturedRequests().length, equalTo(0)); + } + + @Test + public void testCounterIncrementedWhileReplicationOngoing() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + // one replica to make sure replication is attempted + clusterService.setState(state(index, true, + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Request request = new Request(shardId).timeout("100ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + 
primaryPhase.run(); + assertIndexShardCounter(2); + assertThat(transport.capturedRequests().length, equalTo(1)); + // try once with successful response + transport.handleResponse(transport.capturedRequests()[0].requestId, TransportResponse.Empty.INSTANCE); + assertIndexShardCounter(1); + transport.clear(); + request = new Request(shardId).timeout("100ms"); + primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + assertIndexShardCounter(2); + assertThat(transport.capturedRequests().length, equalTo(1)); + // try with failure response + transport.handleResponse(transport.capturedRequests()[0].requestId, new CorruptIndexException("simulated", (String) null)); + assertIndexShardCounter(1); + } + + @Test + public void testReplicasCounter() throws Exception { + final ShardId shardId = new ShardId("test", 0); + clusterService.setState(state(shardId.index().getName(), true, + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + action = new ActionWithDelay(ImmutableSettings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); + final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler(); + Thread t = new Thread() { + public void run() { + try { + replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel()); + } catch (Exception e) { + } + } + }; + t.start(); + // shard operation should be ongoing, so the counter is at 2 + // we have to wait here because increment happens in thread + awaitBusy(new Predicate() { + @Override + public boolean apply(@Nullable Object input) { + return count.get() == 2; + } + }); + ((ActionWithDelay) action).countDownLatch.countDown(); + t.join(); + // operation should have finished and counter decreased because no outstanding replica requests + assertIndexShardCounter(1); + // now check if this also works if operation throws exception + action = new 
ActionWithExceptions(ImmutableSettings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); + final Action.ReplicaOperationTransportHandler replicaOperationTransportHandlerForException = action.new ReplicaOperationTransportHandler(); + try { + replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel()); + fail(); + } catch (Throwable t2) { + } + assertIndexShardCounter(1); + } + + @Test + public void testCounterDecrementedIfShardOperationThrowsException() throws InterruptedException, ExecutionException, IOException { + action = new ActionWithExceptions(ImmutableSettings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool); + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + clusterService.setState(state(index, true, + ShardRoutingState.STARTED, ShardRoutingState.STARTED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Request request = new Request(shardId).timeout("100ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + // no replica request should have been sent yet + assertThat(transport.capturedRequests().length, equalTo(0)); + // no matter if the operation is retried or not, counter must be back to 1 + assertIndexShardCounter(1); + } + + private void assertIndexShardCounter(int expected) { + assertThat(count.get(), equalTo(expected)); + } + + private final AtomicInteger count = new AtomicInteger(0); + + /* + * Returns testIndexShardOperationsCounter or initializes it if it was not already created in this test run. 
+ * */ + private synchronized Releasable getOrCreateIndexShardOperationsCounter() { + count.incrementAndGet(); + return new Releasable() { + @Override + public void close() { + count.decrementAndGet(); + } + }; + } static class Request extends ShardReplicationOperationRequest { int shardId; @@ -481,7 +655,7 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { public AtomicInteger processedOnReplicas = new AtomicInteger(); Request() { - this.operationThreaded(false); + this.operationThreaded(randomBoolean()); } Request(ShardId shardId) { @@ -505,10 +679,9 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { } static class Response extends ActionWriteResponse { - } - static class Action extends TransportShardReplicationOperationAction { + class Action extends TransportShardReplicationOperationAction { Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, @@ -549,9 +722,14 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { protected boolean resolveIndex() { return false; } + + @Override + protected Releasable getIndexShardOperationsCounter(ShardId shardId) { + return getOrCreateIndexShardOperationsCounter(); + } } - static class ActionWithConsistency extends Action { + class ActionWithConsistency extends Action { ActionWithConsistency(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { super(settings, actionName, transportService, clusterService, threadPool); @@ -567,5 +745,97 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase { return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT); } + /* + * Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails. 
+ * */ + class ActionWithExceptions extends Action { + + ActionWithExceptions(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) throws IOException { + super(settings, actionName, transportService, clusterService, threadPool); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + return throwException(shardRequest.shardId); + } + + private Tuple throwException(ShardId shardId) { + try { + if (randomBoolean()) { + // throw a generic exception + // for testing on replica this will actually cause an NPE because it will make the shard fail but + // for this we need an IndicesService which is null. + throw new ElasticsearchException("simulated"); + } else { + // throw an exception which will cause retry on primary and be ignored on replica + throw new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING); + } + } catch (Exception e) { + logger.info("throwing ", e); + throw e; + } + } + + @Override + protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) { + throwException(shardRequest.internalShardId); + } + } + + /** + * Delays the operation until countDownLatch is counted down + */ + class ActionWithDelay extends Action { + CountDownLatch countDownLatch = new CountDownLatch(1); + + ActionWithDelay(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) throws IOException { + super(settings, actionName, transportService, clusterService, threadPool); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + awaitLatch(); + return new Tuple<>(new Response(), shardRequest.request); + } + + private void awaitLatch() throws InterruptedException { + countDownLatch.await(); + countDownLatch = new CountDownLatch(1); + } 
+ + @Override + protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) { + try { + awaitLatch(); + } catch (InterruptedException e) { + } + } + + } + + /* + * Transport channel that is needed for replica operation testing. + * */ + public TransportChannel createTransportChannel() { + return new TransportChannel() { + + @Override + public String action() { + return null; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + } + + @Override + public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { + } + + @Override + public void sendResponse(Throwable error) throws IOException { + } + }; + } } diff --git a/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java b/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java index ecc767c3274..58a96aa5e1f 100644 --- a/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java +++ b/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java @@ -74,7 +74,7 @@ public class MultiTermVectorsTests extends AbstractTermVectorsTests { @Test public void testMissingIndexThrowsMissingIndex() throws Exception { TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", "typeX", Integer.toString(1)); - MultiTermVectorsRequestBuilder mtvBuilder = new MultiTermVectorsRequestBuilder(client()); + MultiTermVectorsRequestBuilder mtvBuilder = client().prepareMultiTermVectors(); mtvBuilder.add(requestBuilder.request()); MultiTermVectorsResponse response = mtvBuilder.execute().actionGet(); assertThat(response.getResponses().length, equalTo(1)); diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java index 4c2ddcd47eb..33433f1494c 100644 --- a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java +++ 
b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -24,12 +24,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ElasticsearchTestCase; -import java.io.ByteArrayInputStream; import java.io.FilePermission; import java.nio.file.Path; -import java.security.Policy; -import java.security.ProtectionDomain; -import java.security.URIParameter; +import java.security.Permissions; public class SecurityTests extends ElasticsearchTestCase { @@ -42,17 +39,25 @@ public class SecurityTests extends ElasticsearchTestCase { settingsBuilder.put("path.home", esHome.toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); - Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + Environment environment = new Environment(settings); + Path fakeTmpDir = createTempDir(); + String realTmpDir = System.getProperty("java.io.tmpdir"); + Permissions permissions; + try { + System.setProperty("java.io.tmpdir", fakeTmpDir.toString()); + permissions = Security.createPermissions(environment); + } finally { + System.setProperty("java.io.tmpdir", realTmpDir); + } - ProtectionDomain domain = getClass().getProtectionDomain(); - Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); // the fake es home - assertTrue(policy.implies(domain, new FilePermission(esHome.toString(), "read"))); + assertTrue(permissions.implies(new FilePermission(esHome.toString(), "read"))); // its parent - assertFalse(policy.implies(domain, new FilePermission(path.toString(), "read"))); + assertFalse(permissions.implies(new FilePermission(path.toString(), "read"))); // some other sibling - assertFalse(policy.implies(domain, new FilePermission(path.resolve("other").toString(), "read"))); + assertFalse(permissions.implies(new FilePermission(path.resolve("other").toString(), "read"))); + // double check we 
overwrote java.io.tmpdir correctly for the test + assertFalse(permissions.implies(new FilePermission(realTmpDir.toString(), "read"))); } /** test generated permissions for all configured paths */ @@ -67,29 +72,38 @@ public class SecurityTests extends ElasticsearchTestCase { settingsBuilder.put("path.logs", path.resolve("logs").toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); - Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); - - ProtectionDomain domain = getClass().getProtectionDomain(); - Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + Environment environment = new Environment(settings); + Path fakeTmpDir = createTempDir(); + String realTmpDir = System.getProperty("java.io.tmpdir"); + Permissions permissions; + try { + System.setProperty("java.io.tmpdir", fakeTmpDir.toString()); + permissions = Security.createPermissions(environment); + } finally { + System.setProperty("java.io.tmpdir", realTmpDir); + } // check that all directories got permissions: // homefile: this is needed unless we break out rules for "lib" dir. // TODO: make read-only - assertTrue(policy.implies(domain, new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); // config file // TODO: make read-only - assertTrue(policy.implies(domain, new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); // plugins: r/w, TODO: can this be minimized? 
- assertTrue(policy.implies(domain, new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); // data paths: r/w for (Path dataPath : environment.dataFiles()) { - assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); } for (Path dataPath : environment.dataWithClusterFiles()) { - assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); } // logs: r/w - assertTrue(policy.implies(domain, new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); + assertTrue(permissions.implies(new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); + // temp dir: r/w + assertTrue(permissions.implies(new FilePermission(fakeTmpDir.toString(), "read,readlink,write,delete"))); + // double check we overwrote java.io.tmpdir correctly for the test + assertFalse(permissions.implies(new FilePermission(realTmpDir.toString(), "read"))); } } diff --git a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java index c55681d3fd2..e67890bf030 100644 --- a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java @@ -104,6 +104,6 @@ public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsComp private TransportClient newTransportClient() { Settings settings = ImmutableSettings.settingsBuilder().put("client.transport.ignore_cluster_name", true) 
.put("node.name", "transport_client_" + getTestName()).build(); - return new TransportClient(settings); + return TransportClient.builder().settings(settings).build(); } } diff --git a/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java b/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java index 0f93011a249..63b45909944 100644 --- a/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java @@ -48,7 +48,7 @@ public class NodesStatsBasicBackwardsCompatTests extends ElasticsearchBackwardsC // We explicitly connect to each node with a custom TransportClient for (NodeInfo n : nodesInfo.getNodes()) { - TransportClient tc = new TransportClient(settings).addTransportAddress(n.getNode().address()); + TransportClient tc = TransportClient.builder().settings(settings).build().addTransportAddress(n.getNode().address()); // Just verify that the NS can be sent and serialized/deserialized between nodes with basic indices NodesStatsResponse ns = tc.admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); tc.close(); @@ -67,7 +67,7 @@ public class NodesStatsBasicBackwardsCompatTests extends ElasticsearchBackwardsC // We explicitly connect to each node with a custom TransportClient for (NodeInfo n : nodesInfo.getNodes()) { - TransportClient tc = new TransportClient(settings).addTransportAddress(n.getNode().address()); + TransportClient tc = TransportClient.builder().settings(settings).build().addTransportAddress(n.getNode().address()); // randomize the combination of flags set // Uses reflection to find methods in an attempt to future-proof this test against newly added flags diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 2ff82d9f464..75f2eb1a6f2 100644 --- 
a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.bwcompat; import com.google.common.util.concurrent.ListenableFuture; - import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -60,16 +59,11 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; import java.util.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -381,6 +375,7 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio // TODO: remove this once #10262 is fixed return; } + // these documents are supposed to be deleted by a delete by query operation in the translog SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.queryStringQuery("long_sort:[10 TO 20]")); assertEquals(0, searchReq.get().getHits().getTotalHits()); } diff --git a/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java index a6bd13731f9..195165715aa 100644 --- a/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java +++ b/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java @@ -50,7 +50,7 @@ public class TransportClientBackwardsCompatibilityTest extends ElasticsearchBack 
CompositeTestCluster compositeTestCluster = backwardsCluster(); TransportAddress transportAddress = compositeTestCluster.externalTransportAddress(); - try(TransportClient client = new TransportClient(settings)) { + try(TransportClient client = TransportClient.builder().settings(settings).build()) { client.addTransportAddress(transportAddress); assertAcked(client.admin().indices().prepareCreate("test")); diff --git a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java index 05561a9dec0..c9c7a9fdf52 100644 --- a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportMessage; import org.junit.After; import org.junit.Before; @@ -84,16 +85,23 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE }; + protected ThreadPool threadPool; private Client client; @Before public void initClient() { - client = buildClient(HEADER_SETTINGS, ACTIONS); + Settings settings = ImmutableSettings.builder() + .put(HEADER_SETTINGS) + .put("path.home", createTempDir().toString()) + .build(); + threadPool = new ThreadPool("test-" + getTestName()); + client = buildClient(settings, ACTIONS); } @After - public void cleanupClient() { + public void cleanupClient() throws Exception { client.close(); + terminate(threadPool); } protected abstract Client buildClient(Settings headersSettings, GenericAction[] testedActions); diff --git 
a/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 1bf3bf5be32..9f8dc0cf2f6 100644 --- a/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -42,29 +42,12 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTests { private static final ActionFilters EMPTY_FILTERS = new ActionFilters(ImmutableSet.of()); - private ThreadPool threadPool; - - @Before - public void init() { - threadPool = new ThreadPool("test"); - } - - @After - public void cleanup() throws InterruptedException { - terminate(threadPool); - } - @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { Settings settings = HEADER_SETTINGS; - Headers headers = new Headers(settings); Actions actions = new Actions(settings, threadPool, testedActions); - - NodeClusterAdminClient clusterClient = new NodeClusterAdminClient(threadPool, actions, headers); - NodeIndicesAdminClient indicesClient = new NodeIndicesAdminClient(threadPool, actions, headers); - NodeAdminClient adminClient = new NodeAdminClient(settings, clusterClient, indicesClient); - return new NodeClient(settings, threadPool, adminClient, actions, headers); + return new NodeClient(settings, threadPool, headers, actions); } private static class Actions extends HashMap { diff --git a/src/test/java/org/elasticsearch/client/transport/InternalTransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/InternalTransportClientTests.java deleted file mode 100644 index bdccf82deb1..00000000000 --- a/src/test/java/org/elasticsearch/client/transport/InternalTransportClientTests.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.action.*; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; -import org.elasticsearch.action.admin.indices.IndicesAction; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.client.IndicesAdminClient; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.support.InternalTransportAdminClient; -import org.elasticsearch.client.transport.support.InternalTransportClient; -import org.elasticsearch.client.transport.support.InternalTransportClusterAdminClient; -import org.elasticsearch.client.transport.support.InternalTransportIndicesAdminClient; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.transport.LocalTransportAddress; -import org.elasticsearch.test.ElasticsearchTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.junit.Test; - -import java.io.Closeable; -import java.util.HashMap; -import java.util.Map; 
-import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.hamcrest.CoreMatchers.*; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; - -public class InternalTransportClientTests extends ElasticsearchTestCase { - - private static class TestIteration implements Closeable { - private final ThreadPool threadPool; - private final FailAndRetryMockTransport transport; - private final TransportService transportService; - private final TransportClientNodesService transportClientNodesService; - private final InternalTransportClient internalTransportClient; - private final int nodesCount; - - TestIteration() { - threadPool = new ThreadPool("internal-transport-client-tests"); - transport = new FailAndRetryMockTransport(getRandom()) { - @Override - protected TestResponse newResponse() { - return new TestResponse(); - } - }; - transportService = new TransportService(ImmutableSettings.EMPTY, transport, threadPool); - transportService.start(); - transportClientNodesService = new TransportClientNodesService(ImmutableSettings.EMPTY, ClusterName.DEFAULT, transportService, threadPool, Headers.EMPTY, Version.CURRENT); - Map actions = new HashMap<>(); - actions.put(NodesInfoAction.NAME, NodesInfoAction.INSTANCE); - actions.put(TestAction.NAME, TestAction.INSTANCE); - actions.put(IndicesAdminTestAction.NAME, IndicesAdminTestAction.INSTANCE); - actions.put(ClusterAdminTestAction.NAME, ClusterAdminTestAction.INSTANCE); - - InternalTransportIndicesAdminClient indicesAdminClient = new InternalTransportIndicesAdminClient(ImmutableSettings.EMPTY, transportClientNodesService, transportService, threadPool, actions, Headers.EMPTY); - InternalTransportClusterAdminClient clusterAdminClient = new InternalTransportClusterAdminClient(ImmutableSettings.EMPTY, transportClientNodesService, 
threadPool, transportService, actions, Headers.EMPTY); - InternalTransportAdminClient adminClient = new InternalTransportAdminClient(ImmutableSettings.EMPTY, indicesAdminClient, clusterAdminClient); - internalTransportClient = new InternalTransportClient(ImmutableSettings.EMPTY, threadPool, transportService, transportClientNodesService, adminClient, actions, Headers.EMPTY); - - nodesCount = randomIntBetween(1, 10); - for (int i = 0; i < nodesCount; i++) { - transportClientNodesService.addTransportAddresses(new LocalTransportAddress("node" + i)); - } - transport.endConnectMode(); - } - - @Override - public void close() { - threadPool.shutdown(); - try { - threadPool.awaitTermination(1, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().isInterrupted(); - } - transportService.stop(); - transportClientNodesService.close(); - internalTransportClient.close(); - } - } - - @Test - public void testListenerFailures() throws InterruptedException { - - int iters = iterations(10, 100); - for (int i = 0; i < iters; i++) { - try(final TestIteration iteration = new TestIteration()) { - final CountDownLatch latch = new CountDownLatch(1); - final AtomicInteger finalFailures = new AtomicInteger(); - final AtomicReference finalFailure = new AtomicReference<>(); - final AtomicReference response = new AtomicReference<>(); - ActionListener actionListener = new ActionListener() { - @Override - public void onResponse(TestResponse testResponse) { - response.set(testResponse); - latch.countDown(); - } - - @Override - public void onFailure(Throwable e) { - finalFailures.incrementAndGet(); - finalFailure.set(e); - latch.countDown(); - } - }; - - final AtomicInteger preSendFailures = new AtomicInteger(); - - Action action = randomFrom(Action.values()); - action.execute(iteration, actionListener); - - assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); - - //there can be only either one failure that causes the request to fail straightaway or success - 
assertThat(preSendFailures.get() + iteration.transport.failures() + iteration.transport.successes(), lessThanOrEqualTo(1)); - - if (iteration.transport.successes() == 1) { - assertThat(finalFailures.get(), equalTo(0)); - assertThat(finalFailure.get(), nullValue()); - assertThat(response.get(), notNullValue()); - } else { - assertThat(finalFailures.get(), equalTo(1)); - assertThat(finalFailure.get(), notNullValue()); - assertThat(response.get(), nullValue()); - if (preSendFailures.get() == 0 && iteration.transport.failures() == 0) { - assertThat(finalFailure.get(), instanceOf(NoNodeAvailableException.class)); - } - } - - assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.nodesCount)); - assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() + iteration.transport.failures() + iteration.transport.successes())); - } - } - } - - @Test - public void testSyncFailures() throws InterruptedException { - - int iters = iterations(10, 100); - for (int i = 0; i < iters; i++) { - try(final TestIteration iteration = new TestIteration()) { - TestResponse testResponse = null; - Throwable finalFailure = null; - - try { - Action action = randomFrom(Action.values()); - ActionFuture future = action.execute(iteration); - testResponse = future.actionGet(); - } catch (Throwable t) { - finalFailure = t; - } - - //there can be only either one failure that causes the request to fail straightaway or success - assertThat(iteration.transport.failures() + iteration.transport.successes(), lessThanOrEqualTo(1)); - - if (iteration.transport.successes() == 1) { - assertThat(finalFailure, nullValue()); - assertThat(testResponse, notNullValue()); - } else { - assertThat(testResponse, nullValue()); - assertThat(finalFailure, notNullValue()); - if (iteration.transport.failures() == 0) { - assertThat(finalFailure, instanceOf(NoNodeAvailableException.class)); - } - } - - assertThat(iteration.transport.triedNodes().size(), 
lessThanOrEqualTo(iteration.nodesCount)); - assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() + iteration.transport.failures() + iteration.transport.successes())); - } - } - } - - private static enum Action { - TEST { - @Override - ActionFuture execute(TestIteration iteration) { - return iteration.internalTransportClient.execute(TestAction.INSTANCE, new TestRequest()); - } - - @Override - void execute(TestIteration iteration, ActionListener listener) { - iteration.internalTransportClient.execute(TestAction.INSTANCE, new TestRequest(), listener); - } - }, - INDICES_ADMIN { - @Override - ActionFuture execute(TestIteration iteration) { - return iteration.internalTransportClient.admin().indices().execute(IndicesAdminTestAction.INSTANCE, new TestRequest()); - } - - @Override - void execute(TestIteration iteration, ActionListener listener) { - iteration.internalTransportClient.admin().indices().execute(IndicesAdminTestAction.INSTANCE, new TestRequest(), listener); - } - }, - CLUSTER_ADMIN { - @Override - ActionFuture execute(TestIteration iteration) { - return iteration.internalTransportClient.admin().cluster().execute(ClusterAdminTestAction.INSTANCE, new TestRequest()); - } - - @Override - void execute(TestIteration iteration, ActionListener listener) { - iteration.internalTransportClient.admin().cluster().execute(ClusterAdminTestAction.INSTANCE, new TestRequest(), listener); - } - }; - - abstract ActionFuture execute(TestIteration iteration); - - abstract void execute(TestIteration iteration, ActionListener listener); - } - - private static class TestRequest extends ActionRequest { - @Override - public ActionRequestValidationException validate() { - return null; - } - } - - private static class TestResponse extends ActionResponse { - - } - - private static class TestAction extends ClientAction { - static final String NAME = "test-action"; - static final TestAction INSTANCE = new TestAction(NAME); - - private 
TestAction(String name) { - super(name); - } - - @Override - public TestRequestBuilder newRequestBuilder(Client client) { - throw new UnsupportedOperationException(); - } - - @Override - public TestResponse newResponse() { - return new TestResponse(); - } - } - - private static class TestRequestBuilder extends ActionRequestBuilder { - - protected TestRequestBuilder(Client client, TestRequest request) { - super(client, request); - } - - @Override - protected void doExecute(ActionListener listener) { - throw new UnsupportedOperationException(); - } - } - - private static class IndicesAdminTestAction extends IndicesAction { - static final String NAME = "test-indices-action"; - static final IndicesAdminTestAction INSTANCE = new IndicesAdminTestAction(NAME); - - private IndicesAdminTestAction(String name) { - super(name); - } - - @Override - public IndicesAdminTestRequestBuilder newRequestBuilder(IndicesAdminClient client) { - throw new UnsupportedOperationException(); - } - - @Override - public TestResponse newResponse() { - return new TestResponse(); - } - } - - private static class IndicesAdminTestRequestBuilder extends ActionRequestBuilder { - - protected IndicesAdminTestRequestBuilder(IndicesAdminClient client, TestRequest request) { - super(client, request); - } - - @Override - protected void doExecute(ActionListener listener) { - throw new UnsupportedOperationException(); - } - } - - private static class ClusterAdminTestAction extends ClusterAction { - static final String NAME = "test-cluster-action"; - static final ClusterAdminTestAction INSTANCE = new ClusterAdminTestAction(NAME); - - private ClusterAdminTestAction(String name) { - super(name); - } - - @Override - public ClusterAdminTestRequestBuilder newRequestBuilder(ClusterAdminClient client) { - throw new UnsupportedOperationException(); - } - - @Override - public TestResponse newResponse() { - return new TestResponse(); - } - } - - private static class ClusterAdminTestRequestBuilder extends 
ActionRequestBuilder { - - protected ClusterAdminTestRequestBuilder(ClusterAdminClient client, TestRequest request) { - super(client, request); - } - - @Override - protected void doExecute(ActionListener listener) { - throw new UnsupportedOperationException(); - } - } -} diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index c744c3cd7ee..8400b490301 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -55,12 +55,12 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTests { @Override protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) { - TransportClient client = new TransportClient(ImmutableSettings.builder() + TransportClient client = TransportClient.builder().settings(ImmutableSettings.builder() .put("client.transport.sniff", false) .put("node.name", "transport_client_" + this.getTestName()) .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, InternalTransportService.class.getName()) - .put(HEADER_SETTINGS) - .build()); + .put(headersSettings) + .build()).build(); client.addTransportAddress(address); return client; @@ -68,14 +68,15 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTests { @Test public void testWithSniffing() throws Exception { - TransportClient client = new TransportClient(ImmutableSettings.builder() + TransportClient client = TransportClient.builder().settings(ImmutableSettings.builder() .put("client.transport.sniff", true) .put("cluster.name", "cluster1") .put("node.name", "transport_client_" + this.getTestName() + "_1") .put("client.transport.nodes_sampler_interval", "1s") .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, InternalTransportService.class.getName()) .put(HEADER_SETTINGS) - .build()); + 
.put("path.home", createTempDir().toString()) + .build()).build(); try { client.addTransportAddress(address); diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java index c1d11bb2312..b0090fff142 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java @@ -62,9 +62,10 @@ public class TransportClientRetryTests extends ElasticsearchIntegrationTest { .put("node.mode", InternalTestCluster.nodeMode()) .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false) .put(ClusterName.SETTING, internalCluster().getClusterName()) - .put("config.ignore_system_properties", true); + .put("config.ignore_system_properties", true) + .put("path.home", createTempDir()); - try (TransportClient transportClient = new TransportClient(builder.build())) { + try (TransportClient transportClient = TransportClient.builder().settings(builder.build()).build()) { transportClient.addTransportAddresses(addresses); assertThat(transportClient.connectedNodes().size(), equalTo(internalCluster().size())); @@ -84,7 +85,7 @@ public class TransportClientRetryTests extends ElasticsearchIntegrationTest { if (randomBoolean()) { clusterState = transportClient.admin().cluster().state(clusterStateRequest).get().getState(); } else { - PlainListenableActionFuture future = new PlainListenableActionFuture<>(clusterStateRequest.listenerThreaded(), transportClient.threadPool()); + PlainListenableActionFuture future = new PlainListenableActionFuture<>(transportClient.threadPool()); transportClient.admin().cluster().state(clusterStateRequest, future); clusterState = future.get().getState(); } diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java index 
8e347935241..3abb071d4ee 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.node.Node; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -35,7 +34,10 @@ import org.junit.Test; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.node.NodeBuilder.nodeBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 1.0) public class TransportClientTests extends ElasticsearchIntegrationTest { @@ -92,7 +94,8 @@ public class TransportClientTests extends ElasticsearchIntegrationTest { @Test public void testThatTransportClientSettingCannotBeChanged() { - try (TransportClient client = new TransportClient(settingsBuilder().put(Client.CLIENT_TYPE_SETTING, "anything"))) { + Settings baseSettings = settingsBuilder().put(Client.CLIENT_TYPE_SETTING, "anything").put("path.home", createTempDir()).build(); + try (TransportClient client = TransportClient.builder().settings(baseSettings).build()) { Settings settings = client.injector.getInstance(Settings.class); assertThat(settings.get(Client.CLIENT_TYPE_SETTING), is("transport")); } diff --git 
a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java index af419cf68a9..d261b0a3b01 100644 --- a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java +++ b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java @@ -161,7 +161,7 @@ public class NoMasterNodeTests extends ElasticsearchIntegrationTest { client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet(); } - void checkWriteAction(boolean autoCreateIndex, TimeValue timeout, ActionRequestBuilder builder) { + void checkWriteAction(boolean autoCreateIndex, TimeValue timeout, ActionRequestBuilder builder) { // we clean the metadata when loosing a master, therefore all operations on indices will auto create it, if allowed long now = System.currentTimeMillis(); try { diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java index 07e83bed51b..cecb6a4a498 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.allocation; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule; import org.elasticsearch.common.settings.ImmutableSettings; @@ -42,20 +41,20 @@ public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest { } public void testLoadByShortKeyShardsAllocator() throws IOException { - Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, 
ShardsAllocatorModule.EVEN_SHARD_COUNT_ALLOCATOR_KEY) + Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "even_shard") // legacy just to make sure we don't barf .build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + assertAllocatorInstance(build, BalancedShardsAllocator.class); build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build(); assertAllocatorInstance(build, BalancedShardsAllocator.class); } public void testLoadByClassNameShardsAllocator() throws IOException { - Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "EvenShardsCount").build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "BalancedShards").build(); + assertAllocatorInstance(build, BalancedShardsAllocator.class); build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, - "org.elasticsearch.cluster.routing.allocation.allocator.EvenShardsCountAllocator").build(); - assertAllocatorInstance(build, EvenShardsCountAllocator.class); + "org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator").build(); + assertAllocatorInstance(build, BalancedShardsAllocator.class); } private void assertAllocatorInstance(Settings settings, Class clazz) throws IOException { diff --git a/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java b/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java index 112d8969433..0d131af78ae 100644 --- a/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java +++ b/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java @@ -54,73 +54,73 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { @Test public void testThatCommandLogsErrorMessageOnFail() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(captureOutputTerminal, Mode.CHANGE)); + 
executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(containsString("Please ensure that the user account running Elasticsearch has read access to this file"))); } @Test public void testThatCommandLogsNothingWhenPermissionRemains() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(captureOutputTerminal, Mode.KEEP)); + executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsNothingWhenDisabled() throws Exception { - executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsNothingIfFilesystemDoesNotSupportPermissions() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new PermissionCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfigurationWithoutPermissions, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsOwnerChange() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(captureOutputTerminal, Mode.CHANGE)); + executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Owner of file ["), containsString("] used to be ["), containsString("], but now is [")))); } @Test public void 
testThatCommandLogsNothingIfOwnerRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(captureOutputTerminal, Mode.KEEP)); + executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsNothingIfOwnerIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsNothingIfFileSystemDoesNotSupportOwners() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new OwnerCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfigurationWithoutPermissions, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsIfGroupChanges() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(captureOutputTerminal, Mode.CHANGE)); + executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE)); assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Group of file ["), containsString("] used to be ["), containsString("], but now is [")))); } @Test public void testThatCommandLogsNothingIfGroupRemainsSame() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(captureOutputTerminal, Mode.KEEP)); + executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test 
public void testThatCommandLogsNothingIfGroupIsDisabled() throws Exception { - executeCommand(jimFsConfiguration, new GroupCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @Test public void testThatCommandLogsNothingIfFileSystemDoesNotSupportGroups() throws Exception { - executeCommand(jimFsConfigurationWithoutPermissions, new GroupCheckFileCommand(captureOutputTerminal, Mode.DISABLED)); + executeCommand(jimFsConfigurationWithoutPermissions, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED)); assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0)); } @@ -130,7 +130,10 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { try (FileSystem fs = Jimfs.newFileSystem(configuration)) { Path path = fs.getPath(randomAsciiOfLength(10)); - new CreateFileCommand(captureOutputTerminal, path).execute(ImmutableSettings.EMPTY, new Environment(ImmutableSettings.EMPTY)); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + new CreateFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); assertThat(Files.exists(path), is(true)); } @@ -145,7 +148,10 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { Path path = fs.getPath(randomAsciiOfLength(10)); Files.write(path, "anything".getBytes(Charsets.UTF_8)); - new DeleteFileCommand(captureOutputTerminal, path).execute(ImmutableSettings.EMPTY, new Environment(ImmutableSettings.EMPTY)); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + new DeleteFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings)); assertThat(Files.exists(path), is(false)); } @@ -163,16 +169,21 @@ public class 
CheckFileCommandTests extends ElasticsearchTestCase { protected final Mode mode; protected FileSystem fs; protected Path[] paths; + final Path baseDir; - public AbstractTestCheckFileCommand(Terminal terminal, Mode mode) throws IOException { + public AbstractTestCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { super(terminal); this.mode = mode; + this.baseDir = baseDir; } public CliTool.ExitStatus execute(FileSystem fs) throws Exception { this.fs = fs; this.paths = new Path[] { writePath(fs, "p1", "anything"), writePath(fs, "p2", "anything"), writePath(fs, "p3", "anything") }; - return super.execute(ImmutableSettings.EMPTY, new Environment(ImmutableSettings.EMPTY)); + Settings settings = ImmutableSettings.settingsBuilder() + .put("path.home", baseDir.toString()) + .build(); + return super.execute(ImmutableSettings.EMPTY, new Environment(settings)); } private Path writePath(FileSystem fs, String name, String content) throws IOException { @@ -192,8 +203,8 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { */ class PermissionCheckFileCommand extends AbstractTestCheckFileCommand { - public PermissionCheckFileCommand(Terminal terminal, Mode mode) throws IOException { - super(terminal, mode); + public PermissionCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { + super(baseDir, terminal, mode); } @Override @@ -221,8 +232,8 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { */ class OwnerCheckFileCommand extends AbstractTestCheckFileCommand { - public OwnerCheckFileCommand(Terminal terminal, Mode mode) throws IOException { - super(terminal, mode); + public OwnerCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { + super(baseDir, terminal, mode); } @Override @@ -251,8 +262,8 @@ public class CheckFileCommandTests extends ElasticsearchTestCase { */ class GroupCheckFileCommand extends AbstractTestCheckFileCommand { - public 
GroupCheckFileCommand(Terminal terminal, Mode mode) throws IOException { - super(terminal, mode); + public GroupCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException { + super(baseDir, terminal, mode); } @Override diff --git a/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java b/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java index df633b57c7b..278869388ce 100644 --- a/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java +++ b/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java @@ -22,6 +22,8 @@ package org.elasticsearch.common.cli; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.After; +import org.junit.Before; import org.junit.Ignore; import java.io.IOException; @@ -37,6 +39,16 @@ import java.util.Locale; @Ignore public abstract class CliToolTestCase extends ElasticsearchTestCase { + @Before + public void setPathHome() { + System.setProperty("es.default.path.home", createTempDir().toString()); + } + + @After + public void clearPathHome() { + System.clearProperty("es.default.path.home"); + } + protected static String[] args(String command) { if (!Strings.hasLength(command)) { return Strings.EMPTY_ARRAY; diff --git a/src/test/java/org/elasticsearch/common/jna/NativesTests.java b/src/test/java/org/elasticsearch/common/jna/NativesTests.java index 7a53b8d6895..72b973c0db9 100644 --- a/src/test/java/org/elasticsearch/common/jna/NativesTests.java +++ b/src/test/java/org/elasticsearch/common/jna/NativesTests.java @@ -20,83 +20,31 @@ package org.elasticsearch.common.jna; import org.apache.lucene.util.Constants; -import org.elasticsearch.common.jna.Kernel32Library.ConsoleCtrlHandler; import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.After; -import org.junit.Before; import org.junit.Test; -import java.util.HashMap; -import java.util.Map; - import static 
org.hamcrest.Matchers.equalTo; public class NativesTests extends ElasticsearchTestCase { - /** - * Those properties are set by the JNA Api and if not ignored, - * lead to tests failure (see AbstractRandomizedTest#IGNORED_INVARIANT_PROPERTIES) - */ - private static final String[] JNA_INVARIANT_PROPERTIES = { - "jna.platform.library.path", - "jnidispatch.path" - }; - - private Map properties = new HashMap<>(); - - @Before - public void saveProperties() { - assumeTrue("Natives can't load libraries from path if security manager is enabled.", System.getSecurityManager() == null); - for (String p : JNA_INVARIANT_PROPERTIES) { - properties.put(p, System.getProperty(p)); - } - } - - @After - public void restoreProperties() { - for (String p : JNA_INVARIANT_PROPERTIES) { - if (properties.get(p) != null) { - System.setProperty(p, properties.get(p)); - } else { - System.clearProperty(p); - } - } - } - @Test - public void testTryMlockall() { - Natives.tryMlockall(); - + public void testMlockall() { if (Constants.WINDOWS) { assertFalse("Memory locking is not available on Windows platforms", Natives.LOCAL_MLOCKALL); } + if (Constants.MAC_OS_X) { + assertFalse("Memory locking is not available on OS X platforms", Natives.LOCAL_MLOCKALL); + } } - + @Test - public void testAddConsoleCtrlHandler() { - ConsoleCtrlHandler handler = new ConsoleCtrlHandler() { - @Override - public boolean handle(int code) { - return false; - } - }; - - Natives.addConsoleCtrlHandler(handler); - + public void testConsoleCtrlHandler() { if (Constants.WINDOWS) { assertNotNull(Kernel32Library.getInstance()); assertThat(Kernel32Library.getInstance().getCallbacks().size(), equalTo(1)); - } else { assertNotNull(Kernel32Library.getInstance()); assertThat(Kernel32Library.getInstance().getCallbacks().size(), equalTo(0)); - - try { - Kernel32Library.getInstance().addConsoleCtrlHandler(handler); - fail("should have thrown an unsupported operation exception"); - } catch (UnsatisfiedLinkError e) { - // 
UnsatisfiedLinkError is expected - } } } } diff --git a/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java b/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java index c39c8a5b90f..24df66c77e8 100644 --- a/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java +++ b/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java @@ -56,6 +56,7 @@ public class Log4jESLoggerTests extends ElasticsearchTestCase { // Need to set custom path.conf so we can use a custom logging.yml file for the test Settings settings = ImmutableSettings.builder() .put("path.conf", configDir.toAbsolutePath()) + .put("path.home", createTempDir().toString()) .build(); LogConfigurator.configure(settings); diff --git a/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java b/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java index b53b434a492..1010ea734b3 100644 --- a/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java +++ b/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java @@ -59,6 +59,7 @@ public class LoggingConfigurationTests extends ElasticsearchTestCase { Path configDir = getDataPath("config"); Settings settings = ImmutableSettings.builder() .put("path.conf", configDir.toAbsolutePath()) + .put("path.home", createTempDir().toString()) .build(); LogConfigurator.configure(settings); @@ -87,7 +88,10 @@ public class LoggingConfigurationTests extends ElasticsearchTestCase { Path loggingConf = tmpDir.resolve(loggingConfiguration("json")); Files.write(loggingConf, "{\"json\": \"foo\"}".getBytes(StandardCharsets.UTF_8)); Environment environment = new Environment( - ImmutableSettings.builder().put("path.conf", tmpDir.toAbsolutePath()).build()); + ImmutableSettings.builder() + .put("path.conf", tmpDir.toAbsolutePath()) + .put("path.home", createTempDir().toString()) + .build()); 
ImmutableSettings.Builder builder = ImmutableSettings.builder(); LogConfigurator.resolveConfig(environment, builder); @@ -102,7 +106,10 @@ public class LoggingConfigurationTests extends ElasticsearchTestCase { Path loggingConf = tmpDir.resolve(loggingConfiguration("properties")); Files.write(loggingConf, "key: value".getBytes(StandardCharsets.UTF_8)); Environment environment = new Environment( - ImmutableSettings.builder().put("path.conf", tmpDir.toAbsolutePath()).build()); + ImmutableSettings.builder() + .put("path.conf", tmpDir.toAbsolutePath()) + .put("path.home", createTempDir().toString()) + .build()); ImmutableSettings.Builder builder = ImmutableSettings.builder(); LogConfigurator.resolveConfig(environment, builder); @@ -119,7 +126,10 @@ public class LoggingConfigurationTests extends ElasticsearchTestCase { Files.write(loggingConf1, "yml: bar".getBytes(StandardCharsets.UTF_8)); Files.write(loggingConf2, "yaml: bar".getBytes(StandardCharsets.UTF_8)); Environment environment = new Environment( - ImmutableSettings.builder().put("path.conf", tmpDir.toAbsolutePath()).build()); + ImmutableSettings.builder() + .put("path.conf", tmpDir.toAbsolutePath()) + .put("path.home", createTempDir().toString()) + .build()); ImmutableSettings.Builder builder = ImmutableSettings.builder(); LogConfigurator.resolveConfig(environment, builder); @@ -135,7 +145,10 @@ public class LoggingConfigurationTests extends ElasticsearchTestCase { Path invalidSuffix = tmpDir.resolve(loggingConfiguration(randomFrom(LogConfigurator.ALLOWED_SUFFIXES)) + randomInvalidSuffix()); Files.write(invalidSuffix, "yml: bar".getBytes(StandardCharsets.UTF_8)); Environment environment = new Environment( - ImmutableSettings.builder().put("path.conf", invalidSuffix.toAbsolutePath()).build()); + ImmutableSettings.builder() + .put("path.conf", invalidSuffix.toAbsolutePath()) + .put("path.home", createTempDir().toString()) + .build()); ImmutableSettings.Builder builder = ImmutableSettings.builder(); 
LogConfigurator.resolveConfig(environment, builder); diff --git a/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java new file mode 100644 index 00000000000..06b958d47aa --- /dev/null +++ b/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import com.google.common.collect.ImmutableSet; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.test.ElasticsearchAllocationTestCase; +import org.junit.Test; + +import java.util.*; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.hamcrest.Matchers.equalTo; + +/** + * Test IndexMetaState for master and data only nodes return correct list of indices to write + * There are many parameters: + * - meta state is not in memory + * - meta state is in memory with old version/ new version + * - meta state is in memory with new version + * - version changed in cluster state event/ no change + * - node is data only node + * - node is master eligible + * for data only nodes: shard initializing on shard + */ +public class GatewayMetaStateTests extends ElasticsearchAllocationTestCase { + + ClusterChangedEvent generateEvent(boolean initializing, boolean versionChanged, boolean masterEligible) { + //ridiculous settings to make sure we don't run into uninitialized because of defaults + AllocationService strategy = createAllocationService(settingsBuilder() + .put("cluster.routing.allocation.concurrent_recoveries", 100) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) + 
.put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) + .build()); + ClusterState newClusterState, previousClusterState; + MetaData metaDataOldClusterState = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable routingTableOldClusterState = RoutingTable.builder() + .addAsNew(metaDataOldClusterState.index("test")) + .build(); + + // assign all shards + ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaDataOldClusterState) + .routingTable(routingTableOldClusterState) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + // new cluster state will have initializing shards on node 1 + RoutingTable routingTableNewClusterState = strategy.reroute(init).routingTable(); + if (initializing == false) { + // pretend all initialized, nothing happened + ClusterState temp = ClusterState.builder(init).routingTable(routingTableNewClusterState).metaData(metaDataOldClusterState).build(); + routingTableNewClusterState = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + routingTableOldClusterState = routingTableNewClusterState; + + } else { + // nothing to do, we have one routing table with unassigned and one with initializing + } + + // create new meta data either with version changed or not + MetaData metaDataNewClusterState = MetaData.builder() + .put(init.metaData().index("test"), versionChanged) + .build(); + + + // create the cluster states with meta data and routing tables as computed before + previousClusterState = ClusterState.builder(init) + .metaData(metaDataOldClusterState) + .routingTable(routingTableOldClusterState) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + newClusterState = 
ClusterState.builder(previousClusterState).routingTable(routingTableNewClusterState).metaData(metaDataNewClusterState).version(previousClusterState.getVersion() + 1).build(); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); + assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); + return event; + } + + ClusterChangedEvent generateCloseEvent(boolean masterEligible) { + //ridiculous settings to make sure we don't run into uninitialized because of defaults + AllocationService strategy = createAllocationService(settingsBuilder() + .put("cluster.routing.allocation.concurrent_recoveries", 100) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) + .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) + .build()); + ClusterState newClusterState, previousClusterState; + MetaData metaDataIndexCreated = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2)) + .build(); + + RoutingTable routingTableIndexCreated = RoutingTable.builder() + .addAsNew(metaDataIndexCreated.index("test")) + .build(); + + // assign all shards + ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaDataIndexCreated) + .routingTable(routingTableIndexCreated) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + RoutingTable routingTableInitializing = strategy.reroute(init).routingTable(); + ClusterState temp = ClusterState.builder(init).routingTable(routingTableInitializing).build(); + RoutingTable routingTableStarted = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable(); + + // create new meta data either with version changed or not + MetaData metaDataStarted = MetaData.builder() + 
.put(init.metaData().index("test"), true) + .build(); + + // create the cluster states with meta data and routing tables as computed before + MetaData metaDataClosed = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE).numberOfShards(5).numberOfReplicas(2)).version(metaDataStarted.version() + 1) + .build(); + previousClusterState = ClusterState.builder(init) + .metaData(metaDataStarted) + .routingTable(routingTableStarted) + .nodes(generateDiscoveryNodes(masterEligible)) + .build(); + newClusterState = ClusterState.builder(previousClusterState) + .routingTable(routingTableIndexCreated) + .metaData(metaDataClosed) + .version(previousClusterState.getVersion() + 1).build(); + + ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState); + assertThat(event.state().version(), equalTo(event.previousState().version() + 1)); + return event; + } + + private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { + Map masterNodeAttributes = new HashMap<>(); + masterNodeAttributes.put("master", "true"); + masterNodeAttributes.put("data", "true"); + Map dataNodeAttributes = new HashMap<>(); + dataNodeAttributes.put("master", "false"); + dataNodeAttributes.put("data", "true"); + return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? masterNodeAttributes : dataNodeAttributes)).put(newNode("master_node", masterNodeAttributes)).localNodeId("node1").masterNodeId(masterEligible ? 
"node1" : "master_node"); + } + + public void assertState(ClusterChangedEvent event, + boolean stateInMemory, + boolean expectMetaData) throws Exception { + MetaData inMemoryMetaData = null; + ImmutableSet oldIndicesList = ImmutableSet.of(); + if (stateInMemory) { + inMemoryMetaData = event.previousState().metaData(); + ImmutableSet.Builder relevantIndices = ImmutableSet.builder(); + oldIndicesList = relevantIndices.addAll(GatewayMetaState.getRelevantIndices(event.previousState(), oldIndicesList)).build(); + } + Set newIndicesList = GatewayMetaState.getRelevantIndices(event.state(), oldIndicesList); + // third, get the actual write info + Iterator indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator(); + + if (expectMetaData) { + assertThat(indices.hasNext(), equalTo(true)); + assertThat(indices.next().getNewMetaData().index(), equalTo("test")); + assertThat(indices.hasNext(), equalTo(false)); + } else { + assertThat(indices.hasNext(), equalTo(false)); + } + } + + @Test + public void testVersionChangeIsAlwaysWritten() throws Exception { + // test that version changes are always written + boolean initializing = randomBoolean(); + boolean versionChanged = true; + boolean stateInMemory = randomBoolean(); + boolean masterEligible = randomBoolean(); + boolean expectMetaData = true; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + @Test + public void testNewShardsAlwaysWritten() throws Exception { + // make sure new shards on data only node always written + boolean initializing = true; + boolean versionChanged = randomBoolean(); + boolean stateInMemory = randomBoolean(); + boolean masterEligible = false; + boolean expectMetaData = true; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + 
@Test + public void testAllUpToDateNothingWritten() throws Exception { + // make sure state is not written again if we wrote already + boolean initializing = false; + boolean versionChanged = false; + boolean stateInMemory = true; + boolean masterEligible = randomBoolean(); + boolean expectMetaData = false; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + assertState(event, stateInMemory, expectMetaData); + } + + @Test + public void testNoWriteIfNothingChanged() throws Exception { + boolean initializing = false; + boolean versionChanged = false; + boolean stateInMemory = true; + boolean masterEligible = randomBoolean(); + boolean expectMetaData = false; + ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible); + ClusterChangedEvent newEventWithNothingChanged = new ClusterChangedEvent("test cluster state", event.state(), event.state()); + assertState(newEventWithNothingChanged, stateInMemory, expectMetaData); + } + + @Test + public void testWriteClosedIndex() throws Exception { + // test that the closing of an index is written also on data only node + boolean masterEligible = randomBoolean(); + boolean expectMetaData = true; + boolean stateInMemory = true; + ClusterChangedEvent event = generateCloseEvent(masterEligible); + assertState(event, stateInMemory, expectMetaData); + } +} diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java b/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java new file mode 100644 index 00000000000..f8ba42d31c5 --- /dev/null +++ b/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java @@ -0,0 +1,358 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.base.Predicate; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.elasticsearch.test.InternalTestCluster; +import org.junit.Test; + +import java.io.IOException; +import java.util.LinkedHashMap; + +import static org.elasticsearch.client.Requests.clusterHealthRequest; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +/** + * + */ +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class MetaDataWriteDataNodesTests extends ElasticsearchIntegrationTest { + + 
@Test + public void testMetaWrittenAlsoOnDataNode() throws Exception { + // this test checks that index state is written on data only nodes + String masterNodeName = startMasterNode(); + String redNode = startDataNode("red"); + assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0))); + index("test", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + waitForConcreteMappingsOnAll("test", "doc", "text"); + ensureGreen("test"); + assertIndexInMetaState(redNode, "test"); + assertIndexInMetaState(masterNodeName, "test"); + //stop master node and start again with an empty data folder + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + String newMasterNode = startMasterNode(); + ensureGreen("test"); + // wait for mapping also on master because then we can be sure the state was written + waitForConcreteMappingsOnAll("test", "doc", "text"); + // check for meta data + assertIndexInMetaState(redNode, "test"); + assertIndexInMetaState(newMasterNode, "test"); + // check if index and doc is still there + ensureGreen("test"); + assertTrue(client().prepareGet("test", "doc", "1").get().isExists()); + } + + @Test + public void testMetaWrittenOnlyForIndicesOnNodesThatHaveAShard() throws Exception { + // this test checks that the index state is only written to a data only node if they have a shard of that index allocated on the node + String masterNode = startMasterNode(); + String blueNode = startDataNode("blue"); + String redNode = startDataNode("red"); + + assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); + index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 
0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + ensureGreen(); + waitForConcreteMappingsOnAll("blue_index", "doc", "text"); + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + + // not the index state for blue_index should only be written on blue_node and the for red_index only on red_node + // we restart red node and master but with empty data folders + stopNode(redNode); + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + redNode = startDataNode("red"); + + ensureGreen(); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + // check that blue index is still there + assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); + assertTrue(client().prepareGet("blue_index", "doc", "1").get().isExists()); + // red index should be gone + // if the blue node had stored the index state then cluster health would be red and red_index would exist + assertFalse(client().admin().indices().prepareExists("red_index").get().isExists()); + + } + + @Test + public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception { + // this test checks that the index state is removed from a data only node once all shards have been allocated away from it + String masterNode = startMasterNode(); + String blueNode = 
startDataNode("blue"); + String redNode = startDataNode("red"); + + // create blue_index on blue_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); + assertAcked(prepareCreate("blue_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue"))); + index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + ensureGreen(); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexInMetaState(masterNode, "blue_index"); + + // now relocate blue_index to red_node and red_index to blue_node + logger.debug("relocating indices..."); + client().admin().indices().prepareUpdateSettings("blue_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")).get(); + client().admin().indices().prepareUpdateSettings("red_index").setSettings(ImmutableSettings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")).get(); + client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get(); + ensureGreen(); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexNotInMetaState(blueNode, "blue_index"); + assertIndexInMetaState(redNode, "blue_index"); + assertIndexInMetaState(blueNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + 
assertIndexInMetaState(masterNode, "blue_index"); + waitForConcreteMappingsOnAll("blue_index", "doc", "text"); + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + + //at this point the blue_index is on red node and the red_index on blue node + // now, when we start red and master node again but without data folder, the red index should be gone but the blue index should initialize fine + stopNode(redNode); + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + redNode = startDataNode("red"); + ensureGreen(); + assertIndexNotInMetaState(redNode, "blue_index"); + assertIndexNotInMetaState(blueNode, "blue_index"); + assertIndexNotInMetaState(redNode, "red_index"); + assertIndexInMetaState(blueNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + assertIndexNotInMetaState(masterNode, "blue_index"); + assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); + // if the red_node had stored the index state then cluster health would be red and blue_index would exist + assertFalse(client().admin().indices().prepareExists("blue_index").get().isExists()); + } + + @Test + public void testMetaWrittenWhenIndexIsClosed() throws Exception { + String masterNode = startMasterNode(); + String redNodeDataPath = createTempDir().toString(); + String redNode = startDataNode("red", redNodeDataPath); + String blueNode = startDataNode("blue"); + // create red_index on red_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get(); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + ensureGreen(); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); 
+ assertIndexInMetaState(masterNode, "red_index"); + + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + client().admin().indices().prepareClose("red_index").get(); + // close the index + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + // restart master with empty data folder and maybe red node + boolean restartRedNode = randomBoolean(); + //at this point the red_index on red node + if (restartRedNode) { + stopNode(redNode); + } + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + if (restartRedNode) { + redNode = startDataNode("red", redNodeDataPath); + } + + ensureGreen("red_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + // open the index again + client().admin().indices().prepareOpen("red_index").get(); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); + // restart again + ensureGreen(); + if (restartRedNode) { + stopNode(redNode); + } + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + if (restartRedNode) { + redNode = startDataNode("red", redNodeDataPath); + } + ensureGreen("red_index"); + assertIndexNotInMetaState(blueNode, "red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = 
client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name())); + assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists()); + } + + @Test + public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { + String masterNode = startMasterNode(); + String redNodeDataPath = createTempDir().toString(); + String redNode = startDataNode("red", redNodeDataPath); + // create red_index on red_node and same for red + client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2")).get(); + assertAcked(prepareCreate("red_index").setSettings(ImmutableSettings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red"))); + index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); + + logger.info("--> wait for green red_index"); + ensureGreen(); + logger.info("--> wait for meta state written for red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + + waitForConcreteMappingsOnAll("red_index", "doc", "text"); + + logger.info("--> close red_index"); + client().admin().indices().prepareClose("red_index").get(); + // close the index + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + + logger.info("--> restart red node"); + stopNode(redNode); + redNode = startDataNode("red", redNodeDataPath); + client().admin().indices().preparePutMapping("red_index").setType("doc").setSource(jsonBuilder().startObject() + .startObject("properties") + .startObject("integer_field") + .field("type", "integer") + .endObject() + .endObject() + .endObject()).get(); + + 
GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); + assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); + // restart master with empty data folder and maybe red node + ((InternalTestCluster) cluster()).stopCurrentMasterNode(); + masterNode = startMasterNode(); + + ensureGreen("red_index"); + assertIndexInMetaState(redNode, "red_index"); + assertIndexInMetaState(masterNode, "red_index"); + clusterStateResponse = client().admin().cluster().prepareState().get(); + assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name())); + getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get(); + assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field")); + + } + + private String startDataNode(String color) { + return startDataNode(color, createTempDir().toString()); + } + + private String startDataNode(String color, String newDataPath) { + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() + .put("node.data", true) + .put("node.master", false) + .put("node.color", color) + .put("path.data", newDataPath); + return internalCluster().startNode(settingsBuilder.build()); + } + + private String startMasterNode() { + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder() + .put("node.data", false) + .put("node.master", true) + .put("path.data", createTempDir().toString()); + return internalCluster().startNode(settingsBuilder.build()); + } + + private void stopNode(String name) throws IOException { + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(name)); + } + + protected void assertIndexNotInMetaState(String nodeName, String indexName) throws 
Exception { + assertMetaState(nodeName, indexName, false); + } + + protected void assertIndexInMetaState(String nodeName, String indexName) throws Exception { + assertMetaState(nodeName, indexName, true); + } + + + private void assertMetaState(final String nodeName, final String indexName, final boolean shouldBe) throws Exception { + awaitBusy(new Predicate() { + @Override + public boolean apply(Object o) { + logger.info("checking if meta state exists..."); + try { + return shouldBe == metaStateExists(nodeName, indexName); + } catch (Throwable t) { + logger.info("failed to load meta state", t); + // TODO: loading of meta state fails rarely if the state is deleted while we try to load it + // this here is a hack, would be much better to use for example a WatchService + return false; + } + } + }); + boolean inMetaState = metaStateExists(nodeName, indexName); + if (shouldBe) { + assertTrue("expected " + indexName + " in meta state of node " + nodeName, inMetaState); + } else { + assertFalse("expected " + indexName + " to not be in meta state of node " + nodeName, inMetaState); + } + } + + private boolean metaStateExists(String nodeName, String indexName) throws Exception { + GatewayMetaState nodeMetaState = ((InternalTestCluster) cluster()).getInstance(GatewayMetaState.class, nodeName); + MetaData nodeMetaData = null; + nodeMetaData = nodeMetaState.loadMetaState(); + ImmutableOpenMap indices = nodeMetaData.getIndices(); + boolean inMetaState = false; + for (ObjectObjectCursor index : indices) { + inMetaState = inMetaState || index.key.equals(indexName); + } + return inMetaState; + } +} diff --git a/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java b/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java index c0a63d22922..17b095d7e26 100644 --- a/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java +++ b/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java @@ -19,30 +19,41 @@ package 
org.elasticsearch.index; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShadowIndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.*; import org.junit.Test; +import java.io.IOException; import java.nio.file.Path; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -309,6 +320,174 @@ public class IndexWithShadowReplicasTests extends 
ElasticsearchIntegrationTest { assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar")); } + @Test + public void testPrimaryRelocationWithConcurrentIndexing() throws Exception { + Settings nodeSettings = ImmutableSettings.builder() + .put("node.add_id_to_custom_path", false) + .put("node.enable_custom_paths", true) + .build(); + + String node1 = internalCluster().startNode(nodeSettings); + Path dataPath = createTempDir(); + final String IDX = "test"; + + Settings idxSettings = ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) + .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + .build(); + + prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get(); + ensureYellow(IDX); + // Node1 has the primary, now node2 has the replica + String node2 = internalCluster().startNode(nodeSettings); + ensureGreen(IDX); + flushAndRefresh(IDX); + String node3 = internalCluster().startNode(nodeSettings); + final AtomicInteger counter = new AtomicInteger(0); + final CountDownLatch started = new CountDownLatch(1); + + final int numPhase1Docs = scaledRandomIntBetween(25, 200); + final int numPhase2Docs = scaledRandomIntBetween(25, 200); + final CountDownLatch phase1finished = new CountDownLatch(1); + final CountDownLatch phase2finished = new CountDownLatch(1); + + Thread thread = new Thread() { + @Override + public void run() { + started.countDown(); + while (counter.get() < (numPhase1Docs + numPhase2Docs)) { + final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", + Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); + assertTrue(indexResponse.isCreated()); + final int docCount = counter.get(); + if (docCount == numPhase1Docs) { + phase1finished.countDown(); + } + } + logger.info("--> stopping 
indexing thread"); + phase2finished.countDown(); + } + }; + thread.start(); + started.await(); + phase1finished.await(); // wait for a certain number of documents to be indexed + logger.info("--> excluding {} from allocation", node1); + // now prevent primary from being allocated on node 1 move to node_3 + Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", node1).build(); + client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); + // wait for more documents to be indexed post-recovery, also waits for + // indexing thread to stop + phase2finished.await(); + ensureGreen(IDX); + thread.join(); + logger.info("--> performing query"); + flushAndRefresh(); + + SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); + assertHitCount(resp, counter.get()); + assertHitCount(resp, numPhase1Docs + numPhase2Docs); + } + + @Test + public void testPrimaryRelocationWhereRecoveryFails() throws Exception { + Settings nodeSettings = ImmutableSettings.builder() + .put("node.add_id_to_custom_path", false) + .put("node.enable_custom_paths", true) + .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName()) + .build(); + + String node1 = internalCluster().startNode(nodeSettings); + Path dataPath = createTempDir(); + final String IDX = "test"; + + Settings idxSettings = ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) + .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + .build(); + + prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get(); + ensureYellow(IDX); + // Node1 has the primary, now node2 has the replica + String node2 = internalCluster().startNode(nodeSettings); + ensureGreen(IDX); + 
flushAndRefresh(IDX); + String node3 = internalCluster().startNode(nodeSettings); + final AtomicInteger counter = new AtomicInteger(0); + final CountDownLatch started = new CountDownLatch(1); + + final int numPhase1Docs = scaledRandomIntBetween(25, 200); + final int numPhase2Docs = scaledRandomIntBetween(25, 200); + final int numPhase3Docs = scaledRandomIntBetween(25, 200); + final CountDownLatch phase1finished = new CountDownLatch(1); + final CountDownLatch phase2finished = new CountDownLatch(1); + final CountDownLatch phase3finished = new CountDownLatch(1); + + final AtomicBoolean keepFailing = new AtomicBoolean(true); + + MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, node1)); + mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, node3).localNode(), + new MockTransportService.DelegateTransport(mockTransportService.original()) { + + @Override + public void sendRequest(DiscoveryNode node, long requestId, String action, + TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + if (keepFailing.get() && action.equals(RecoveryTarget.Actions.TRANSLOG_OPS)) { + logger.info("--> failing translog ops"); + throw new ElasticsearchException("failing on purpose"); + } + super.sendRequest(node, requestId, action, request, options); + } + }); + + Thread thread = new Thread() { + @Override + public void run() { + started.countDown(); + while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) { + final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", + Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); + assertTrue(indexResponse.isCreated()); + final int docCount = counter.get(); + if (docCount == numPhase1Docs) { + phase1finished.countDown(); + } else if (docCount == (numPhase1Docs + numPhase2Docs)) { + phase2finished.countDown(); + } + } + logger.info("--> stopping indexing 
thread"); + phase3finished.countDown(); + } + }; + thread.start(); + started.await(); + phase1finished.await(); // wait for a certain number of documents to be indexed + logger.info("--> excluding {} from allocation", node1); + // now prevent primary from being allocated on node 1 move to node_3 + Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", node1).build(); + client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); + // wait for more documents to be indexed post-recovery, also waits for + // indexing thread to stop + phase2finished.await(); + // stop failing + keepFailing.set(false); + // wait for more docs to be indexed + phase3finished.await(); + ensureGreen(IDX); + thread.join(); + logger.info("--> performing query"); + flushAndRefresh(); + + SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); + assertHitCount(resp, counter.get()); + } + @Test public void testIndexWithShadowReplicasCleansUp() throws Exception { Settings nodeSettings = ImmutableSettings.builder() @@ -448,4 +627,46 @@ public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest { assertThat(hits[2].field("foo").getValue().toString(), equalTo("eggplant")); assertThat(hits[3].field("foo").getValue().toString(), equalTo("foo")); } + + @Test + public void testIndexOnSharedFSRecoversToAnyNode() throws Exception { + Settings nodeSettings = ImmutableSettings.builder() + .put("node.add_id_to_custom_path", false) + .put("node.enable_custom_paths", true) + .build(); + + internalCluster().startNode(nodeSettings); + Path dataPath = createTempDir(); + String IDX = "test"; + + Settings idxSettings = ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + 
.put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true) + .build(); + + // only one node, so all primaries will end up on node1 + prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get(); + ensureGreen(IDX); + + // Index some documents + client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); + client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); + client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get(); + client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); + + // start a second node + internalCluster().startNode(nodeSettings); + + // node1 is master, stop that one, since we only have primaries, + // usually this would mean data loss, but not on shared fs! + internalCluster().stopCurrentMasterNode(); + + ensureGreen(IDX); + refresh(); + SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addFieldDataField("foo").addSort("foo", SortOrder.ASC).get(); + assertHitCount(resp, 4); + } } diff --git a/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java index 04413f5d41e..70f12cdd401 100644 --- a/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java @@ -33,6 +33,7 @@ public class ASCIIFoldingTokenFilterFactoryTests extends ElasticsearchTokenStrea @Test public void testDefault() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_ascii_folding.type", "asciifolding") .build()); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding"); @@ -46,6 +47,7 @@ public class 
ASCIIFoldingTokenFilterFactoryTests extends ElasticsearchTokenStrea @Test public void testPreserveOriginal() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_ascii_folding.type", "asciifolding") .put("index.analysis.filter.my_ascii_folding.preserve_original", true) .build()); diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java index 81f06ad79d7..ffe1d1b37dc 100644 --- a/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java @@ -77,8 +77,11 @@ public class AnalysisModuleTests extends ElasticsearchTestCase { return injector.getInstance(AnalysisService.class); } - private static Settings loadFromClasspath(String path) { - return settingsBuilder().loadFromClasspath(path).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + private Settings loadFromClasspath(String path) { + return settingsBuilder().loadFromClasspath(path) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .build(); } @@ -103,8 +106,11 @@ public class AnalysisModuleTests extends ElasticsearchTestCase { @Test public void testVersionedAnalyzers() throws Exception { - Settings settings2 = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml") - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build(); + Settings settings2 = settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/test1.yml") + .put("path.home", createTempDir().toString()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0) + .build(); AnalysisService analysisService2 = getAnalysisService(settings2); // indicesanalysisservice always has the 
current version @@ -121,7 +127,10 @@ public class AnalysisModuleTests extends ElasticsearchTestCase { } private void assertTokenFilter(String name, Class clazz) throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); + Settings settings = ImmutableSettings.settingsBuilder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()).build(); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter(name); Tokenizer tokenizer = new WhitespaceTokenizer(); tokenizer.setReader(new StringReader("foo bar")); @@ -152,6 +161,12 @@ public class AnalysisModuleTests extends ElasticsearchTestCase { // html = (HtmlStripCharFilterFactory) custom2.charFilters()[1]; // assertThat(html.readAheadLimit(), equalTo(1024)); + // verify position offset gap + analyzer = analysisService.analyzer("custom6").analyzer(); + assertThat(analyzer, instanceOf(CustomAnalyzer.class)); + CustomAnalyzer custom6 = (CustomAnalyzer) analyzer; + assertThat(custom6.getPositionIncrementGap("any_string"), equalTo(256)); + // verify characters mapping analyzer = analysisService.analyzer("custom5").analyzer(); assertThat(analyzer, instanceOf(CustomAnalyzer.class)); @@ -199,11 +214,14 @@ public class AnalysisModuleTests extends ElasticsearchTestCase { @Test public void testWordListPath() throws Exception { - Environment env = new Environment(ImmutableSettings.Builder.EMPTY_SETTINGS); + Settings settings = ImmutableSettings.builder() + .put("path.home", createTempDir().toString()) + .build(); + Environment env = new Environment(settings); String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"}; Path wordListFile = generateWordList(words); - Settings settings = 
settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.toAbsolutePath()).build(); + settings = settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.toAbsolutePath()).build(); Set wordList = Analysis.getWordSet(env, settings, "index.word_list"); MatcherAssert.assertThat(wordList.size(), equalTo(6)); diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java index 5a853c8f738..ea52f71e94a 100644 --- a/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java +++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java @@ -34,11 +34,15 @@ import org.elasticsearch.index.settings.IndexSettingsModule; import org.elasticsearch.indices.analysis.IndicesAnalysisModule; import org.elasticsearch.indices.analysis.IndicesAnalysisService; +import java.nio.file.Path; + public class AnalysisTestsHelper { - public static AnalysisService createAnalysisServiceFromClassPath(String resource) { + public static AnalysisService createAnalysisServiceFromClassPath(Path baseDir, String resource) { Settings settings = ImmutableSettings.settingsBuilder() - .loadFromClasspath(resource).build(); + .loadFromClasspath(resource) + .put("path.home", baseDir.toString()) + .build(); return createAnalysisServiceFromSettings(settings); } diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java index 880b9d80489..7254268eb40 100644 --- a/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java @@ -45,6 +45,7 @@ public class AnalyzerBackwardsCompatTests extends ElasticsearchTokenStreamTestCa builder.put(SETTING_VERSION_CREATED, version); } builder.put("index.analysis.analyzer.foo.type", type); + 
builder.put("path.home", createTempDir().toString()); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build()); NamedAnalyzer analyzer = analysisService.analyzer("foo"); if (version.onOrAfter(noStopwordVersion)) { diff --git a/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java index b0bdda19be0..bfa4c5ed596 100644 --- a/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java @@ -33,7 +33,7 @@ public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testDefault() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram"); String source = "多くの学生が試験に落ちた。"; String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" }; @@ -44,7 +44,7 @@ public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testNoFlags() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags"); String source = "多くの学生が試験に落ちた。"; String[] expected = new String[]{"多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" }; @@ -55,7 +55,7 @@ public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testHanOnly() throws IOException { - AnalysisService analysisService = 
AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only"); String source = "多くの学生が試験に落ちた。"; String[] expected = new String[]{"多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" }; @@ -66,7 +66,7 @@ public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testHanUnigramOnly() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only"); String source = "多くの学生が試験に落ちた。"; String[] expected = new String[]{"多", "く", "の", "学", "学生", "生", "が", "試", "試験", "験", "に", "落", "ち", "た" }; diff --git a/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java index e2715f690c8..c592579c801 100644 --- a/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.ModulesBuilder; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; @@ -49,6 +50,7 @@ public class CharFilterTests extends ElasticsearchTokenStreamTestCase { .putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q") .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard") 
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping") + .put("path.home", createTempDir().toString()) .build(); Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector(); Injector injector = new ModulesBuilder().add( @@ -74,6 +76,7 @@ public class CharFilterTests extends ElasticsearchTokenStreamTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard") .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip") + .put("path.home", createTempDir().toString()) .build(); Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector(); Injector injector = new ModulesBuilder().add( diff --git a/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java index a3e7552f50b..37b3bfc21a7 100644 --- a/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java @@ -110,10 +110,18 @@ public class CompoundAnalysisTests extends ElasticsearchTestCase { } private Settings getJsonSettings() { - return settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.json").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + return settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/test1.json") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .build(); } private Settings getYamlSettings() { - return 
settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/test1.yml").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + return settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/test1.yml") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir().toString()) + .build(); } } diff --git a/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java index cb79991b90c..298910c9216 100644 --- a/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java @@ -33,6 +33,7 @@ public class HunspellTokenFilterFactoryTests extends ElasticsearchTestCase { @Test public void testDedup() throws IOException { Settings settings = settingsBuilder() + .put("path.home", createTempDir().toString()) .put("path.conf", getDataPath("/indices/analyze/conf_dir")) .put("index.analysis.filter.en_US.type", "hunspell") .put("index.analysis.filter.en_US.locale", "en_US") @@ -45,6 +46,7 @@ public class HunspellTokenFilterFactoryTests extends ElasticsearchTestCase { assertThat(hunspellTokenFilter.dedup(), is(true)); settings = settingsBuilder() + .put("path.home", createTempDir().toString()) .put("path.conf", getDataPath("/indices/analyze/conf_dir")) .put("index.analysis.filter.en_US.type", "hunspell") .put("index.analysis.filter.en_US.dedup", false) diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java index 78f2bd5077c..fb708a40c26 100644 --- a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java @@ -40,7 +40,7 @@ public class KeepFilterFactoryTests extends 
ElasticsearchTokenStreamTestCase { @Test public void testLoadWithoutSettings() { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep"); Assert.assertNull(tokenFilter); } @@ -48,6 +48,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testLoadOverConfiguredSettings() { Settings settings = ImmutableSettings.settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.broken_keep_filter.type", "keep") .put("index.analysis.filter.broken_keep_filter.keep_words_path", "does/not/exists.txt") .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]") @@ -63,6 +64,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testKeepWordsPathSettings() { Settings settings = ImmutableSettings.settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.non_broken_keep_filter.type", "keep") .put("index.analysis.filter.non_broken_keep_filter.keep_words_path", "does/not/exists.txt") .build(); @@ -89,7 +91,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void testCaseInsensitiveMapping() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter"); assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class)); String source = "hello small world"; @@ -101,7 +103,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase { @Test public void 
testCaseSensitiveMapping() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter"); assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class)); String source = "Hello small world"; diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java index 95c942e19fd..c296c875987 100644 --- a/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java @@ -36,6 +36,7 @@ public class KeepTypesFilterFactoryTests extends ElasticsearchTokenStreamTestCas @Test public void testKeepTypes() throws IOException { Settings settings = ImmutableSettings.settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.keep_numbers.type", "keep_types") .putArray("index.analysis.filter.keep_numbers.types", new String[] {"", ""}) .build(); diff --git a/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java index 5473c635980..0428e66263d 100644 --- a/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java @@ -33,7 +33,10 @@ public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamT @Test public void testDefault() throws IOException { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_default.type", "limit").build(); + Settings settings = 
ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.limit_default.type", "limit") + .put("path.home", createTempDir().toString()) + .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); { TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default"); @@ -56,8 +59,11 @@ public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamT @Test public void testSettings() throws IOException { { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit") - .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", true) + Settings settings = ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.limit_1.type", "limit") + .put("index.analysis.filter.limit_1.max_token_count", 3) + .put("index.analysis.filter.limit_1.consume_all_tokens", true) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1"); @@ -68,8 +74,11 @@ public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamT assertTokenStreamContents(tokenFilter.create(tokenizer), expected); } { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit") - .put("index.analysis.filter.limit_1.max_token_count", 3).put("index.analysis.filter.limit_1.consume_all_tokens", false) + Settings settings = ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.limit_1.type", "limit") + .put("index.analysis.filter.limit_1.max_token_count", 3) + .put("index.analysis.filter.limit_1.consume_all_tokens", false) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); 
TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1"); @@ -81,8 +90,11 @@ public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamT } { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.limit_1.type", "limit") - .put("index.analysis.filter.limit_1.max_token_count", 17).put("index.analysis.filter.limit_1.consume_all_tokens", true) + Settings settings = ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.limit_1.type", "limit") + .put("index.analysis.filter.limit_1.max_token_count", 17) + .put("index.analysis.filter.limit_1.consume_all_tokens", true) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1"); diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java index 996471a205c..eaab794e500 100644 --- a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java @@ -42,7 +42,11 @@ public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTest @Test public void testPatternCaptureTokenFilter() throws Exception { Index index = new Index("test"); - Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/pattern_capture.json").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + Settings settings = settingsBuilder() + .put("path.home", createTempDir()) + .loadFromClasspath("org/elasticsearch/index/analysis/pattern_capture.json") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new 
Environment(settings)), new IndicesAnalysisModule()).createInjector(); Injector injector = new ModulesBuilder().add( new IndexSettingsModule(index, settings), diff --git a/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java index a6c193df2ec..866aad321f8 100644 --- a/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java @@ -40,7 +40,7 @@ public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTest @Test public void testDefault() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle"); String source = "the quick brown fox"; String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"}; @@ -51,7 +51,7 @@ public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTest @Test public void testInverseMapping() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse"); assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class)); String source = "the quick brown fox"; @@ -63,7 +63,7 @@ public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTest @Test public void testInverseMappingNoShingles() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + 
AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse"); assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class)); String source = "the quick"; @@ -75,7 +75,7 @@ public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTest @Test public void testFillerToken() throws IOException { - AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(RESOURCE); + AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE); TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_filler"); String source = "simon the sorcerer"; String[] expected = new String[]{"simon FILLER", "simon FILLER sorcerer", "FILLER sorcerer"}; diff --git a/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java index ab335129df5..727b71f3b2f 100644 --- a/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java @@ -54,6 +54,7 @@ public class StemmerTokenFilterFactoryTests extends ElasticsearchTokenStreamTest .put("index.analysis.analyzer.my_english.tokenizer","whitespace") .put("index.analysis.analyzer.my_english.filter","my_english") .put(SETTING_VERSION_CREATED,v) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -87,6 +88,7 @@ public class StemmerTokenFilterFactoryTests extends ElasticsearchTokenStreamTest .put("index.analysis.analyzer.my_porter2.tokenizer","whitespace") .put("index.analysis.analyzer.my_porter2.filter","my_porter2") .put(SETTING_VERSION_CREATED,v) + .put("path.home", 
createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); diff --git a/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java index 134b2cd5e3e..00e59547a69 100644 --- a/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java @@ -42,7 +42,11 @@ public class StopAnalyzerTests extends ElasticsearchTokenStreamTestCase { @Test public void testDefaultsCompoundAnalysis() throws Exception { Index index = new Index("test"); - Settings settings = settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/stop.json").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + Settings settings = settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/stop.json") + .put("path.home", createTempDir().toString()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector(); Injector injector = new ModulesBuilder().add( new IndexSettingsModule(index, settings), diff --git a/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java index 929d4f335d8..fa1dbf71362 100644 --- a/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java @@ -49,6 +49,7 @@ public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase { if (random().nextBoolean()) { builder.put("index.analysis.filter.my_stop.version", "5.0"); } + builder.put("path.home", createTempDir().toString()); Settings settings = builder.build(); AnalysisService 
analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); analysisService.tokenFilter("my_stop"); @@ -68,6 +69,7 @@ public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase { } else { // don't specify } + builder.put("path.home", createTempDir().toString()); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build()); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop"); assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class)); @@ -83,8 +85,11 @@ public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase { @Test public void testDeprecatedPositionIncrementSettingWithVersions() throws IOException { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop") - .put("index.analysis.filter.my_stop.enable_position_increments", false).put("index.analysis.filter.my_stop.version", "4.3") + Settings settings = ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.my_stop.type", "stop") + .put("index.analysis.filter.my_stop.enable_position_increments", false) + .put("index.analysis.filter.my_stop.version", "4.3") + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop"); @@ -100,6 +105,7 @@ public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase { Settings settings = ImmutableSettings.settingsBuilder() .put("index.analysis.filter.my_stop.type", "stop") .put("index.analysis.filter.my_stop.remove_trailing", false) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop"); diff --git 
a/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java index 24aba316d6e..52dc850c12a 100644 --- a/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java @@ -34,6 +34,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testDefault() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .build()); TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter"); @@ -47,6 +48,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testCatenateWords() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false") @@ -62,6 +64,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testCatenateNumbers() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false") .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true") @@ -77,6 +80,7 @@ public class 
WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testCatenateAll() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false") .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false") @@ -93,6 +97,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testSplitOnCaseChange() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false") .build()); @@ -107,6 +112,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testPreserveOriginal() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.preserve_original", "true") .build()); @@ -121,6 +127,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testStemEnglishPossessive() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false") .build()); @@ -136,6 +143,7 @@ public class 
WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testPartsAndCatenate() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") @@ -153,6 +161,7 @@ public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStre @Test public void testDeprecatedPartsAndCatenate() throws IOException { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder() + .put("path.home", createTempDir().toString()) .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter") .put("index.analysis.filter.my_word_delimiter.catenate_words", "true") .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true") diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java index 2792f0c4150..164892ff48c 100644 --- a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java @@ -39,7 +39,10 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream @Test public void testDefault() throws IOException { - Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams").build(); + Settings settings = ImmutableSettings.settingsBuilder() + .put("index.analysis.filter.common_grams_default.type", "common_grams") + .put("path.home", createTempDir().toString()) + .build(); try { 
AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -53,6 +56,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream { Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams") .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein") + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -69,6 +73,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream { Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams") .put("index.analysis.filter.common_grams_default.query_mode", false) + .put("path.home", createTempDir().toString()) .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein") .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -88,6 +93,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream { Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams") .put("index.analysis.filter.common_grams_1.ignore_case", true) + .put("path.home", createTempDir().toString()) .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are") .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -101,6 +107,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream { Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams") .put("index.analysis.filter.common_grams_2.ignore_case", false) + .put("path.home", createTempDir().toString()) 
.putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are") .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); @@ -114,6 +121,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream { Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams") .putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are") + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3"); @@ -127,7 +135,10 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream @Test public void testCommonGramsAnalysis() throws IOException { - Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams.json").build(); + Settings settings = ImmutableSettings.settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams.json") + .put("path.home", createTempDir().toString()) + .build(); { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer(); @@ -151,6 +162,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream .put("index.analysis.filter.common_grams_1.query_mode", true) .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are") .put("index.analysis.filter.common_grams_1.ignore_case", true) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); 
TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1"); @@ -165,6 +177,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream .put("index.analysis.filter.common_grams_2.query_mode", true) .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are") .put("index.analysis.filter.common_grams_2.ignore_case", false) + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2"); @@ -178,6 +191,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams") .put("index.analysis.filter.common_grams_3.query_mode", true) .putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are") + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3"); @@ -191,6 +205,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStream Settings settings = ImmutableSettings.settingsBuilder().put("index.analysis.filter.common_grams_4.type", "common_grams") .put("index.analysis.filter.common_grams_4.query_mode", true) .putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are") + .put("path.home", createTempDir().toString()) .build(); AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4"); @@ -204,7 +219,10 @@ public class CommonGramsTokenFilterFactoryTests 
extends ElasticsearchTokenStream @Test public void testQueryModeCommonGramsAnalysis() throws IOException { - Settings settings = ImmutableSettings.settingsBuilder().loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json").build(); + Settings settings = ImmutableSettings.settingsBuilder() + .loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json") + .put("path.home", createTempDir().toString()) + .build(); { AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings); Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer(); diff --git a/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java index c1d09a90bb1..6df27d10212 100644 --- a/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java +++ b/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java @@ -59,9 +59,9 @@ public class SynonymsAnalysisTest extends ElasticsearchTestCase { @Test public void testSynonymsAnalysis() throws IOException { - Settings settings = settingsBuilder(). 
loadFromClasspath("org/elasticsearch/index/analysis/synonyms/synonyms.json") + .put("path.home", createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); Index index = new Index("test"); diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.json b/src/test/java/org/elasticsearch/index/analysis/test1.json index 3b503d1da76..69be6db8f82 100644 --- a/src/test/java/org/elasticsearch/index/analysis/test1.json +++ b/src/test/java/org/elasticsearch/index/analysis/test1.json @@ -66,6 +66,10 @@ "tokenizer":"standard", "char_filter":["my_mapping"] }, + "custom6":{ + "tokenizer":"standard", + "position_offset_gap": 256 + }, "czechAnalyzerWithStemmer":{ "tokenizer":"standard", "filter":["standard", "lowercase", "stop", "czech_stem"] diff --git a/src/test/java/org/elasticsearch/index/analysis/test1.yml b/src/test/java/org/elasticsearch/index/analysis/test1.yml index 9c4aac6a6cb..81ef2353103 100644 --- a/src/test/java/org/elasticsearch/index/analysis/test1.yml +++ b/src/test/java/org/elasticsearch/index/analysis/test1.yml @@ -49,7 +49,8 @@ index : tokenizer : standard char_filter : [my_mapping] custom6 : - type : standard + tokenizer : standard + position_offset_gap: 256 custom7 : type : standard version: 3.6 @@ -58,4 +59,4 @@ index : filter : [standard, lowercase, stop, czech_stem] decompoundingAnalyzer : tokenizer : standard - filter : [dict_dec] \ No newline at end of file + filter : [dict_dec] diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 3dfdb5e629c..c63e4b256ae 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.index.engine; -import com.carrotsearch.randomizedtesting.annotations.Repeat; -import com.carrotsearch.randomizedtesting.annotations.Seed; import 
com.google.common.collect.ImmutableMap; - import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -32,13 +29,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexDeletionPolicy; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.LiveIndexWriterConfig; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; @@ -48,7 +39,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; @@ -59,13 +49,13 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; -import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; import 
org.elasticsearch.index.engine.Engine.Searcher; import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService; @@ -89,10 +79,10 @@ import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogSizeMatcher; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.junit.After; @@ -100,6 +90,7 @@ import org.junit.Before; import org.junit.Test; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -121,15 +112,14 @@ public class InternalEngineTests extends ElasticsearchTestCase { private Store store; private Store storeReplica; - protected Translog translog; - protected Translog replicaTranslog; - protected InternalEngine engine; protected InternalEngine replicaEngine; private Settings defaultSettings; private int indexConcurrency; private String codecName; + private Path primaryTranslogDir; + private Path replicaTranslogDir; @Override @Before @@ -157,8 +147,8 @@ public class InternalEngineTests extends ElasticsearchTestCase { storeReplica = createStore(); Lucene.cleanLuceneIndex(store.directory()); Lucene.cleanLuceneIndex(storeReplica.directory()); - translog = createTranslog(); - engine = createEngine(store, translog); + primaryTranslogDir = createTempDir("translog-primary"); + engine = createEngine(store, primaryTranslogDir); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); assertEquals(engine.config().getCodec().getName(), 
codecService.codec(codecName).getName()); @@ -166,8 +156,8 @@ public class InternalEngineTests extends ElasticsearchTestCase { if (randomBoolean()) { engine.config().setEnableGcDeletes(false); } - replicaTranslog = createTranslogReplica(); - replicaEngine = createEngine(storeReplica, replicaTranslog); + replicaTranslogDir = createTempDir("translog-replica"); + replicaEngine = createEngine(storeReplica, replicaTranslogDir); currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig(); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); @@ -182,12 +172,12 @@ public class InternalEngineTests extends ElasticsearchTestCase { public void tearDown() throws Exception { super.tearDown(); IOUtils.close( - replicaEngine, storeReplica, replicaTranslog, - engine, store, translog); - + replicaEngine, storeReplica, + engine, store); terminate(threadPool); } + private Document testDocumentWithTextField() { Document document = testDocument(); document.add(new TextField("value", "test", Field.Store.YES)); @@ -226,12 +216,16 @@ public class InternalEngineTests extends ElasticsearchTestCase { return new Store(shardId, EMPTY_SETTINGS, directoryService, new DummyShardLock(shardId)); } - protected Translog createTranslog() throws IOException { - return new FsTranslog(shardId, EMPTY_SETTINGS, createTempDir("translog-primary")); + protected FsTranslog createTranslog() throws IOException { + return createTranslog(primaryTranslogDir); } - protected Translog createTranslogReplica() throws IOException { - return new FsTranslog(shardId, EMPTY_SETTINGS, createTempDir("translog-replica")); + protected FsTranslog createTranslog(Path translogPath) throws IOException { + return new FsTranslog(shardId, EMPTY_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, translogPath); + } + + protected FsTranslog createTranslogReplica() throws IOException { + return createTranslog(replicaTranslogDir); } protected IndexDeletionPolicy createIndexDeletionPolicy() { 
@@ -250,26 +244,25 @@ public class InternalEngineTests extends ElasticsearchTestCase { return new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, indexSettingsService); } - protected InternalEngine createEngine(Store store, Translog translog) { + protected InternalEngine createEngine(Store store, Path translogPath) { IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - return createEngine(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService)); + return createEngine(indexSettingsService, store, translogPath, createMergeScheduler(indexSettingsService)); } - protected InternalEngine createEngine(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) { - return new InternalEngine(config(indexSettingsService, store, translog, mergeSchedulerProvider), false); + protected InternalEngine createEngine(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) { + return new InternalEngine(config(indexSettingsService, store, translogPath, mergeSchedulerProvider), false); } - public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) { + public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) { IndexWriterConfig iwc = newIndexWriterConfig(); EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService - , null, store, createSnapshotDeletionPolicy(), translog, createMergePolicy(), mergeSchedulerProvider, + , null, store, 
createSnapshotDeletionPolicy(), createMergePolicy(), mergeSchedulerProvider, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(shardId.index()), new Engine.FailedEngineListener() { @Override public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test } - }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); - + }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), BigArrays.NON_RECYCLING_INSTANCE, translogPath); return config; } @@ -321,7 +314,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); assertThat(segments.get(0).isCompound(), equalTo(defaultCompound)); - ((InternalEngine) engine).config().setCompoundOnFlush(false); + engine.config().setCompoundOnFlush(false); ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); engine.create(new Engine.Create(null, newUid("3"), doc3)); @@ -369,7 +362,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); assertThat(segments.get(1).isCompound(), equalTo(false)); - ((InternalEngine) engine).config().setCompoundOnFlush(true); + engine.config().setCompoundOnFlush(true); ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); engine.create(new Engine.Create(null, newUid("4"), doc4)); engine.refresh("test"); @@ -430,8 +423,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS)); IndexSettingsService indexSettingsService = new 
IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); try (Store store = createStore(); - Translog translog = createTranslog(); - Engine engine = createEngine(indexSettingsService, store, translog, mergeSchedulerProvider)) { + Engine engine = createEngine(indexSettingsService, store, createTempDir(), mergeSchedulerProvider)) { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); Engine.Index index = new Engine.Index(null, newUid("1"), doc); @@ -655,8 +647,6 @@ public class InternalEngineTests extends ElasticsearchTestCase { MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); searchResult.close(); - - engine.close(); } @Test @@ -717,198 +707,6 @@ public class InternalEngineTests extends ElasticsearchTestCase { assertThat(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId)); } - @Test - public void testFailEngineOnCorruption() { - ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(null, newUid("1"), doc)); - engine.flush(); - final int failInPhase = randomIntBetween(1, 3); - try { - engine.recover(new Engine.RecoveryHandler() { - @Override - public void phase1(SnapshotIndexCommit snapshot) throws EngineException { - if (failInPhase == 1) { - throw new RuntimeException("bar", new CorruptIndexException("Foo", "fake file description")); - } - } - - @Override - public void phase2(Translog.Snapshot snapshot) throws EngineException { - if (failInPhase == 2) { - throw new RuntimeException("bar", new CorruptIndexException("Foo", "fake file description")); - } - } 
- - @Override - public void phase3(Translog.Snapshot snapshot) throws EngineException { - if (failInPhase == 3) { - throw new RuntimeException("bar", new CorruptIndexException("Foo", "fake file description")); - } - } - }); - fail("exception expected"); - } catch (RuntimeException ex) { - - } - try { - Engine.Searcher searchResult = engine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - engine.create(new Engine.Create(null, newUid("2"), doc2)); - engine.refresh("foo"); - - searchResult = engine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 2)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(2)); - searchResult.close(); - fail("engine should have failed"); - } catch (EngineClosedException ex) { - // expected - } - } - - - @Test - public void testSimpleRecover() throws Exception { - final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(null, newUid("1"), doc)); - engine.flush(); - - engine.recover(new Engine.RecoveryHandler() { - @Override - public void phase1(SnapshotIndexCommit snapshot) throws EngineException { - try { - engine.flush(); - assertThat("flush is not allowed in phase 1", false, equalTo(true)); - } catch (FlushNotAllowedEngineException e) { - // all is well - } - } - - @Override - public void phase2(Translog.Snapshot snapshot) throws EngineException { - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); - try { - 
engine.flush(); - assertThat("flush is not allowed in phase 2", false, equalTo(true)); - } catch (FlushNotAllowedEngineException e) { - // all is well - } - - // but we can index - engine.index(new Engine.Index(null, newUid("1"), doc)); - } - - @Override - public void phase3(Translog.Snapshot snapshot) throws EngineException { - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - try { - // we can do this here since we are on the same thread - engine.flush(); - assertThat("flush is not allowed in phase 3", false, equalTo(true)); - } catch (FlushNotAllowedEngineException e) { - // all is well - } - } - }); - // post recovery should flush the translog - try (Translog.Snapshot snapshot = translog.snapshot()) { - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); - } - // and we should not leak files - assertThat("there are unreferenced translog files left", translog.clearUnreferenced(), equalTo(0)); - - engine.flush(); - - assertThat("there are unreferenced translog files left, post flush", translog.clearUnreferenced(), equalTo(0)); - - engine.close(); - } - - @Test - public void testRecoverWithOperationsBetweenPhase1AndPhase2() throws Exception { - ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(null, newUid("1"), doc1)); - engine.flush(); - ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - engine.create(new Engine.Create(null, newUid("2"), doc2)); - - engine.recover(new Engine.RecoveryHandler() { - @Override - public void phase1(SnapshotIndexCommit snapshot) throws EngineException { - } - - @Override - public void phase2(Translog.Snapshot snapshot) throws EngineException { - try { - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat("translog snapshot should not read null", create != null, equalTo(true)); - 
assertThat(create.source().toBytesArray(), equalTo(B_2)); - assertThat(snapshot.next(), equalTo(null)); - } catch (IOException ex) { - throw new ElasticsearchException("failed", ex); - } - } - - @Override - public void phase3(Translog.Snapshot snapshot) throws EngineException { - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); - } - }); - - engine.flush(); - engine.close(); - } - - @Test - public void testRecoverWithOperationsBetweenPhase1AndPhase2AndPhase3() throws Exception { - ParsedDocument doc1 = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null); - engine.create(new Engine.Create(null, newUid("1"), doc1)); - engine.flush(); - ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null); - engine.create(new Engine.Create(null, newUid("2"), doc2)); - - engine.recover(new Engine.RecoveryHandler() { - @Override - public void phase1(SnapshotIndexCommit snapshot) throws EngineException { - } - - @Override - public void phase2(Translog.Snapshot snapshot) throws EngineException { - try { - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat(create != null, equalTo(true)); - assertThat(snapshot.next(), equalTo(null)); - assertThat(create.source().toBytesArray(), equalTo(B_2)); - - // add for phase3 - ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null); - engine.create(new Engine.Create(null, newUid("3"), doc3)); - } catch (IOException ex) { - throw new ElasticsearchException("failed", ex); - } - } - - @Override - public void phase3(Translog.Snapshot snapshot) throws EngineException { - try { - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat(create != null, equalTo(true)); - assertThat(snapshot.next(), equalTo(null)); - assertThat(create.source().toBytesArray(), equalTo(B_3)); - } catch (IOException ex) { - throw new ElasticsearchException("failed", 
ex); - } - } - }); - - engine.flush(); - engine.close(); - } - @Test public void testVersioningNewCreate() { ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null); @@ -1100,8 +898,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { int numIters = randomIntBetween(2, 10); for (int j = 0; j < numIters; j++) { try (Store store = createStore()) { - final Translog translog = createTranslog(); - final InternalEngine engine = createEngine(store, translog); + final InternalEngine engine = createEngine(store, createTempDir()); final CountDownLatch startGun = new CountDownLatch(1); final CountDownLatch indexed = new CountDownLatch(1); @@ -1144,7 +941,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean()); } indexed.await(); - IOUtils.close(engine, translog); + IOUtils.close(engine); } } @@ -1480,7 +1277,6 @@ public class InternalEngineTests extends ElasticsearchTestCase { assertNotNull(iwIFDLogger); } - Level savedLevel = iwIFDLogger.getLevel(); iwIFDLogger.addAppender(mockAppender); iwIFDLogger.setLevel(Level.DEBUG); @@ -1510,8 +1306,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { public void testEnableGcDeletes() throws Exception { IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); try (Store store = createStore(); - Translog translog = createTranslog(); - Engine engine = new InternalEngine(config(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService)), false)) { + Engine engine = new InternalEngine(config(indexSettingsService, store, createTempDir(), createMergeScheduler(indexSettingsService)), false)) { engine.config().setEnableGcDeletes(false); // Add document @@ -1577,7 +1372,7 @@ public class InternalEngineTests 
extends ElasticsearchTestCase { try (Engine.Searcher test = this.engine.acquireSearcher("test")) { ShardId shardId = ShardUtils.extractShardId(test.reader()); assertNotNull(shardId); - assertEquals(shardId, ((InternalEngine) engine).config().getShardId()); + assertEquals(shardId, engine.config().getShardId()); } } @@ -1595,13 +1390,13 @@ public class InternalEngineTests extends ElasticsearchTestCase { wrapper.setAllowRandomFileNotFoundException(randomBoolean()); wrapper.setRandomIOExceptionRate(randomDouble()); wrapper.setRandomIOExceptionRateOnOpen(randomDouble()); + final Path translogPath = createTempDir("testFailStart"); try (Store store = createStore(wrapper)) { int refCount = store.refCount(); assertTrue("refCount: " + store.refCount(), store.refCount() > 0); - Translog translog = createTranslog(); InternalEngine holder; try { - holder = createEngine(store, translog); + holder = createEngine(store, translogPath); } catch (EngineCreationFailureException ex) { assertEquals(store.refCount(), refCount); continue; @@ -1612,7 +1407,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { try { assertEquals(store.refCount(), refCount + 1); holder.close(); - holder = createEngine(store, translog); + holder = createEngine(store, translogPath); assertEquals(store.refCount(), refCount + 1); } catch (EngineCreationFailureException ex) { // all is fine @@ -1620,7 +1415,6 @@ public class InternalEngineTests extends ElasticsearchTestCase { break; } } - translog.close(); holder.close(); assertEquals(store.refCount(), refCount); } @@ -1629,7 +1423,6 @@ public class InternalEngineTests extends ElasticsearchTestCase { @Test public void testSettings() { - InternalEngine engine = (InternalEngine) this.engine; CodecService codecService = new CodecService(shardId.index()); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); @@ -1735,8 +1528,8 @@ public class InternalEngineTests extends ElasticsearchTestCase { 
.put(EngineConfig.INDEX_BUFFER_SIZE_SETTING, "1kb").build(); IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), indexSettings); try (Store store = createStore(); - Translog translog = createTranslog(); - final Engine engine = new InternalEngine(config(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService)), false)) { + Engine engine = new InternalEngine(config(indexSettingsService, store, createTempDir(), createMergeScheduler(indexSettingsService)), + false)) { for (int i = 0; i < 100; i++) { String id = Integer.toString(i); ParsedDocument doc = testParsedDocument(id, id, "test", null, -1, -1, testDocument(), B_1, null); @@ -1769,6 +1562,27 @@ public class InternalEngineTests extends ElasticsearchTestCase { } } + public void testMissingTranslog() throws IOException { + // test that we can force start the engine , even if the translog is missing. + engine.close(); + // fake a new translog, causing the engine to point to a missing one. + FsTranslog translog = createTranslog(); + translog.markCommitted(translog.currentId()); + // we have to re-open the translog because o.w. it will complain about commit information going backwards, which is OK as we did a fake markComitted + translog.close(); + try { + engine = createEngine(store, primaryTranslogDir); + fail("engine shouldn't start without a valid translog id"); + } catch (EngineCreationFailureException ex) { + // expected + } + // now it should be OK. 
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings) + .put(EngineConfig.INDEX_IGNORE_UNKNOWN_TRANSLOG, true).build()); + engine = createEngine(indexSettingsService, store, primaryTranslogDir, createMergeScheduler(indexSettingsService)); + } + + @TestLogging("index.translog:TRACE") public void testTranslogReplayWithFailure() throws IOException { boolean canHaveDuplicates = true; boolean autoGeneratedId = true; @@ -1799,20 +1613,10 @@ public class InternalEngineTests extends ElasticsearchTestCase { directory.setFailOnOpenInput(randomBoolean()); directory.setAllowRandomFileNotFoundException(randomBoolean()); try { - engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); started = true; break; } catch (EngineCreationFailureException ex) { - // sometimes we fail after we committed the recovered docs during the finaly refresh call - // that means hte index is consistent and recovered so we can't assert on the num recovered ops below. - try (IndexReader reader = DirectoryReader.open(directory.getDelegate())) { - if (reader.numDocs() == numDocs) { - recoveredButFailed = true; - break; - } else { - // skip - we just failed - } - } } } @@ -1821,20 +1625,16 @@ public class InternalEngineTests extends ElasticsearchTestCase { directory.setFailOnOpenInput(false); directory.setAllowRandomFileNotFoundException(false); if (started == false) { - engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); } } else { // no mock directory, no fun. 
- engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); } try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); assertThat(topDocs.totalHits, equalTo(numDocs)); } - if (recoveredButFailed == false) { - TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer(); - assertEquals(numDocs, parser.recoveredOps.get()); - } } @Test @@ -1859,11 +1659,8 @@ public class InternalEngineTests extends ElasticsearchTestCase { // this so we have to disable the check explicitly directory.setPreventDoubleWrite(false); } - long currentTranslogId = translog.currentId(); engine.close(); engine = new InternalEngine(engine.config(), true); - assertTrue(currentTranslogId + "<" + translog.currentId(), currentTranslogId < translog.currentId()); - assertEquals("translog ID must be incremented by 2 after initial recovery", currentTranslogId + 2, translog.currentId()); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); @@ -1903,11 +1700,8 @@ public class InternalEngineTests extends ElasticsearchTestCase { TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer(); parser.mappingUpdate = dynamicUpdate(); - long currentTranslogId = translog.currentId(); engine.close(); - engine = new InternalEngine(engine.config(), false); // we need to reuse the engine config otherwise the parser.mappingModified won't work - assertTrue(currentTranslogId + "<" + translog.currentId(), currentTranslogId < translog.currentId()); - assertEquals("translog ID must be incremented by 2 after initial recovery", currentTranslogId + 2, translog.currentId()); + engine = new InternalEngine(engine.config(), false); // we need to reuse the engine config unless the 
parser.mappingModified won't work try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); @@ -1923,7 +1717,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { } engine.close(); - engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); assertThat(topDocs.totalHits, equalTo(numDocs)); @@ -1953,7 +1747,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { } engine.close(); - engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); assertThat(topDocs.totalHits, equalTo(numDocs + 1)); @@ -1965,7 +1759,7 @@ public class InternalEngineTests extends ElasticsearchTestCase { engine.refresh("test"); } else { engine.close(); - engine = createEngine(store, translog); + engine = createEngine(store, primaryTranslogDir); } try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs); diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 69ae60591a6..21a9d851eed 100644 --- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy; @@ -93,8 +94,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { private Store store; private Store storeReplica; - protected Translog translog; - protected Translog replicaTranslog; protected Engine primaryEngine; protected Engine replicaEngine; @@ -130,8 +129,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { storeReplica = createStore(dirPath); Lucene.cleanLuceneIndex(store.directory()); Lucene.cleanLuceneIndex(storeReplica.directory()); - translog = createTranslog(); - primaryEngine = createInternalEngine(store, translog); + primaryEngine = createInternalEngine(store, createTempDir("translog-primary")); LiveIndexWriterConfig currentIndexWriterConfig = ((InternalEngine)primaryEngine).getCurrentIndexWriterConfig(); assertEquals(primaryEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); @@ -140,8 +138,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { primaryEngine.config().setEnableGcDeletes(false); } - replicaTranslog = createTranslogReplica(); - replicaEngine = createShadowEngine(storeReplica, replicaTranslog); + replicaEngine = createShadowEngine(storeReplica); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); if (randomBoolean()) { @@ -155,10 +152,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { super.tearDown(); replicaEngine.close(); storeReplica.close(); - - translog.close(); - replicaTranslog.close(); - primaryEngine.close(); store.close(); terminate(threadPool); @@ -202,14 +195,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { return new Store(shardId, EMPTY_SETTINGS, directoryService, new DummyShardLock(shardId)); } - protected Translog createTranslog() throws IOException { - return new 
FsTranslog(shardId, EMPTY_SETTINGS, createTempDir("translog-primary")); - } - - protected Translog createTranslogReplica() throws IOException { - return new FsTranslog(shardId, EMPTY_SETTINGS, createTempDir("translog-replica")); - } - protected IndexDeletionPolicy createIndexDeletionPolicy() { return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS); } @@ -226,36 +211,33 @@ public class ShadowEngineTests extends ElasticsearchTestCase { return new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, indexSettingsService); } - protected ShadowEngine createShadowEngine(Store store, Translog translog) { + protected ShadowEngine createShadowEngine(Store store) { IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - return createShadowEngine(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService)); + return createShadowEngine(indexSettingsService, store, createMergeScheduler(indexSettingsService)); } - protected InternalEngine createInternalEngine(Store store, Translog translog) { + protected InternalEngine createInternalEngine(Store store, Path translogPath) { IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), ImmutableSettings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()); - return createInternalEngine(indexSettingsService, store, translog, createMergeScheduler(indexSettingsService)); + return createInternalEngine(indexSettingsService, store, translogPath, createMergeScheduler(indexSettingsService)); } - protected ShadowEngine createShadowEngine(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) { - return new ShadowEngine(config(indexSettingsService, store, translog, mergeSchedulerProvider)); + protected 
ShadowEngine createShadowEngine(IndexSettingsService indexSettingsService, Store store, MergeSchedulerProvider mergeSchedulerProvider) { + return new ShadowEngine(config(indexSettingsService, store, null, mergeSchedulerProvider)); } - protected InternalEngine createInternalEngine(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) { - return new InternalEngine(config(indexSettingsService, store, translog, mergeSchedulerProvider), true); + protected InternalEngine createInternalEngine(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) { + return new InternalEngine(config(indexSettingsService, store, translogPath, mergeSchedulerProvider), true); } - public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Translog translog, MergeSchedulerProvider mergeSchedulerProvider) { + public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) { IndexWriterConfig iwc = newIndexWriterConfig(); EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService - , null, store, createSnapshotDeletionPolicy(), translog, createMergePolicy(), mergeSchedulerProvider, + , null, store, createSnapshotDeletionPolicy(), createMergePolicy(), mergeSchedulerProvider, iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(shardId.index()), new Engine.FailedEngineListener() { @Override public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { // we don't need to notify anybody in this test - } - }, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy()); - - + }}, null, IndexSearcher.getDefaultQueryCache(), 
IndexSearcher.getDefaultQueryCachingPolicy(), BigArrays.NON_RECYCLING_INSTANCE, translogPath); return config; } @@ -908,10 +890,9 @@ public class ShadowEngineTests extends ElasticsearchTestCase { try (Store store = createStore(wrapper)) { int refCount = store.refCount(); assertTrue("refCount: "+ store.refCount(), store.refCount() > 0); - Translog translog = createTranslog(); ShadowEngine holder; try { - holder = createShadowEngine(store, translog); + holder = createShadowEngine(store); } catch (EngineCreationFailureException ex) { assertEquals(store.refCount(), refCount); continue; @@ -922,7 +903,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { try { assertEquals(store.refCount(), refCount + 1); holder.close(); - holder = createShadowEngine(store, translog); + holder = createShadowEngine(store); assertEquals(store.refCount(), refCount + 1); } catch (EngineCreationFailureException ex) { // all is fine @@ -930,7 +911,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { break; } } - translog.close(); holder.close(); assertEquals(store.refCount(), refCount); } @@ -949,7 +929,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { final Path srDir = createTempDir(); final Store srStore = createStore(srDir); Lucene.cleanLuceneIndex(srStore.directory()); - final Translog srTranslog = createTranslogReplica(); final AtomicBoolean succeeded = new AtomicBoolean(false); final CountDownLatch latch = new CountDownLatch(1); @@ -964,7 +943,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { } catch (InterruptedException e) { // ignore interruptions } - try (ShadowEngine srEngine = createShadowEngine(srStore, srTranslog)) { + try (ShadowEngine srEngine = createShadowEngine(srStore)) { succeeded.set(true); } catch (Exception e) { fail("should have been able to create the engine!"); @@ -980,8 +959,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase { // Create an InternalEngine, which creates the index so the 
shadow // replica will handle it correctly Store pStore = createStore(srDir); - Translog pTranslog = createTranslog(); - InternalEngine pEngine = createInternalEngine(pStore, pTranslog); + InternalEngine pEngine = createInternalEngine(pStore, createTempDir("translog-primary")); // create a document ParseContext.Document document = testDocumentWithTextField(); @@ -993,6 +971,6 @@ public class ShadowEngineTests extends ElasticsearchTestCase { t.join(); assertTrue("ShadowEngine should have been able to be created", succeeded.get()); // (shadow engine is already shut down in the try-with-resources) - IOUtils.close(srTranslog, srStore, pTranslog, pEngine, pStore); + IOUtils.close(srStore, pEngine, pStore); } } diff --git a/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java b/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java index 654a377a4f1..15872851d45 100644 --- a/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java +++ b/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java @@ -67,6 +67,7 @@ public class TemplateQueryParserTest extends ElasticsearchTestCase { @Before public void setup() throws IOException { Settings settings = ImmutableSettings.settingsBuilder() + .put("path.home", createTempDir().toString()) .put("path.conf", this.getDataPath("config")) .put("name", getClass().getName()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java index 75cccb7000f..a7d30c465dd 100644 --- a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java +++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.settings.ImmutableSettings; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNameModule; import org.elasticsearch.index.analysis.AnalysisModule; @@ -56,7 +58,10 @@ public class IndexQueryParserPlugin2Tests extends ElasticsearchTestCase { @Test public void testCustomInjection() throws InterruptedException { - Settings settings = ImmutableSettings.builder().put("name", "testCustomInjection").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + Settings settings = ImmutableSettings.builder() + .put("name", "testCustomInjection") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir()).build(); IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings); queryParserModule.addQueryParser("my", PluginJsonQueryParser.class); @@ -64,6 +69,7 @@ public class IndexQueryParserPlugin2Tests extends ElasticsearchTestCase { Index index = new Index("test"); Injector injector = new ModulesBuilder().add( + new EnvironmentModule(new Environment(settings)), new SettingsModule(settings), new ThreadPoolModule(settings), new IndicesQueriesModule(), diff --git a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java index 92ce6bcde20..bbbee452284 100644 --- a/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java +++ b/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.env.Environment; 
+import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNameModule; import org.elasticsearch.index.analysis.AnalysisModule; @@ -56,7 +58,10 @@ public class IndexQueryParserPluginTests extends ElasticsearchTestCase { @Test public void testCustomInjection() throws InterruptedException { - Settings settings = ImmutableSettings.builder().put("name", "testCustomInjection").put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + Settings settings = ImmutableSettings.builder() + .put("name", "testCustomInjection") + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("path.home", createTempDir()).build(); IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings); queryParserModule.addProcessor(new IndexQueryParserModule.QueryParsersProcessor() { @@ -73,6 +78,7 @@ public class IndexQueryParserPluginTests extends ElasticsearchTestCase { Index index = new Index("test"); Injector injector = new ModulesBuilder().add( + new EnvironmentModule(new Environment(settings)), new SettingsModule(settings), new ThreadPoolModule(settings), new IndicesQueriesModule(), diff --git a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e17721a0f00..8464cf8e42a 100644 --- a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,18 +23,23 @@ import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.indices.IndicesService; import 
org.elasticsearch.test.ElasticsearchSingleNodeTest; +import org.junit.Test; +import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; /** * Simple unit-test IndexShard related operations. @@ -95,20 +100,20 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { IndexShard shard = test.shard(0); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(getShardStateMetadata(shard), shardStateMetaData); - ShardRouting routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version()+1); + ShardRouting routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); - routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version()+1); + routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); - routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version()+1); + routing = new MutableShardRouting(shard.shardRouting, 
shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); @@ -122,13 +127,13 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); - shard.updateRoutingEntry(new MutableShardRouting(shard.shardRouting, shard.shardRouting.version()+1), false); + shard.updateRoutingEntry(new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1), false); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertFalse("shard state persisted despite of persist=false", shardStateMetaData.equals(getShardStateMetadata(shard))); assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID))); - routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version()+1); + routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); @@ -153,15 +158,13 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); assertEquals(shardStateMetaData, getShardStateMetadata(shard)); - routing = new MutableShardRouting(shard.shardId.index().getName(), shard.shardId.id(), routing.currentNodeId(), routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.version()+1); + routing = new MutableShardRouting(shard.shardId.index().getName(), 
shard.shardId.id(), routing.currentNodeId(), routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.version() + 1); shard.updateRoutingEntry(routing, true); shard.deleteShardState(); assertNull("no shard state expected after delete on initializing", load(logger, env.availableShardPaths(shard.shardId))); - - } ShardStateMetaData getShardStateMetadata(IndexShard shard) { @@ -180,7 +183,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { assertEquals(meta.hashCode(), new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID).hashCode()); assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID))); - assertFalse(meta.equals(new ShardStateMetaData(meta.version+1, meta.primary, meta.indexUUID))); + assertFalse(meta.equals(new ShardStateMetaData(meta.version + 1, meta.primary, meta.indexUUID))); assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID + "foo"))); Set hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode @@ -191,6 +194,41 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { } + @Test + public void testDeleteIndexDecreasesCounter() throws InterruptedException, ExecutionException, IOException { + assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); + ensureGreen("test"); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe("test"); + IndexShard indexShard = indexService.shard(0); + client().admin().indices().prepareDelete("test").get(); + assertThat(indexShard.getOperationsCount(), equalTo(0)); + try { + indexShard.incrementOperationCounter(); + fail("we should not be able to increment anymore"); + } catch (IndexShardClosedException e) { + // expected + } + } + + @Test + 
public void testIndexShardCounter() throws InterruptedException, ExecutionException, IOException { + assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); + ensureGreen("test"); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe("test"); + IndexShard indexShard = indexService.shard(0); + indexShard.incrementOperationCounter(); + assertEquals(2, indexShard.getOperationsCount()); + indexShard.incrementOperationCounter(); + assertEquals(3, indexShard.getOperationsCount()); + indexShard.decrementOperationCounter(); + indexShard.decrementOperationCounter(); + assertEquals(1, indexShard.getOperationsCount()); + + + } + public static ShardStateMetaData load(ESLogger logger, Path... shardPaths) throws IOException { return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths); } diff --git a/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java b/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java new file mode 100644 index 00000000000..c45da660b00 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.translog; + +import org.elasticsearch.ElasticsearchException; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +import java.io.IOException; +import java.util.ArrayList; + + +public final class SnapshotMatchers { + private SnapshotMatchers() { + + } + + /** + * Consumes a snapshot and make sure it's size is as expected + */ + public static Matcher size(int size) { + return new SizeMatcher(size); + } + + /** + * Consumes a snapshot and make sure it's content is as expected + */ + public static Matcher equalsTo(Translog.Operation... 
ops) { + return new EqualMatcher(ops); + } + + /** + * Consumes a snapshot and make sure it's content is as expected + */ + public static Matcher equalsTo(ArrayList ops) { + return new EqualMatcher(ops.toArray(new Translog.Operation[ops.size()])); + } + + public static class SizeMatcher extends TypeSafeMatcher { + + private final int size; + + public SizeMatcher(int size) { + this.size = size; + } + + @Override + public boolean matchesSafely(Translog.Snapshot snapshot) { + int count = 0; + try { + while (snapshot.next() != null) { + count++; + } + } catch (IOException ex) { + throw new ElasticsearchException("failed to advance snapshot", ex); + } + return size == count; + } + + @Override + public void describeTo(Description description) { + description.appendText("a snapshot with size ").appendValue(size); + } + } + + public static class EqualMatcher extends TypeSafeMatcher { + + private final Translog.Operation[] expectedOps; + String failureMsg = null; + + public EqualMatcher(Translog.Operation[] expectedOps) { + this.expectedOps = expectedOps; + } + + @Override + protected boolean matchesSafely(Translog.Snapshot snapshot) { + try { + Translog.Operation op; + int i; + for (i = 0, op = snapshot.next(); op != null && i < expectedOps.length; i++, op = snapshot.next()) { + if (expectedOps[i].equals(op) == false) { + failureMsg = "position [" + i + "] expected [" + expectedOps[i] + "] but found [" + op + "]"; + return false; + } + } + + if (i < expectedOps.length) { + failureMsg = "expected [" + expectedOps.length + "] ops but only found [" + i + "]"; + return false; + } + + if (op != null) { + int count = 1; // to account for the op we already read + while (snapshot.next() != null) { + count++; + } + failureMsg = "expected [" + expectedOps.length + "] ops but got [" + (expectedOps.length + count) + "]"; + return false; + } + return true; + } catch (IOException ex) { + throw new ElasticsearchException("failed to read snapshot content", ex); + } + } + + @Override + 
public void describeTo(Description description) { + description.appendText(failureMsg); + } + } + + +} diff --git a/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java b/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java deleted file mode 100644 index 457bdca4688..00000000000 --- a/src/test/java/org/elasticsearch/index/translog/TranslogSizeMatcher.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.translog; - -import org.elasticsearch.ElasticsearchException; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeMatcher; - -import java.io.IOException; - -/** - * - */ -public class TranslogSizeMatcher extends TypeSafeMatcher { - - private final int size; - - public TranslogSizeMatcher(int size) { - this.size = size; - } - - @Override - public boolean matchesSafely(Translog.Snapshot snapshot) { - int count = 0; - long startingPosition = snapshot.position(); - try { - while (snapshot.next() != null) { - count++; - } - return size == count; - } catch (IOException ex) { - throw new ElasticsearchException("failed to advance iterator", ex); - } finally { - // Since counting the translog size consumes the stream, reset it - // back to the origin position after reading - snapshot.seekTo(startingPosition); - } - } - - @Override - public void describeTo(Description description) { - description.appendText("a translog with size ").appendValue(size); - } - - public static Matcher translogSize(int size) { - return new TranslogSizeMatcher(size); - } -} diff --git a/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java index ea2989ccbb2..80e620c9ccb 100644 --- a/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ b/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java @@ -44,7 +44,6 @@ public class TranslogVersionTests extends ElasticsearchTestCase { StreamInput in = stream.openInput(translogFile); - in.readInt(); Translog.Operation operation = stream.read(in); assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.SAVE, equalTo(true)); @@ -60,7 +59,6 @@ public class TranslogVersionTests extends ElasticsearchTestCase { assertThat(op.versionType(), equalTo(VersionType.INTERNAL)); try { - in.readInt(); stream.read(in); 
fail("should have been the end of the file"); } catch (EOFException e) { @@ -143,7 +141,7 @@ public class TranslogVersionTests extends ElasticsearchTestCase { fail("should have thrown an exception about the body being corrupted"); } catch (TranslogCorruptedException e) { assertThat("translog corruption from body: " + e.getMessage(), - e.getMessage().contains("translog stream is corrupted"), equalTo(true)); + e.getMessage().contains("translog corruption while reading from stream"), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java similarity index 54% rename from src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java rename to src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java index 1a5aa984455..d7e3c25edce 100644 --- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java @@ -17,18 +17,22 @@ * under the License. 
*/ -package org.elasticsearch.index.translog; +package org.elasticsearch.index.translog.fs; import org.apache.lucene.index.Term; +import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.*; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.After; @@ -42,12 +46,12 @@ import java.nio.channels.FileChannel; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CountDownLatch; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import static com.google.common.collect.Lists.newArrayList; import static org.hamcrest.Matchers.*; @@ -56,12 +60,28 @@ import static org.hamcrest.Matchers.*; * */ @LuceneTestCase.SuppressFileSystems("ExtrasFS") -public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase { +public abstract class AbstractTranslogTests extends ElasticsearchTestCase { protected final ShardId shardId = new ShardId(new Index("index"), 1); + protected FsTranslog translog; protected Path 
translogDir; - protected Translog translog; + + @Override + protected void afterIfSuccessful() throws Exception { + super.afterIfSuccessful(); + + if (translog.isOpen()) { + if (translog.currentId() > 1) { + translog.markCommitted(translog.currentId()); + assertFileDeleted(translog, translog.currentId() - 1); + } + translog.close(); + } + assertFileIsPresent(translog, translog.currentId()); + IOUtils.rm(translog.location()); // delete all the locations + + } @Override @Before @@ -69,8 +89,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase super.setUp(); // if a previous test failed we clean up things here translogDir = createTempDir(); - translog = create(translogDir); - translog.newTranslog(1); + translog = create(); } @Override @@ -78,17 +97,43 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase public void tearDown() throws Exception { try { translog.close(); - if (translog.currentId() > 1) { - // ensure all snapshots etc are closed if this fails something was not closed - assertFileDeleted(translog, translog.currentId() - 1); - } - assertFileIsPresent(translog, translog.currentId()); } finally { super.tearDown(); } } - protected abstract Translog create(Path translogDir) throws IOException; + + protected abstract FsTranslog create() throws IOException; + + + protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) { + list.add(op); + translog.add(op); + } + + + public void testIdParsingFromFile() { + long id = randomIntBetween(0, Integer.MAX_VALUE); + Path file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + id); + assertThat(FsTranslog.parseIdFromFileName(file), equalTo(id)); + + file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + id + ".recovering"); + assertThat(FsTranslog.parseIdFromFileName(file), equalTo(id)); + + file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + randomNonTranslogPatternString(1, 10) + id); + 
assertThat(FsTranslog.parseIdFromFileName(file), equalTo(-1l)); + + file = translogDir.resolve(randomNonTranslogPatternString(1, FsTranslog.TRANSLOG_FILE_PREFIX.length() - 1)); + assertThat(FsTranslog.parseIdFromFileName(file), equalTo(-1l)); + } + + private static String randomNonTranslogPatternString(int min, int max) { + String string; + do { + string = randomRealisticUnicodeOfCodepointLength(randomIntBetween(min, max)); + } while (FsTranslog.PARSE_ID_PATTERN.matcher(string).matches()); + return string; + } @Test public void testRead() throws IOException { @@ -105,70 +150,32 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3}))); } - @Test - public void testTransientTranslog() throws IOException { - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); - snapshot.close(); - - translog.add(new Translog.Create("test", "1", new byte[]{1})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); - snapshot.close(); - - translog.newTransientTranslog(2); - - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); - snapshot.close(); - - translog.add(new Translog.Index("test", "2", new byte[]{2})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(2)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(2)); - snapshot.close(); - - translog.makeTransientCurrent(); - - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // now its one, since it only includes "2" - assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); - snapshot.close(); - } - @Test public void testSimpleOperations() throws 
IOException { - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + ArrayList ops = new ArrayList<>(); + Translog.Snapshot snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.size(0)); snapshot.close(); - translog.add(new Translog.Create("test", "1", new byte[]{1})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); snapshot.close(); - translog.add(new Translog.Index("test", "2", new byte[]{2})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(2)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(2)); + addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2})); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size())); snapshot.close(); - translog.add(new Translog.Delete(newUid("3"))); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(3)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(3)); + addToTranslogAndList(translog, ops, new Translog.Delete(newUid("3"))); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size())); snapshot.close(); - translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null)); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(4)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(4)); - snapshot.close(); - - snapshot = translog.snapshot(); + snapshot = translog.newSnapshot(); 
Translog.Create create = (Translog.Create) snapshot.next(); assertThat(create != null, equalTo(true)); @@ -182,33 +189,26 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase assertThat(delete != null, equalTo(true)); assertThat(delete.uid(), equalTo(newUid("3"))); - Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) snapshot.next(); - assertThat(deleteByQuery != null, equalTo(true)); - assertThat(deleteByQuery.source().toBytes(), equalTo(new byte[]{4})); - assertThat(snapshot.next(), equalTo(null)); snapshot.close(); long firstId = translog.currentId(); - translog.newTranslog(2); + translog.newTranslog(); assertThat(translog.currentId(), Matchers.not(equalTo(firstId))); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size())); + snapshot.close(); + + translog.markCommitted(translog.currentId()); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.estimatedTotalOperations(), equalTo(0)); snapshot.close(); } - @Test(expected = TranslogException.class) - public void testReuseFails() throws IOException { - if (randomBoolean()) { - translog.newTranslog(1); - } else { - translog.newTransientTranslog(1); - } - } - protected TranslogStats stats() throws IOException { // force flushing and updating of stats translog.sync(); @@ -248,88 +248,69 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); lastSize = stats.translogSizeInBytes().bytes(); - - translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null)); + translog.add(new Translog.Delete(newUid("4"))); + translog.newTranslog(); stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(4l)); 
assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); - translog.newTranslog(2); + translog.markCommitted(2); stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(0l)); assertThat(stats.translogSizeInBytes().bytes(), equalTo(17l)); } @Test - public void testSnapshot() throws IOException { - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + public void testSnapshot() { + ArrayList ops = new ArrayList<>(); + Translog.Snapshot snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.size(0)); snapshot.close(); - translog.add(new Translog.Create("test", "1", new byte[]{1})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); + + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); snapshot.close(); - snapshot = translog.snapshot(); - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat(create != null, equalTo(true)); - assertThat(create.source().toBytes(), equalTo(new byte[]{1})); - snapshot.close(); + snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); - Translog.Snapshot snapshot1 = translog.snapshot(); - assertThat(snapshot1, TranslogSizeMatcher.translogSize(1)); + // snapshot while another is open + Translog.Snapshot snapshot1 = translog.newSnapshot(); + assertThat(snapshot1, SnapshotMatchers.size(1)); assertThat(snapshot1.estimatedTotalOperations(), equalTo(1)); - // seek to the end of the translog snapshot - while (snapshot1.next() != null) { - // spin - } - - translog.add(new Translog.Index("test", "2", new byte[]{2})); - snapshot = translog.snapshot(snapshot1); - assertThat(snapshot, 
TranslogSizeMatcher.translogSize(1)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(2)); - snapshot.close(); - - snapshot = translog.snapshot(snapshot1); - Translog.Index index = (Translog.Index) snapshot.next(); - assertThat(index != null, equalTo(true)); - assertThat(index.source().toBytes(), equalTo(new byte[]{2})); - assertThat(snapshot.next(), equalTo(null)); - assertThat(snapshot.estimatedTotalOperations(), equalTo(2)); snapshot.close(); snapshot1.close(); } @Test public void testSnapshotWithNewTranslog() throws IOException { - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + ArrayList ops = new ArrayList<>(); + Translog.Snapshot snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.size(0)); snapshot.close(); - translog.add(new Translog.Create("test", "1", new byte[]{1})); - Translog.Snapshot actualSnapshot = translog.snapshot(); + addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1})); + Translog.Snapshot snapshot1 = translog.newSnapshot(); - translog.add(new Translog.Index("test", "2", new byte[]{2})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2})); - translog.newTranslog(2); + translog.newTranslog(); - translog.add(new Translog.Index("test", "3", new byte[]{3})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "3", new byte[]{3})); - snapshot = translog.snapshot(actualSnapshot); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - snapshot.close(); + Translog.Snapshot snapshot2 = translog.newSnapshot(); + assertThat(snapshot2, SnapshotMatchers.equalsTo(ops)); + assertThat(snapshot2.estimatedTotalOperations(), equalTo(ops.size())); - snapshot = translog.snapshot(actualSnapshot); - Translog.Index index = (Translog.Index) snapshot.next(); - assertThat(index != null, equalTo(true)); - assertThat(index.source().toBytes(), equalTo(new byte[]{3})); - 
assertThat(snapshot.next(), equalTo(null)); - actualSnapshot.close(); - snapshot.close(); + assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); + snapshot1.close(); + snapshot2.close(); } public void testSnapshotOnClosedTranslog() throws IOException { @@ -337,106 +318,60 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.add(new Translog.Create("test", "1", new byte[]{1})); translog.close(); try { - Translog.Snapshot snapshot = translog.snapshot(); + Translog.Snapshot snapshot = translog.newSnapshot(); fail("translog is closed"); } catch (TranslogException ex) { - assertEquals(ex.getMessage(), "current translog is already closed"); + assertThat(ex.getMessage(), containsString("can't increment channel")); } } @Test - public void deleteOnRollover() throws IOException { - translog.add(new Translog.Create("test", "1", new byte[]{1})); + public void deleteOnSnapshotRelease() throws Exception { + ArrayList firstOps = new ArrayList<>(); + addToTranslogAndList(translog, firstOps, new Translog.Create("test", "1", new byte[]{1})); - Translog.Snapshot firstSnapshot = translog.snapshot(); - assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); + Translog.Snapshot firstSnapshot = translog.newSnapshot(); assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1)); - translog.newTransientTranslog(2); + translog.newTranslog(); + translog.markCommitted(translog.currentId()); assertFileIsPresent(translog, 1); - translog.add(new Translog.Index("test", "2", new byte[]{2})); - assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); + ArrayList secOps = new ArrayList<>(); + addToTranslogAndList(translog, secOps, new Translog.Index("test", "2", new byte[]{2})); assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1)); - if (randomBoolean()) { - translog.clearUnreferenced(); - } - translog.makeTransientCurrent(); - Translog.Snapshot secondSnapshot = translog.snapshot(); + + Translog.Snapshot secondSnapshot = 
translog.newSnapshot(); translog.add(new Translog.Index("test", "3", new byte[]{3})); - assertThat(secondSnapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(secondSnapshot, SnapshotMatchers.equalsTo(secOps)); assertThat(secondSnapshot.estimatedTotalOperations(), equalTo(1)); assertFileIsPresent(translog, 1); assertFileIsPresent(translog, 2); - if (randomBoolean()) { - translog.clearUnreferenced(); - } + firstSnapshot.close(); assertFileDeleted(translog, 1); assertFileIsPresent(translog, 2); secondSnapshot.close(); assertFileIsPresent(translog, 2); // it's the current nothing should be deleted - if (randomBoolean()) { - translog.clearUnreferenced(); - } - translog.newTransientTranslog(3); - translog.makeTransientCurrent(); - if (randomBoolean()) { - translog.clearUnreferenced(); - } + translog.newTranslog(); + translog.markCommitted(translog.currentId()); assertFileIsPresent(translog, 3); // it's the current nothing should be deleted assertFileDeleted(translog, 2); - assertEquals(3, translog.findLargestPresentTranslogId()); - - translog.newTransientTranslog(4); - translog.revertTransient(); - assertFileIsPresent(translog, 3); // it's the current nothing should be deleted - assertFileDeleted(translog, 4); } - public void assertFileIsPresent(Translog translog, long id) { - if(Files.exists(translog.location().resolve(translog.getFilename(id)))) { + + public void assertFileIsPresent(FsTranslog translog, long id) { + if (Files.exists(translogDir.resolve(translog.getFilename(id)))) { return; } fail(translog.getFilename(id) + " is not present in any location: " + translog.location()); } - public void assertFileDeleted(Translog translog, long id) { - assertFalse(Files.exists(translog.location().resolve(translog.getFilename(id)))); + public void assertFileDeleted(FsTranslog translog, long id) { + assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(translog.getFilename(id)))); } - @Test - public void testSnapshotWithSeekTo() 
throws IOException { - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); - snapshot.close(); - - translog.add(new Translog.Create("test", "1", new byte[]{1})); - snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - // seek to the end of the translog snapshot - while (snapshot.next() != null) { - // spin - } - long lastPosition = snapshot.position(); - snapshot.close(); - - translog.add(new Translog.Create("test", "2", new byte[]{1})); - snapshot = translog.snapshot(); - snapshot.seekTo(lastPosition); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); - snapshot.close(); - - snapshot = translog.snapshot(); - snapshot.seekTo(lastPosition); - Translog.Create create = (Translog.Create) snapshot.next(); - assertThat(create != null, equalTo(true)); - assertThat(create.id(), equalTo("2")); - snapshot.close(); - } - - static class LocationOperation { final Translog.Operation operation; final Translog.Location location; @@ -445,6 +380,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase this.operation = operation; this.location = location; } + } @Test @@ -482,11 +418,8 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase randomFrom(VersionType.values())); break; case DELETE_BY_QUERY: - op = new Translog.DeleteByQuery( - new BytesArray(randomRealisticUnicodeOfLengthBetween(10, 400).getBytes("UTF-8")), - new String[]{randomRealisticUnicodeOfLengthBetween(10, 400)}, - "test"); - break; + // deprecated + continue; default: throw new ElasticsearchException("not supported op type"); } @@ -544,14 +477,6 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase assertEquals(expDelOp.version(), delOp.version()); assertEquals(expDelOp.versionType(), delOp.versionType()); break; - case DELETE_BY_QUERY: - Translog.DeleteByQuery delQueryOp = (Translog.DeleteByQuery) op; - 
Translog.DeleteByQuery expDelQueryOp = (Translog.DeleteByQuery) expectedOp; - assertThat(expDelQueryOp.source(), equalTo(delQueryOp.source())); - assertThat(expDelQueryOp.filteringAliases(), equalTo(delQueryOp.filteringAliases())); - assertThat(expDelQueryOp.types(), equalTo(delQueryOp.types())); - break; - default: throw new ElasticsearchException("unsupported opType"); } @@ -655,10 +580,11 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase @Test public void testVerifyTranslogIsNotDeleted() throws IOException { - assertTrue(Files.exists(translogDir.resolve("translog-1"))); + assertFileIsPresent(translog, 1); translog.add(new Translog.Create("test", "1", new byte[]{1})); - Translog.Snapshot snapshot = translog.snapshot(); - assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + Translog.Snapshot snapshot = translog.newSnapshot(); + assertThat(snapshot, SnapshotMatchers.size(1)); + assertFileIsPresent(translog, 1); assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); if (randomBoolean()) { translog.close(); @@ -668,6 +594,181 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.close(); } - assertTrue(Files.exists(translogDir.resolve("translog-1"))); + assertFileIsPresent(translog, 1); } + + /** Tests that concurrent readers and writes maintain view and snapshot semantics */ + @Test + public void testConcurrentWriteViewsAndSnapshot() throws Throwable { + final Thread[] writers = new Thread[randomIntBetween(1, 10)]; + final Thread[] readers = new Thread[randomIntBetween(1, 10)]; + final int flushEveryOps = randomIntBetween(5, 100); + // used to notify main thread that so many operations have been written so it can simulate a flush + final AtomicReference writtenOpsLatch = new AtomicReference<>(new CountDownLatch(0)); + final AtomicLong idGenerator = new AtomicLong(); + final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1); + + // a map of all written 
ops and their returned location. + final Map writtenOps = ConcurrentCollections.newConcurrentMap(); + + // a signal for all threads to stop + final AtomicBoolean run = new AtomicBoolean(true); + + // any errors on threads + final List errors = new CopyOnWriteArrayList<>(); + logger.debug("using [{}] readers. [{}] writers. flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); + for (int i = 0; i < writers.length; i++) { + final String threadId = "writer_" + i; + writers[i] = new Thread(new AbstractRunnable() { + @Override + public void doRun() throws BrokenBarrierException, InterruptedException { + barrier.await(); + int counter = 0; + while (run.get()) { + long id = idGenerator.incrementAndGet(); + final Translog.Operation op; + switch (Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type.values().length))]) { + case CREATE: + op = new Translog.Create("type", "" + id, new byte[]{(byte) id}); + break; + case SAVE: + op = new Translog.Index("type", "" + id, new byte[]{(byte) id}); + break; + case DELETE: + op = new Translog.Delete(newUid("" + id)); + break; + case DELETE_BY_QUERY: + // deprecated + continue; + default: + throw new ElasticsearchException("unknown type"); + } + Translog.Location location = translog.add(op); + Translog.Location existing = writtenOps.put(op, location); + if (existing != null) { + fail("duplicate op [" + op + "], old entry at " + location); + } + writtenOpsLatch.get().countDown(); + counter++; + } + logger.debug("--> [{}] done. 
wrote [{}] ops.", threadId, counter); + } + + @Override + public void onFailure(Throwable t) { + logger.error("--> writer [{}] had an error", t, threadId); + errors.add(t); + } + }, threadId); + writers[i].start(); + } + + for (int i = 0; i < readers.length; i++) { + final String threadId = "reader_" + i; + readers[i] = new Thread(new AbstractRunnable() { + Translog.View view = null; + Set writtenOpsAtView; + + @Override + public void onFailure(Throwable t) { + logger.error("--> reader [{}] had an error", t, threadId); + errors.add(t); + closeView(); + } + + void closeView() { + if (view != null) { + view.close(); + } + } + + void newView() { + closeView(); + view = translog.newView(); + // captures the currently written ops so we know what to expect from the view + writtenOpsAtView = new HashSet<>(writtenOps.keySet()); + logger.debug("--> [{}] opened view from [{}]", threadId, view.minTranslogId()); + } + + @Override + protected void doRun() throws Exception { + barrier.await(); + int iter = 0; + while (run.get()) { + if (iter++ % 10 == 0) { + newView(); + } + + // captures all ops that are written since the view was created (with a small caveat, see below) + // these are what we expect the snapshot to return (and potentially some more). + Set expectedOps = new HashSet<>(writtenOps.keySet()); + expectedOps.removeAll(writtenOpsAtView); + try (Translog.Snapshot snapshot = view.snapshot()) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + expectedOps.remove(op); + } + } + if (expectedOps.isEmpty() == false) { + StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()).append(" operations"); + boolean failed = false; + for (Translog.Operation op : expectedOps) { + final Translog.Location loc = writtenOps.get(op); + if (loc.translogId < view.minTranslogId()) { + // writtenOps is only updated after the op was written to the translog. 
This means + // that ops written to the translog before the view was taken (and will be missing from the view) + // may yet be available in writtenOpsAtView, meaning we will erroneously expect them + continue; + } + failed = true; + missed.append("\n --> [").append(op).append("] written at ").append(loc); + } + if (failed) { + fail(missed.toString()); + } + } + // slow down things a bit and spread out testing.. + writtenOpsLatch.get().await(200, TimeUnit.MILLISECONDS); + } + closeView(); + logger.debug("--> [{}] done. tested [{}] snapshots", threadId, iter); + } + }, threadId); + readers[i].start(); + } + + barrier.await(); + try { + long previousId = translog.currentId(); + for (int iterations = scaledRandomIntBetween(10, 200); iterations > 0 && errors.isEmpty(); iterations--) { + writtenOpsLatch.set(new CountDownLatch(flushEveryOps)); + while (writtenOpsLatch.get().await(200, TimeUnit.MILLISECONDS) == false) { + if (errors.size() > 0) { + break; + } + } + long newId = translog.newTranslog(); + translog.markCommitted(previousId); + previousId = newId; + } + } finally { + run.set(false); + logger.debug("--> waiting for threads to stop"); + for (Thread thread : writers) { + thread.join(); + } + for (Thread thread : readers) { + thread.join(); + } + if (errors.size() > 0) { + Throwable e = errors.get(0); + for (Throwable suppress : errors.subList(1, errors.size())) { + e.addSuppressed(suppress); + } + throw e; + } + logger.info("--> test done. 
total ops written [{}]", writtenOps.size()); + } + } + } diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java index 574fac11506..d87c2d53e8d 100644 --- a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java @@ -20,25 +20,25 @@ package org.elasticsearch.index.translog.fs; import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.index.translog.AbstractSimpleTranslogTests; -import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; -import java.nio.file.Path; /** * */ -public class FsBufferedTranslogTests extends AbstractSimpleTranslogTests { +@TestLogging("index.translog.fs:TRACE") +public class FsBufferedTranslogTests extends AbstractTranslogTests { @Override - protected Translog create(Path translogDir) throws IOException { + protected FsTranslog create() throws IOException { return new FsTranslog(shardId, ImmutableSettings.settingsBuilder() .put("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name()) .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024)) .build(), - translogDir + BigArrays.NON_RECYCLING_INSTANCE, translogDir ); } } diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java index 8a3a073d8c0..8ee569fe27c 100644 --- a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java @@ -20,22 +20,21 @@ package org.elasticsearch.index.translog.fs; import org.elasticsearch.common.settings.ImmutableSettings; -import 
org.elasticsearch.index.translog.AbstractSimpleTranslogTests; -import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; -import java.nio.file.Path; /** * */ -public class FsSimpleTranslogTests extends AbstractSimpleTranslogTests { +@TestLogging("index.translog.fs:TRACE") +public class FsSimpleTranslogTests extends AbstractTranslogTests { @Override - protected Translog create(Path translogDir) throws IOException { + protected FsTranslog create() throws IOException { return new FsTranslog(shardId, ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.SIMPLE.name()).build(), - translogDir); + BigArrays.NON_RECYCLING_INSTANCE, translogDir); } - -} +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java index 304940e8141..742f5d1615e 100644 --- a/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java +++ b/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java @@ -79,17 +79,6 @@ public class HunspellServiceTests extends ElasticsearchIntegrationTest { assertIgnoreCase(true, dictionary); } - @Test - public void testCustomizeLocaleDirectory() throws Exception { - Settings settings = ImmutableSettings.settingsBuilder() - .put(HUNSPELL_LOCATION, getDataPath("/indices/analyze/conf_dir/hunspell")) - .build(); - - internalCluster().startNode(settings); - Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US"); - assertThat(dictionary, notNullValue()); - } - @Test public void testDicWithNoAff() throws Exception { Settings settings = ImmutableSettings.settingsBuilder() diff --git a/src/main/java/org/elasticsearch/action/ClientAction.java b/src/test/java/org/elasticsearch/monitor/SigarTests.java 
similarity index 55% rename from src/main/java/org/elasticsearch/action/ClientAction.java rename to src/test/java/org/elasticsearch/monitor/SigarTests.java index a38b8dad1e0..f582c2fdc1c 100644 --- a/src/main/java/org/elasticsearch/action/ClientAction.java +++ b/src/test/java/org/elasticsearch/monitor/SigarTests.java @@ -17,17 +17,26 @@ * under the License. */ -package org.elasticsearch.action; +package org.elasticsearch.monitor; -import org.elasticsearch.client.Client; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.hyperic.sigar.Sigar; -/** - * Action (used with {@link Client} API). - */ -public abstract class ClientAction> - extends Action { - - protected ClientAction(String name) { - super(name); +public class SigarTests extends ElasticsearchTestCase { + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeTrue("we can only ensure sigar is working when running from maven", + Boolean.parseBoolean(System.getProperty("tests.maven"))); + } + + public void testSigarLoads() throws Exception { + Sigar.load(); + } + + public void testSigarWorks() throws Exception { + Sigar sigar = new Sigar(); + assertNotNull(sigar.getCpu()); } } diff --git a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java index 8db6fd4e5c0..25cc99820b6 100644 --- a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java @@ -44,11 +44,20 @@ public class InternalSettingsPreparerTests extends ElasticsearchTestCase { @Test public void testIgnoreSystemProperties() { - Tuple tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("node.zone", "bar").build(), true); + Settings settings = settingsBuilder() + .put("node.zone", "bar") + .put("path.home", createTempDir().toString()) + .build(); + Tuple tuple = 
InternalSettingsPreparer.prepareSettings(settings, true); // Should use setting from the system property assertThat(tuple.v1().get("node.zone"), equalTo("foo")); - tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder().put("config.ignore_system_properties", true).put("node.zone", "bar").build(), true); + settings = settingsBuilder() + .put("config.ignore_system_properties", true) + .put("node.zone", "bar") + .put("path.home", createTempDir().toString()) + .build(); + tuple = InternalSettingsPreparer.prepareSettings(settings, true); // Should use setting from the system property assertThat(tuple.v1().get("node.zone"), equalTo("bar")); } diff --git a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java index dc58961cc4b..af07a25f3ae 100644 --- a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java +++ b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java @@ -33,8 +33,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.*; import org.elasticsearch.client.support.AbstractClient; -import org.elasticsearch.client.support.AbstractClusterAdminClient; -import org.elasticsearch.client.support.AbstractIndicesAdminClient; +import org.elasticsearch.client.support.Headers; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -110,31 +109,32 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) { 
- SearchRequest searchRequest = Requests.searchRequest(); - putHeaders(searchRequest, transportHeaders); - putContext(searchRequest, transportContext); - assertHeaders(searchRequest, transportHeaders); - client.search(searchRequest); - assertHeaders(searchRequest, expectedHeaders); - assertContext(searchRequest, expectedContext); + SearchRequest searchRequest = Requests.searchRequest(); + putHeaders(searchRequest, transportHeaders); + putContext(searchRequest, transportContext); + assertHeaders(searchRequest, transportHeaders); + client.search(searchRequest); + assertHeaders(searchRequest, expectedHeaders); + assertContext(searchRequest, expectedContext); - GetRequest getRequest = Requests.getRequest("index"); - putHeaders(getRequest, transportHeaders); - putContext(getRequest, transportContext); - assertHeaders(getRequest, transportHeaders); - client.get(getRequest); - assertHeaders(getRequest, expectedHeaders); - assertContext(getRequest, expectedContext); + GetRequest getRequest = Requests.getRequest("index"); + putHeaders(getRequest, transportHeaders); + putContext(getRequest, transportContext); + assertHeaders(getRequest, transportHeaders); + client.get(getRequest); + assertHeaders(getRequest, expectedHeaders); + assertContext(getRequest, expectedContext); - IndexRequest indexRequest = Requests.indexRequest(); - putHeaders(indexRequest, transportHeaders); - putContext(indexRequest, transportContext); - assertHeaders(indexRequest, transportHeaders); - client.index(indexRequest); - assertHeaders(indexRequest, expectedHeaders); - assertContext(indexRequest, expectedContext); + IndexRequest indexRequest = Requests.indexRequest(); + putHeaders(indexRequest, transportHeaders); + putContext(indexRequest, transportContext); + assertHeaders(indexRequest, transportHeaders); + client.index(indexRequest); + assertHeaders(indexRequest, expectedHeaders); + assertContext(indexRequest, expectedContext); + } } @Test @@ -155,31 +155,32 @@ public class 
HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, expectedContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, expectedContext), usefulRestHeaders)) { - ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(); - putHeaders(clusterHealthRequest, transportHeaders); - putContext(clusterHealthRequest, transportContext); - assertHeaders(clusterHealthRequest, transportHeaders); - client.admin().cluster().health(clusterHealthRequest); - assertHeaders(clusterHealthRequest, expectedHeaders); - assertContext(clusterHealthRequest, expectedContext); + ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(); + putHeaders(clusterHealthRequest, transportHeaders); + putContext(clusterHealthRequest, transportContext); + assertHeaders(clusterHealthRequest, transportHeaders); + client.admin().cluster().health(clusterHealthRequest); + assertHeaders(clusterHealthRequest, expectedHeaders); + assertContext(clusterHealthRequest, expectedContext); - ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); - putHeaders(clusterStateRequest, transportHeaders); - putContext(clusterStateRequest, transportContext); - assertHeaders(clusterStateRequest, transportHeaders); - client.admin().cluster().state(clusterStateRequest); - assertHeaders(clusterStateRequest, expectedHeaders); - assertContext(clusterStateRequest, expectedContext); + ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest(); + putHeaders(clusterStateRequest, transportHeaders); + putContext(clusterStateRequest, transportContext); + assertHeaders(clusterStateRequest, transportHeaders); + client.admin().cluster().state(clusterStateRequest); + assertHeaders(clusterStateRequest, expectedHeaders); + assertContext(clusterStateRequest, 
expectedContext); - ClusterStatsRequest clusterStatsRequest = Requests.clusterStatsRequest(); - putHeaders(clusterStatsRequest, transportHeaders); - putContext(clusterStatsRequest, transportContext); - assertHeaders(clusterStatsRequest, transportHeaders); - client.admin().cluster().clusterStats(clusterStatsRequest); - assertHeaders(clusterStatsRequest, expectedHeaders); - assertContext(clusterStatsRequest, expectedContext); + ClusterStatsRequest clusterStatsRequest = Requests.clusterStatsRequest(); + putHeaders(clusterStatsRequest, transportHeaders); + putContext(clusterStatsRequest, transportContext); + assertHeaders(clusterStatsRequest, transportHeaders); + client.admin().cluster().clusterStats(clusterStatsRequest); + assertHeaders(clusterStatsRequest, expectedHeaders); + assertContext(clusterStatsRequest, expectedContext); + } } @Test @@ -200,31 +201,32 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) { - CreateIndexRequest createIndexRequest = Requests.createIndexRequest("test"); - putHeaders(createIndexRequest, transportHeaders); - putContext(createIndexRequest, transportContext); - assertHeaders(createIndexRequest, transportHeaders); - client.admin().indices().create(createIndexRequest); - assertHeaders(createIndexRequest, expectedHeaders); - assertContext(createIndexRequest, expectedContext); + CreateIndexRequest createIndexRequest = Requests.createIndexRequest("test"); + putHeaders(createIndexRequest, transportHeaders); + putContext(createIndexRequest, transportContext); + assertHeaders(createIndexRequest, transportHeaders); + client.admin().indices().create(createIndexRequest); + assertHeaders(createIndexRequest, 
expectedHeaders); + assertContext(createIndexRequest, expectedContext); - CloseIndexRequest closeIndexRequest = Requests.closeIndexRequest("test"); - putHeaders(closeIndexRequest, transportHeaders); - putContext(closeIndexRequest, transportContext); - assertHeaders(closeIndexRequest, transportHeaders); - client.admin().indices().close(closeIndexRequest); - assertHeaders(closeIndexRequest, expectedHeaders); - assertContext(closeIndexRequest, expectedContext); + CloseIndexRequest closeIndexRequest = Requests.closeIndexRequest("test"); + putHeaders(closeIndexRequest, transportHeaders); + putContext(closeIndexRequest, transportContext); + assertHeaders(closeIndexRequest, transportHeaders); + client.admin().indices().close(closeIndexRequest); + assertHeaders(closeIndexRequest, expectedHeaders); + assertContext(closeIndexRequest, expectedContext); - FlushRequest flushRequest = Requests.flushRequest(); - putHeaders(flushRequest, transportHeaders); - putContext(flushRequest, transportContext); - assertHeaders(flushRequest, transportHeaders); - client.admin().indices().flush(flushRequest); - assertHeaders(flushRequest, expectedHeaders); - assertContext(flushRequest, expectedContext); + FlushRequest flushRequest = Requests.flushRequest(); + putHeaders(flushRequest, transportHeaders); + putContext(flushRequest, transportContext); + assertHeaders(flushRequest, transportHeaders); + client.admin().indices().flush(flushRequest); + assertHeaders(flushRequest, expectedHeaders); + assertContext(flushRequest, expectedContext); + } } @Test @@ -245,25 +247,26 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) { - ActionRequestBuilder requestBuilders [] = new 
ActionRequestBuilder[] { - client.prepareIndex("index", "type"), - client.prepareGet("index", "type", "id"), - client.prepareBulk(), - client.prepareDelete(), - client.prepareIndex(), - client.prepareClearScroll(), - client.prepareMultiGet(), - }; + ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{ + client.prepareIndex("index", "type"), + client.prepareGet("index", "type", "id"), + client.prepareBulk(), + client.prepareDelete(), + client.prepareIndex(), + client.prepareClearScroll(), + client.prepareMultiGet(), + }; - for (ActionRequestBuilder requestBuilder : requestBuilders) { - putHeaders(requestBuilder.request(), transportHeaders); - putContext(requestBuilder.request(), transportContext); - assertHeaders(requestBuilder.request(), transportHeaders); - requestBuilder.get(); - assertHeaders(requestBuilder.request(), expectedHeaders); - assertContext(requestBuilder.request(), expectedContext); + for (ActionRequestBuilder requestBuilder : requestBuilders) { + putHeaders(requestBuilder.request(), transportHeaders); + putContext(requestBuilder.request(), transportContext); + assertHeaders(requestBuilder.request(), transportHeaders); + requestBuilder.get(); + assertHeaders(requestBuilder.request(), expectedHeaders); + assertContext(requestBuilder.request(), expectedContext); + } } } @@ -285,24 +288,25 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) { - ActionRequestBuilder requestBuilders [] = new ActionRequestBuilder[] { - client.admin().cluster().prepareNodesInfo(), - client.admin().cluster().prepareClusterStats(), - client.admin().cluster().prepareState(), - client.admin().cluster().prepareCreateSnapshot("repo", 
"name"), - client.admin().cluster().prepareHealth(), - client.admin().cluster().prepareReroute() - }; + ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{ + client.admin().cluster().prepareNodesInfo(), + client.admin().cluster().prepareClusterStats(), + client.admin().cluster().prepareState(), + client.admin().cluster().prepareCreateSnapshot("repo", "name"), + client.admin().cluster().prepareHealth(), + client.admin().cluster().prepareReroute() + }; - for (ActionRequestBuilder requestBuilder : requestBuilders) { - putHeaders(requestBuilder.request(), transportHeaders); - putContext(requestBuilder.request(), transportContext); - assertHeaders(requestBuilder.request(), transportHeaders); - requestBuilder.get(); - assertHeaders(requestBuilder.request(), expectedHeaders); - assertContext(requestBuilder.request(), expectedContext); + for (ActionRequestBuilder requestBuilder : requestBuilders) { + putHeaders(requestBuilder.request(), transportHeaders); + putContext(requestBuilder.request(), transportContext); + assertHeaders(requestBuilder.request(), transportHeaders); + requestBuilder.get(); + assertHeaders(requestBuilder.request(), expectedHeaders); + assertContext(requestBuilder.request(), expectedContext); + } } } @@ -324,25 +328,26 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { expectedContext.putAll(transportContext); expectedContext.putAll(restContext); - Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders); + try (Client client = client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) { - ActionRequestBuilder requestBuilders [] = new ActionRequestBuilder[] { - client.admin().indices().prepareValidateQuery(), - client.admin().indices().prepareCreate("test"), - client.admin().indices().prepareAliases(), - client.admin().indices().prepareAnalyze("text"), - client.admin().indices().prepareDeleteWarmer(), - 
client.admin().indices().prepareTypesExists("type"), - client.admin().indices().prepareClose() - }; + ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{ + client.admin().indices().prepareValidateQuery(), + client.admin().indices().prepareCreate("test"), + client.admin().indices().prepareAliases(), + client.admin().indices().prepareAnalyze("text"), + client.admin().indices().prepareDeleteWarmer(), + client.admin().indices().prepareTypesExists("type"), + client.admin().indices().prepareClose() + }; - for (ActionRequestBuilder requestBuilder : requestBuilders) { - putHeaders(requestBuilder.request(), transportHeaders); - putContext(requestBuilder.request(), transportContext); - assertHeaders(requestBuilder.request(), transportHeaders); - requestBuilder.get(); - assertHeaders(requestBuilder.request(), expectedHeaders); - assertContext(requestBuilder.request(), expectedContext); + for (ActionRequestBuilder requestBuilder : requestBuilders) { + putHeaders(requestBuilder.request(), transportHeaders); + putContext(requestBuilder.request(), transportContext); + assertHeaders(requestBuilder.request(), transportHeaders); + requestBuilder.get(); + assertHeaders(requestBuilder.request(), expectedHeaders); + assertContext(requestBuilder.request(), expectedContext); + } } } @@ -417,76 +422,24 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase { } } - private static class NoOpClient extends AbstractClient implements AdminClient { + private class NoOpClient extends AbstractClient { - @Override - public AdminClient admin() { - return this; + public NoOpClient() { + super(ImmutableSettings.EMPTY, new ThreadPool(getTestName()), Headers.EMPTY); } @Override - public Settings settings() { - return null; - } - - @Override - public > ActionFuture execute(Action action, Request request) { - return null; - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { + protected > void doExecute(Action action, 
Request request, ActionListener listener) { listener.onResponse(null); } - @Override - public ThreadPool threadPool() { - return null; - } - @Override public void close() { - - } - - @Override - public ClusterAdminClient cluster() { - return new AbstractClusterAdminClient() { - @Override - public > ActionFuture execute(Action action, Request request) { - return null; - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - listener.onResponse(null); - } - - @Override - public ThreadPool threadPool() { - return null; - } - }; - } - - @Override - public IndicesAdminClient indices() { - return new AbstractIndicesAdminClient() { - @Override - public > ActionFuture execute(Action action, Request request) { - return null; - } - - @Override - public > void execute(Action action, Request request, ActionListener listener) { - listener.onResponse(null); - } - - @Override - public ThreadPool threadPool() { - return null; - } - }; + try { + terminate(threadPool()); + } catch (Throwable t) { + throw new ElasticsearchException(t.getMessage(), t); + } } } } diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 951f605801e..04c2e63e020 100644 --- a/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -50,8 +51,10 @@ public class NativeScriptTests extends ElasticsearchTestCase { Settings settings = ImmutableSettings.settingsBuilder() 
.put("script.native.my.type", MyNativeScriptFactory.class.getName()) .put("name", "testNativeScript") + .put("path.home", createTempDir()) .build(); Injector injector = new ModulesBuilder().add( + new EnvironmentModule(new Environment(settings)), new ThreadPoolModule(settings), new SettingsModule(settings), new ScriptModule(settings)).createInjector(); @@ -73,7 +76,7 @@ public class NativeScriptTests extends ElasticsearchTestCase { String scriptContext = randomFrom(ScriptContext.Standard.values()).getKey(); builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + scriptContext, randomFrom(ScriptMode.values())); } - Settings settings = builder.build(); + Settings settings = builder.put("path.home", createTempDir()).build(); Environment environment = new Environment(settings); ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, null); Map nativeScriptFactoryMap = new HashMap<>(); diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 69aa8685835..c3f164ab25f 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -69,6 +69,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { public void setup() throws IOException { Path genericConfigFolder = createTempDir(); baseSettings = settingsBuilder() + .put("path.home", createTempDir().toString()) .put("path.conf", genericConfigFolder) .build(); resourceWatcherService = new ResourceWatcherService(baseSettings, null); diff --git a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index 1f04063a42d..2c8a7ddbc0d 100644 --- a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -19,7 +19,6 @@ 
package org.elasticsearch.script.expression; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -53,7 +52,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { paramsMap.put(params[i].toString(), params[i + 1]); } - SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); + SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) .addSort(SortBuilders.fieldSort("_uid") .order(SortOrder.ASC)).addScriptField("foo", "expression", script, paramsMap); @@ -86,7 +85,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"), client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye")); ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction("1 / _score", "expression"); - SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); + SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode("replace")); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent SearchResponse rsp = req.get(); @@ -274,7 +273,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "doc", "2").setSource("x", 10, "y", 1.4), client().prepareIndex("test", "doc", "3").setSource("x", 13, "y", 1.8)); - SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); + SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) 
.addAggregation(AggregationBuilders.stats("int_agg").field("x").script("_value * 3").lang(ExpressionScriptEngineService.NAME)) .addAggregation(AggregationBuilders.stats("double_agg").field("y").script("_value - 1.1").lang(ExpressionScriptEngineService.NAME)); @@ -300,7 +299,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "doc", "2").setSource("text", "goodbye"), client().prepareIndex("test", "doc", "3").setSource("text", "hello")); - SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); + SearchRequestBuilder req = client().prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) .addAggregation(AggregationBuilders.terms("term_agg").field("text").script("_value").lang(ExpressionScriptEngineService.NAME)); diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java index cf25345aa93..74b8849e790 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -38,7 +37,6 @@ import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregat import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.junit.annotations.TestLogging; import 
org.junit.Test; import java.util.ArrayList; @@ -51,7 +49,10 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.global; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; @@ -407,6 +408,10 @@ public class GeoBoundsTests extends ElasticsearchIntegrationTest { GeoBounds geoBounds = bucket.getAggregations().get("geoBounds"); assertThat(geoBounds, notNullValue()); assertThat(geoBounds.getName(), equalTo("geoBounds")); + assertThat(geoBounds.topLeft().getLat(), allOf(greaterThanOrEqualTo(-90.0), lessThanOrEqualTo(90.0))); + assertThat(geoBounds.topLeft().getLon(), allOf(greaterThanOrEqualTo(-180.0), lessThanOrEqualTo(180.0))); + assertThat(geoBounds.bottomRight().getLat(), allOf(greaterThanOrEqualTo(-90.0), lessThanOrEqualTo(90.0))); + assertThat(geoBounds.bottomRight().getLon(), allOf(greaterThanOrEqualTo(-180.0), lessThanOrEqualTo(180.0))); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index 77b7c8bc208..b87de1cbb8a 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.reducers.moving.avg; import com.google.common.collect.EvictingQueue; +import org.apache.lucene.util.LuceneTestCase; import 
org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -59,13 +60,12 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.min; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.movingAvg; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10972") public class MovAvgTests extends ElasticsearchIntegrationTest { private static final String INTERVAL_FIELD = "l_value"; @@ -308,7 +308,6 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { * test simple moving average on single value field */ @Test - @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void simpleSingleValuedField() { SearchResponse response = client() @@ -361,7 +360,6 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test - @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void linearSingleValuedField() { SearchResponse response = client() @@ -414,7 +412,6 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test - @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void singleSingleValuedField() { SearchResponse response = client() @@ -467,7 +464,6 @@ public class MovAvgTests extends ElasticsearchIntegrationTest 
{ } @Test - @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") public void doubleSingleValuedField() { SearchResponse response = client() @@ -1038,7 +1034,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } else { assertThat("[_count] movavg is null", countMovAvg, notNullValue()); assertThat("[_count] movavg does not match expected ["+countMovAvg.value()+" vs "+expectedCount+"]", - Math.abs(countMovAvg.value() - expectedCount) <= 0.000001, equalTo(true)); + countMovAvg.value(), closeTo(expectedCount, 0.1)); } // This is a gap bucket @@ -1047,7 +1043,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { assertThat("[value] movavg is not null", valuesMovAvg, Matchers.nullValue()); } else { assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); - assertThat("[value] movavg does not match expected ["+valuesMovAvg.value()+" vs "+expectedValue+"]", Math.abs(valuesMovAvg.value() - expectedValue) <= 0.000001, equalTo(true)); + assertThat("[value] movavg does not match expected ["+valuesMovAvg.value()+" vs "+expectedValue+"]", + valuesMovAvg.value(), closeTo(expectedValue, 0.1)); } } diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index 0ca85672ec9..37adeca328f 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.snapshots; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; - import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.Slow; @@ -44,7 +43,8 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.*; -import org.elasticsearch.cluster.metadata.SnapshotMetaData.*; +import org.elasticsearch.cluster.metadata.SnapshotMetaData.Entry; +import org.elasticsearch.cluster.metadata.SnapshotMetaData.ShardSnapshotStatus; import org.elasticsearch.cluster.metadata.SnapshotMetaData.State; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -70,10 +70,11 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static com.google.common.collect.Lists.newArrayList; -import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.shard.IndexShard.INDEX_REFRESH_INTERVAL; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.elasticsearch.index.shard.IndexShard.*; import static org.hamcrest.Matchers.*; @Slow @@ -135,7 +136,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { for (int i = 0; i < 100; i += 2) { client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); } - refresh(); + assertAllSuccessful(refresh()); assertHitCount(client.prepareCount("test-idx-1").get(), 50L); assertHitCount(client.prepareCount("test-idx-2").get(), 50L); assertHitCount(client.prepareCount("test-idx-3").get(), 50L); diff --git a/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java index 86ae03992e2..d11cec0ab0a 100644 --- a/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java +++ 
b/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java @@ -21,6 +21,7 @@ package org.elasticsearch.stresstest.client; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeBuilder; @@ -38,8 +39,10 @@ public class ClientFailover { for (int i = 0; i < nodes.length; i++) { nodes[i] = NodeBuilder.nodeBuilder().node(); } + + // TODO: what is this? a public static void main test?!?! - final TransportClient client = new TransportClient() + final TransportClient client = TransportClient.builder().build() .addTransportAddress(new InetSocketTransportAddress("localhost", 9300)) .addTransportAddress(new InetSocketTransportAddress("localhost", 9301)) .addTransportAddress(new InetSocketTransportAddress("localhost", 9302)); diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java index 66239f34347..0426ab647fd 100644 --- a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java +++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java @@ -47,8 +47,9 @@ public class ManyIndicesRemoteStressTest { Client client; Node node = null; + // TODO: what is this? a public static void main test?!?!?! 
if (true) { - client = new TransportClient().addTransportAddress(new InetSocketTransportAddress("localhost", 9300)); + client = TransportClient.builder().settings(ImmutableSettings.EMPTY).build().addTransportAddress(new InetSocketTransportAddress("localhost", 9300)); } else { node = NodeBuilder.nodeBuilder().client(true).node(); client = node.client(); diff --git a/src/test/java/org/elasticsearch/test/BackgroundIndexer.java b/src/test/java/org/elasticsearch/test/BackgroundIndexer.java index 02441008f69..764c85657d7 100644 --- a/src/test/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/src/test/java/org/elasticsearch/test/BackgroundIndexer.java @@ -164,7 +164,7 @@ public class BackgroundIndexer implements AutoCloseable { continue; } id = idGenerator.incrementAndGet(); - client.prepareIndex(index, type, Long.toString(id) + "-" + indexerId).setSource(generateSource(id, threadRandom)).get(); + client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get(); indexCounter.incrementAndGet(); } } diff --git a/src/test/java/org/elasticsearch/test/CompositeTestCluster.java b/src/test/java/org/elasticsearch/test/CompositeTestCluster.java index 1e258abfa2a..03f4590daa5 100644 --- a/src/test/java/org/elasticsearch/test/CompositeTestCluster.java +++ b/src/test/java/org/elasticsearch/test/CompositeTestCluster.java @@ -26,9 +26,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; import org.elasticsearch.client.FilterClient; -import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; @@ -292,34 +290,7 @@ public class CompositeTestCluster extends 
TestCluster { private final class ExternalClient extends FilterClient { public ExternalClient() { - super(null); - } - - @Override - protected Client in() { - return internalClient(); - } - - @Override - public ClusterAdminClient cluster() { - return new ClusterAdmin(null) { - - @Override - protected ClusterAdminClient in() { - return internalClient().admin().cluster(); - } - }; - } - - @Override - public IndicesAdminClient indices() { - return new IndicesAdmin(null) { - - @Override - protected IndicesAdminClient in() { - return internalClient().admin().indices(); - } - }; + super(internalClient()); } @Override @@ -327,5 +298,4 @@ public class CompositeTestCluster extends TestCluster { // never close this client } } - } diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 109bd030023..10ad832eef2 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -648,6 +648,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } ensureClusterSizeConsistency(); ensureClusterStateConsistency(); + cluster().beforeIndexDeletion(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 02c02b2ed6e..a56db25f075 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -93,7 +93,7 @@ import static com.google.common.collect.Lists.newArrayList; public abstract class ElasticsearchTestCase extends LuceneTestCase { static { - SecurityHack.ensureInitialized(); + 
SecurityBootstrap.ensureInitialized(); } protected final ESLogger logger = Loggers.getLogger(getClass()); @@ -123,7 +123,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { } /** called after a test is finished, but only if succesfull */ - protected void afterIfSuccessful() { + protected void afterIfSuccessful() throws Exception { } // setup mock filesystems for this test run. we change PathUtils diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java index a61fe704867..8374472dba8 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java @@ -43,7 +43,7 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase { static { - SecurityHack.ensureInitialized(); + SecurityBootstrap.ensureInitialized(); } public static Version randomVersion() { diff --git a/src/test/java/org/elasticsearch/test/ExternalNode.java b/src/test/java/org/elasticsearch/test/ExternalNode.java index f304b71cd4b..bc520988a68 100644 --- a/src/test/java/org/elasticsearch/test/ExternalNode.java +++ b/src/test/java/org/elasticsearch/test/ExternalNode.java @@ -21,7 +21,6 @@ package org.elasticsearch.test; import com.google.common.base.Predicate; import org.apache.lucene.util.Constants; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.Client; @@ -200,7 +199,7 @@ final class ExternalNode implements Closeable { .put("client.transport.nodes_sampler_interval", "1s") .put("name", "transport_client_" + nodeInfo.getNode().name()) .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", false).build(); - 
TransportClient client = new TransportClient(clientSettings); + TransportClient client = TransportClient.builder().settings(clientSettings).build(); client.addTransportAddress(addr); this.client = client; } diff --git a/src/test/java/org/elasticsearch/test/ExternalTestCluster.java b/src/test/java/org/elasticsearch/test/ExternalTestCluster.java index c0052f24044..cd30b9937fb 100644 --- a/src/test/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/ExternalTestCluster.java @@ -72,7 +72,7 @@ public final class ExternalTestCluster extends TestCluster { .put("client.transport.ignore_cluster_name", true) .put("node.mode", "network").build(); // we require network here! - this.client = new TransportClient(clientSettings).addTransportAddresses(transportAddresses); + this.client = TransportClient.builder().settings(clientSettings).build().addTransportAddresses(transportAddresses); NodesInfoResponse nodeInfos = this.client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); httpAddresses = new InetSocketAddress[nodeInfos.getNodes().length]; diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 4c857c24027..92797894e38 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -26,16 +26,10 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.google.common.base.Predicate; import com.google.common.base.Predicates; -import com.google.common.collect.Collections2; -import com.google.common.collect.Iterables; -import com.google.common.collect.Iterators; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; +import com.google.common.collect.*; import 
com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; - import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -80,7 +74,9 @@ import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.cache.filter.none.NoneFilterCache; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardModule; +import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.indices.IndicesService; @@ -112,34 +108,19 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; +import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static junit.framework.Assert.fail; -import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; -import static org.apache.lucene.util.LuceneTestCase.rarely; -import static org.apache.lucene.util.LuceneTestCase.usually; +import static org.apache.lucene.util.LuceneTestCase.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.node.NodeBuilder.nodeBuilder; import 
static org.elasticsearch.test.ElasticsearchTestCase.assertBusy; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.*; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; @@ -802,7 +783,7 @@ public final class InternalTestCluster extends TestCluster { /* no sniff client for now - doesn't work with all tests since it might throw NoNodeAvailableException if nodes are shut down. * we first need support of transportClientRatio as annotations or so */ - return transportClient = TransportClientFactory.noSniff(settingsSource.transportClient()).client(node, clusterName); + return transportClient = new TransportClientFactory(false, settingsSource.transportClient(), baseDir).client(node, clusterName); } void resetClient() throws IOException { @@ -855,30 +836,16 @@ public final class InternalTestCluster extends TestCluster { } public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_"; - static class TransportClientFactory { - private static TransportClientFactory NO_SNIFF_CLIENT_FACTORY = new TransportClientFactory(false, ImmutableSettings.EMPTY); - private static TransportClientFactory SNIFF_CLIENT_FACTORY = new TransportClientFactory(true, ImmutableSettings.EMPTY); + static class TransportClientFactory { private final boolean sniff; private final Settings settings; + private final Path baseDir; - public static TransportClientFactory noSniff(Settings settings) { - if (settings == null || settings.names().isEmpty()) { - return NO_SNIFF_CLIENT_FACTORY; - } - return new TransportClientFactory(false, settings); - } - - public static TransportClientFactory sniff(Settings settings) { - if (settings == null || settings.names().isEmpty()) { - return SNIFF_CLIENT_FACTORY; - } - return new 
TransportClientFactory(true, settings); - } - - TransportClientFactory(boolean sniff, Settings settings) { + TransportClientFactory(boolean sniff, Settings settings, Path baseDir) { this.sniff = sniff; this.settings = settings != null ? settings : ImmutableSettings.EMPTY; + this.baseDir = baseDir; } public Client client(Node node, String clusterName) { @@ -886,6 +853,7 @@ public final class InternalTestCluster extends TestCluster { Settings nodeSettings = node.settings(); Builder builder = settingsBuilder() .put("client.transport.nodes_sampler_interval", "1s") + .put("path.home", baseDir) .put("name", TRANSPORT_CLIENT_PREFIX + node.settings().get("name")) .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false) .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", sniff) @@ -896,7 +864,7 @@ public final class InternalTestCluster extends TestCluster { .put("config.ignore_system_properties", true) .put(settings); - TransportClient client = new TransportClient(builder.build()); + TransportClient client = TransportClient.builder().settings(builder.build()).build(); client.addTransportAddress(addr); return client; } @@ -990,6 +958,26 @@ public final class InternalTestCluster extends TestCluster { randomlyResetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. 
*/ } + @Override + public void beforeIndexDeletion() { + assertShardIndexCounter(); + } + + private void assertShardIndexCounter() { + final Collection nodesAndClients = nodes.values(); + for (NodeAndClient nodeAndClient : nodesAndClients) { + IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); + for (IndexService indexService : indexServices) { + for (IndexShard indexShard : indexService) { + assertThat(indexShard.getOperationsCount(), anyOf(equalTo(1), equalTo(0))); + if (indexShard.getOperationsCount() == 0) { + assertThat(indexShard.state(), equalTo(IndexShardState.CLOSED)); + } + } + } + } + } + private void randomlyResetClients() throws IOException { // only reset the clients on nightly tests, it causes heavy load... if (RandomizedTest.isNightly() && rarely(random)) { diff --git a/src/test/java/org/elasticsearch/test/SecurityBootstrap.java b/src/test/java/org/elasticsearch/test/SecurityBootstrap.java new file mode 100644 index 00000000000..d5e050a0d3e --- /dev/null +++ b/src/test/java/org/elasticsearch/test/SecurityBootstrap.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test; + +import org.apache.lucene.util.TestSecurityManager; +import org.elasticsearch.bootstrap.Bootstrap; +import org.elasticsearch.bootstrap.ESPolicy; +import org.elasticsearch.bootstrap.Security; +import org.elasticsearch.common.io.PathUtils; + +import java.security.Permissions; +import java.security.Policy; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; + +/** + * Installs test security manager (ensures it happens regardless of which + * test case happens to be first, test ordering, etc). + *

+ * The idea is to mimic as much as possible what happens with ES in production + * mode (e.g. assign permissions and install security manager the same way) + */ +class SecurityBootstrap { + + // TODO: can we share more code with the non-test side here + // without making things complex??? + + static { + // just like bootstrap, initialize natives, then SM + Bootstrap.initializeNatives(true, true); + // install security manager if requested + if (systemPropertyAsBoolean("tests.security.manager", false)) { + try { + // initialize tmpdir the same exact way as bootstrap. + Permissions perms = new Permissions(); + Security.addPath(perms, PathUtils.get(System.getProperty("java.io.tmpdir")), "read,readlink,write,delete"); + Policy.setPolicy(new ESPolicy(perms)); + System.setSecurityManager(new TestSecurityManager()); + Security.selfTest(); + } catch (Exception e) { + throw new RuntimeException("unable to install test security manager", e); + } + } + } + + // does nothing, just easy way to make sure the class is loaded. + static void ensureInitialized() {} +} diff --git a/src/test/java/org/elasticsearch/test/SecurityHack.java b/src/test/java/org/elasticsearch/test/SecurityHack.java deleted file mode 100644 index 092eda99f7d..00000000000 --- a/src/test/java/org/elasticsearch/test/SecurityHack.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test; - -import org.apache.lucene.util.TestSecurityManager; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; - -/** - * Installs test security manager (ensures it happens regardless of which - * test case happens to be first, test ordering, etc). - *

- * Note that this is BS, this should be done by the jvm (by passing -Djava.security.manager). - * turning it on/off needs to be the role of maven, not this stuff. - */ -class SecurityHack { - - static { - // for IDEs, we check that security.policy is set - if (systemPropertyAsBoolean("tests.security.manager", true) && - System.getProperty("java.security.policy") != null) { - System.setSecurityManager(new TestSecurityManager()); - } - } - - // does nothing, just easy way to make sure the class is loaded. - static void ensureInitialized() {} -} diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java index c8d48521b14..a1f5f016a8d 100644 --- a/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/TestCluster.java @@ -76,6 +76,12 @@ public abstract class TestCluster implements Iterable, Closeable { wipeRepositories(); } + /** + * Assertions that should run before the cluster is wiped should be called in this method + */ + public void beforeIndexDeletion() { + } + /** * This method checks all the things that need to be checked after each test */ diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java b/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java index 160bf26ce19..0adb6380e77 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.engine; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.translog.fs.FsTranslog; /** * diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java index e81009c4979..7c7bd96a31f 100644 --- 
a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -18,21 +18,24 @@ */ package org.elasticsearch.test.engine; -import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; -import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.translog.fs.FsTranslog; import java.io.IOException; final class MockInternalEngine extends InternalEngine { private MockEngineSupport support; + private final boolean randomizeFlushOnClose; + MockInternalEngine(EngineConfig config, boolean skipInitialTranslogRecovery) throws EngineException { super(config, skipInitialTranslogRecovery); + randomizeFlushOnClose = IndexMetaData.isOnSharedFilesystem(config.getIndexSettings()) == false; } private synchronized MockEngineSupport support() { @@ -45,7 +48,7 @@ final class MockInternalEngine extends InternalEngine { @Override public void close() throws IOException { - switch(support().flushOrClose(this, MockEngineSupport.CloseAction.CLOSE)) { + switch (support().flushOrClose(this, MockEngineSupport.CloseAction.CLOSE)) { case FLUSH_AND_CLOSE: super.flushAndClose(); break; @@ -53,21 +56,22 @@ final class MockInternalEngine extends InternalEngine { super.close(); break; } - logger.debug("Ongoing recoveries after engine close: " + onGoingRecoveries.get()); - } @Override public void flushAndClose() throws IOException { - switch(support().flushOrClose(this, MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { - case FLUSH_AND_CLOSE: - super.flushAndClose(); - break; - case CLOSE: - super.close(); - break; + if (randomizeFlushOnClose) { + switch (support().flushOrClose(this, 
MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { + case FLUSH_AND_CLOSE: + super.flushAndClose(); + break; + case CLOSE: + super.close(); + break; + } + } else { + super.flushAndClose(); } - logger.debug("Ongoing recoveries after engine close: " + onGoingRecoveries.get()); } @Override diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 7a822e163cb..ab59da837e4 100644 --- a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -90,7 +90,7 @@ import static org.hamcrest.Matchers.*; */ public class ElasticsearchAssertions { - public static void assertAcked(AcknowledgedRequestBuilder builder) { + public static void assertAcked(AcknowledgedRequestBuilder builder) { assertAcked(builder.get()); } @@ -501,14 +501,14 @@ public class ElasticsearchAssertions { /** * Run the request from a given builder and check that it throws an exception of the right type */ - public static void assertThrows(ActionRequestBuilder builder, Class exceptionClass) { + public static void assertThrows(ActionRequestBuilder builder, Class exceptionClass) { assertThrows(builder.execute(), exceptionClass); } /** * Run the request from a given builder and check that it throws an exception of the right type, with a given {@link org.elasticsearch.rest.RestStatus} */ - public static void assertThrows(ActionRequestBuilder builder, Class exceptionClass, RestStatus status) { + public static void assertThrows(ActionRequestBuilder builder, Class exceptionClass, RestStatus status) { assertThrows(builder.execute(), exceptionClass, status); } @@ -517,7 +517,7 @@ public class ElasticsearchAssertions { * * @param extraInfo extra information to add to the failure message */ - public static void assertThrows(ActionRequestBuilder builder, Class exceptionClass, String extraInfo) { + public static 
void assertThrows(ActionRequestBuilder builder, Class exceptionClass, String extraInfo) { assertThrows(builder.execute(), exceptionClass, extraInfo); } @@ -582,11 +582,11 @@ public class ElasticsearchAssertions { } } - public static void assertThrows(ActionRequestBuilder builder, RestStatus status) { + public static void assertThrows(ActionRequestBuilder builder, RestStatus status) { assertThrows(builder.execute(), status); } - public static void assertThrows(ActionRequestBuilder builder, RestStatus status, String extraInfo) { + public static void assertThrows(ActionRequestBuilder builder, RestStatus status, String extraInfo) { assertThrows(builder.execute(), status, extraInfo); } diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java index d07b3b7c4d5..2a8d7cff621 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -22,7 +22,6 @@ package org.elasticsearch.test.store; import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.google.common.base.Charsets; - import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.*; @@ -39,14 +38,14 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.*; +import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; -import 
org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Assert; import java.io.Closeable; @@ -65,7 +64,7 @@ public class MockFSDirectoryService extends FsDirectoryService { public static final String CRASH_INDEX = "index.store.mock.random.crash_index"; private static final EnumSet validCheckIndexStates = EnumSet.of( - IndexShardState.STARTED, IndexShardState.RELOCATED , IndexShardState.POST_RECOVERY + IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY ); private final FsDirectoryService delegateService; @@ -115,7 +114,7 @@ public class MockFSDirectoryService extends FsDirectoryService { // so that even in tests where don't flush we can check the integrity of the Lucene index if (indexShard.engine().hasUncommittedChanges()) { // only if we have any changes logger.info("{} flushing in order to run checkindex", indexShard.shardId()); - Releasables.close(indexShard.engine().snapshotIndex()); // Keep translog for tests that rely on replaying it + Releasables.close(indexShard.engine().snapshotIndex(true)); // Keep translog for tests that rely on replaying it } logger.info("{} flush finished in beforeIndexShardClosed", indexShard.shardId()); canRun = true; @@ -138,12 +137,11 @@ public class MockFSDirectoryService extends FsDirectoryService { } - @Override public Directory newDirectory() throws IOException { return wrap(delegateService.newDirectory()); } - + @Override protected synchronized Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { throw new UnsupportedOperationException(); @@ -170,8 +168,8 @@ public class MockFSDirectoryService extends FsDirectoryService { if (!status.clean) { ElasticsearchTestCase.checkIndexFailed = true; logger.warn("check index [failure] index files={}\n{}", - Arrays.toString(dir.listAll()), - new String(os.bytes().toBytes(), Charsets.UTF_8)); + 
Arrays.toString(dir.listAll()), + new String(os.bytes().toBytes(), Charsets.UTF_8)); throw new IndexShardException(shardId, "index check failure"); } else { if (logger.isDebugEnabled()) { diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java index 10b7fa91ac0..ac54fb70e07 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java @@ -66,8 +66,9 @@ public class NettyTransportMultiPortIntegrationTests extends ElasticsearchIntegr Settings settings = settingsBuilder() .put("cluster.name", internalCluster().getClusterName()) .put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransport.class.getName()) + .put("path.home", createTempDir().toString()) .build(); - try (TransportClient transportClient = new TransportClient(settings, false)) { + try (TransportClient transportClient = TransportClient.builder().settings(settings).loadConfigSettings(false).build()) { transportClient.addTransportAddress(new InetSocketTransportAddress("127.0.0.1", randomPort)); ClusterHealthResponse response = transportClient.admin().cluster().prepareHealth().get(); assertThat(response.getStatus(), is(ClusterHealthStatus.GREEN)); diff --git a/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 2089404165c..2f1e81eead7 100644 --- a/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -95,7 +95,8 @@ public class TribeUnitTests extends ElasticsearchTestCase { //tribe node doesn't need the node.mode setting, as it's forced local internally anyways. 
The tribe clients do need it to make sure //they can find their corresponding tribes using the proper transport Settings settings = ImmutableSettings.builder().put("http.enabled", false).put("node.name", "tribe_node") - .put("tribe.t1.node.mode", NODE_MODE).put("tribe.t2.node.mode", NODE_MODE).put(extraSettings).build(); + .put("tribe.t1.node.mode", NODE_MODE).put("tribe.t2.node.mode", NODE_MODE) + .put("path.home", createTempDir()).put(extraSettings).build(); try (Node node = NodeBuilder.nodeBuilder().settings(settings).node()) { try (Client client = node.client()) {