diff --git a/docs/reference/aggregations/reducer.asciidoc b/docs/reference/aggregations/reducer.asciidoc
index 2ce379cd583..33f82e78523 100644
--- a/docs/reference/aggregations/reducer.asciidoc
+++ b/docs/reference/aggregations/reducer.asciidoc
@@ -130,7 +130,7 @@ count of each bucket, instead of a specific metric:
 --------------------------------------------------
 <1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram
-
+[[gap-policy]]
 [float]
 === Dealing with gaps in the data

@@ -144,7 +144,7 @@ Where there is no data available in a bucket for a given metric it presents a problem in calculating
the derivative value for both the current bucket and the next bucket. The derivative reducer aggregation has a
`gap policy` parameter to define what the behavior should be when a gap in the data is found. There are currently
two options for controlling the gap policy:

-_ignore_::
+_skip_::
 This option will not produce a derivative value for any buckets where the value
 in the current or previous bucket is missing

@@ -154,7 +154,9 @@ _insert_zeros_::

+include::reducer/avg-bucket-aggregation.asciidoc[]
+
 include::reducer/derivative-aggregation.asciidoc[]

 include::reducer/max-bucket-aggregation.asciidoc[]

 include::reducer/min-bucket-aggregation.asciidoc[]

+include::reducer/sum-bucket-aggregation.asciidoc[]
+
 include::reducer/movavg-aggregation.asciidoc[]
diff --git a/docs/reference/aggregations/reducer/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/avg-bucket-aggregation.asciidoc
new file mode 100644
index 00000000000..5c2d6b22db8
--- /dev/null
+++ b/docs/reference/aggregations/reducer/avg-bucket-aggregation.asciidoc
@@ -0,0 +1,100 @@
+[[search-aggregations-reducer-avg-bucket-aggregation]]
+=== Avg Bucket Aggregation
+
+A sibling reducer aggregation which calculates the (mean) average value of a specified metric in a sibling aggregation.
+The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation.
+
+==== Syntax
+
+An `avg_bucket` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "avg_bucket": {
+        "buckets_path": "the_sum"
+    }
+}
+--------------------------------------------------
+
+.`avg_bucket` Parameters
+|===
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |The path to the buckets we wish to find the average for (see <> for more
+details) |Required |
+|`gap_policy` |The policy to apply when gaps are found in the data (see <> for more
+details) |Optional, defaults to `skip` |
+|`format` |Format to apply to the output value of this aggregation |Optional, defaults to `null` |
+|===
+
+The following snippet calculates the average of the total monthly `sales`:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                }
+            }
+        },
+        "avg_monthly_sales": {
+            "avg_bucket": {
+                "buckets_path": "sales_per_month>sales" <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> `buckets_path` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
+`sales_per_month` date histogram.
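+
+For completeness, a brief sketch of how the optional parameters compose with
+`buckets_path`; the `format` pattern shown here is an illustrative assumption
+rather than part of the example above:
+
+[source,js]
+--------------------------------------------------
+{
+    "avg_monthly_sales": {
+        "avg_bucket": {
+            "buckets_path": "sales_per_month>sales",
+            "gap_policy": "skip",
+            "format": "#,##0.00"
+        }
+    }
+}
+--------------------------------------------------
+
+With `gap_policy` set to `skip`, months whose `sales` value is missing are left
+out of the average rather than being treated as zero. Returning to the
+unmodified request above: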
+ +And the following may be the response: + +[source,js] +-------------------------------------------------- +{ + "aggregations": { + "sales_per_month": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "sales": { + "value": 550 + } + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "sales": { + "value": 60 + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "sales": { + "value": 375 + } + } + ] + }, + "avg_monthly_sales": { + "value": 328.33333333333333 + } + } +} +-------------------------------------------------- + diff --git a/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc b/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc index 17801055418..766172f350b 100644 --- a/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/reducer/derivative-aggregation.asciidoc @@ -21,7 +21,11 @@ A `derivative` aggregation looks like this in isolation: .`derivative` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |Path to the metric of interest (see <> for more details |Required | +|`buckets_path` |The path to the buckets we wish to find the derivative for (see <> for more + details) |Required | + |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more + details)|Optional, defaults to `skip` | + |`format` |format to apply to the output value of this aggregation |Optional, defaults to `null` | |=== @@ -194,3 +198,85 @@ And the following may be the response: <1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the second derivative +==== Units + +The derivative aggregation allows the units of the derivative values to be specified. This returns an extra field in the response +`normalized_value` which reports the derivative value in the desired x-axis units. 
In the below example we calculate the derivative
+of the total sales per month, but ask for the derivative in units of sales per day:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                },
+                "sales_deriv": {
+                    "derivative": {
+                        "buckets_path": "sales",
+                        "unit": "day" <1>
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> `unit` specifies what unit to use for the x-axis of the derivative calculation
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               }
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               },
+               "sales_deriv": {
+                  "value": -490, <1>
+                  "normalized_value": -17.5 <2>
+               }
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 375
+               },
+               "sales_deriv": {
+                  "value": 315,
+                  "normalized_value": 10.16129032258065
+               }
+            }
+         ]
+      }
+   }
+}
+--------------------------------------------------
+<1> `value` is reported in the original units of 'per month'
+<2> `normalized_value` is reported in the desired units of 'per day'
diff --git a/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc
index 939140b4a26..703702a1a81 100644
--- a/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc
+++ b/docs/reference/aggregations/reducer/max-bucket-aggregation.asciidoc
@@ -22,7 +22,10 @@ A `max_bucket` aggregation looks like this in isolation:
 |===
 |Parameter Name |Description |Required |Default Value
 |`buckets_path` |The path to the buckets we wish to find the maximum for (see <> for more
- details |Required |
+ details) |Required |
+ |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more
+ details) |Optional, defaults to `skip` |
+ |`format` |Format to apply to the output value of this aggregation |Optional, defaults to `null` |
 |===

 The following snippet calculates the maximum of the total monthly `sales`:
diff --git a/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc
index 1ea26c17a2e..d03e5ddb6c9 100644
--- a/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc
+++ b/docs/reference/aggregations/reducer/min-bucket-aggregation.asciidoc
@@ -21,7 +21,11 @@ A `min_bucket` aggregation looks like this in isolation:
 .`min_bucket` Parameters
 |===
 |Parameter Name |Description |Required |Default Value
-|`buckets_path` |Path to the metric of interest (see <> for more details |Required |
+|`buckets_path` |The path to the buckets we wish to find the minimum for (see <> for more
+ details) |Required |
+ |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more
+ details) |Optional, defaults to `skip` |
+ |`format` |Format to apply to the output value of this aggregation |Optional, defaults to `null` |
 |===

diff --git a/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc b/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc index
18cf98d263d..ef5f97e12ee 100644
--- a/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc
+++ b/docs/reference/aggregations/reducer/movavg-aggregation.asciidoc
@@ -21,9 +21,9 @@ A `moving_avg` aggregation looks like this in isolation:

 [source,js]
 --------------------------------------------------
 {
-    "movavg": {
+    "moving_avg": {
         "buckets_path": "the_sum",
-        "model": "double_exp",
+        "model": "holt",
         "window": 5,
         "gap_policy": "insert_zeros",
         "settings": {
@@ -153,9 +153,9 @@ although typically less than the `simple` model:

 .Linear moving average with window of size 100
 image::images/reducers_movavg/linear_100window.png[]

-==== Single Exponential
+==== EWMA (Exponentially Weighted)

-The `single_exp` model is similar to the `linear` model, except older data-points become exponentially less important,
+The `ewma` model (aka "single-exponential") is similar to the `linear` model, except older data-points become exponentially less important,
 rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha`
 setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger
 portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the
@@ -169,7 +169,7 @@ The default value of `alpha` is `0.5`, and the setting accepts any float from 0-1.
 "the_movavg":{
     "moving_avg":{
         "buckets_path": "the_sum",
-        "model" : "single_exp",
+        "model" : "ewma",
         "settings" : {
             "alpha" : 0.5
         }
@@ -187,13 +187,13 @@ image::images/reducers_movavg/single_0.2alpha.png[]

 .Single Exponential moving average with window of size 10, alpha = 0.7
 image::images/reducers_movavg/single_0.7alpha.png[]

-==== Double Exponential
+==== Holt-Linear

-The `double_exp` model, sometimes called "Holt's Linear Trend" model, incorporates a second exponential term which
+The `holt` model (aka "double exponential") incorporates a second exponential term which
 tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The
 double exponential model calculates two values internally: a "level" and a "trend".

-The level calculation is similar to `single_exp`, and is an exponentially weighted view of the data. The difference is
+The level calculation is similar to `ewma`, and is an exponentially weighted view of the data. The difference is
 that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original
 series. The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of
 the smoothed data). The trend value is also exponentially weighted.

@@ -208,7 +208,7 @@ The default value of `alpha` and `beta` is `0.5`, and the settings accept any float from 0-1.
 "the_movavg":{
     "moving_avg":{
         "buckets_path": "the_sum",
-        "model" : "double_exp",
+        "model" : "holt",
         "settings" : {
             "alpha" : 0.5,
             "beta" : 0.5
@@ -217,7 +217,7 @@ The default value of `alpha` and `beta` is `0.5`, and the settings accept any float from 0-1.
 }
 --------------------------------------------------

-In practice, the `alpha` value behaves very similarly in `double_exp` as `single_exp`: small values produce more smoothing
+In practice, the `alpha` value behaves very similarly in `holt` as in `ewma`: small values produce more smoothing
 and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult to see.
Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger values
emphasize short-term trends. This will become more apparent when you are predicting values.

@@ -251,14 +251,14 @@ as your buckets:
 }
 --------------------------------------------------

-The `simple`, `linear` and `single_exp` models all produce "flat" predictions: they essentially converge on the mean
+The `simple`, `linear` and `ewma` models all produce "flat" predictions: they essentially converge on the mean
 of the last value in the series, producing a flat:

 [[simple_prediction]]
 .Simple moving average with window of size 10, predict = 50
 image::images/reducers_movavg/simple_prediction.png[]

-In contrast, the `double_exp` model can extrapolate based on local or global constant trends. If we set a high `beta`
+In contrast, the `holt` model can extrapolate based on local or global constant trends. If we set a high `beta`
 value, we can extrapolate based on local constant trends (in this case the predictions head down, because the data
 at the end of the series was heading in a downward direction):

diff --git a/docs/reference/aggregations/reducer/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/reducer/sum-bucket-aggregation.asciidoc
new file mode 100644
index 00000000000..0a6a98cc977
--- /dev/null
+++ b/docs/reference/aggregations/reducer/sum-bucket-aggregation.asciidoc
@@ -0,0 +1,100 @@
+[[search-aggregations-reducer-sum-bucket-aggregation]]
+=== Sum Bucket Aggregation
+
+A sibling reducer aggregation which calculates the sum across all buckets of a specified metric in a sibling aggregation.
+The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation.
+
+==== Syntax
+
+A `sum_bucket` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "sum_bucket": {
+        "buckets_path": "the_sum"
+    }
+}
+--------------------------------------------------
+
+.`sum_bucket` Parameters
+|===
+|Parameter Name |Description |Required |Default Value
+|`buckets_path` |The path to the buckets we wish to find the sum for (see <> for more
+details) |Required |
+|`gap_policy` |The policy to apply when gaps are found in the data (see <> for more
+details) |Optional, defaults to `skip` |
+|`format` |Format to apply to the output value of this aggregation |Optional, defaults to `null` |
+|===
+
+The following snippet calculates the sum of all the total monthly `sales` buckets:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                }
+            }
+        },
+        "sum_monthly_sales": {
+            "sum_bucket": {
+                "buckets_path": "sales_per_month>sales" <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> `buckets_path` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the
+`sales_per_month` date histogram.
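+
+Since sibling reducers each produce their own top-level value, several of them
+can point at the same buckets. As a sketch (the aggregation names are
+illustrative), the sum and the average of the same monthly sales can be
+requested together:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : { "field" : "date", "interval" : "month" },
+            "aggs" : { "sales" : { "sum" : { "field" : "price" } } }
+        },
+        "sum_monthly_sales" : {
+            "sum_bucket" : { "buckets_path" : "sales_per_month>sales" }
+        },
+        "avg_monthly_sales" : {
+            "avg_bucket" : { "buckets_path" : "sales_per_month>sales" }
+        }
+    }
+}
+--------------------------------------------------
+
+For the three monthly buckets below, the sum works out to 550 + 60 + 375 = 985.
+Returning to the plain `sum_bucket` request above: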
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               }
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               }
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 375
+               }
+            }
+         ]
+      },
+      "sum_monthly_sales": {
+         "value": 985
+      }
+   }
+}
+--------------------------------------------------

diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc
index 87b7e2c4f65..a3c04bcf0a8 100644
--- a/docs/reference/index-modules/translog.asciidoc
+++ b/docs/reference/index-modules/translog.asciidoc
@@ -1,34 +1,94 @@
 [[index-modules-translog]]
 == Translog

-Each shard has a transaction log or write ahead log associated with it.
-It allows to guarantee that when an index/delete operation occurs, it is
-applied atomically, while not "committing" the internal Lucene index for
-each request. A flush ("commit") still happens based on several
-parameters:
+Changes to Lucene are only persisted to disk during a Lucene commit,
+which is a relatively heavy operation and so cannot be performed after every
+index or delete operation. Changes that happen after one commit and before another
+will be lost in the event of process exit or hardware failure.
+
+To prevent this data loss, each shard has a _transaction log_ or write ahead
+log associated with it. Any index or delete operation is written to the
+translog after being processed by the internal Lucene index.
+
+In the event of a crash, recent transactions can be replayed from the
+transaction log when the shard recovers.
+
+An Elasticsearch flush is the process of performing a Lucene commit and
+starting a new translog. It is done automatically in the background in order
+to make sure the transaction log doesn't grow too large, which would make
+replaying its operations take a considerable amount of time during recovery.
+It is also exposed through an API, though it rarely needs to be performed
+manually.
+
+
+[float]
+=== Flush settings
+
+The following <> settings
+control how often the in-memory buffer is flushed to disk:
+
+`index.translog.flush_threshold_size`::
+
+Once the translog hits this size, a flush will happen. Defaults to `512mb`.

 `index.translog.flush_threshold_ops`::

 After how many operations to flush. Defaults to `unlimited`.

-`index.translog.flush_threshold_size`::
+`index.translog.flush_threshold_period`::

-Once the translog hits this size, a flush will happen. Defaults to `512mb`.
+How long to wait before triggering a flush regardless of translog size. Defaults to `30m`.

-`index.translog.flush_threshold_period`::
+`index.translog.interval`::

-The period with no flush happening to force a flush. Defaults to `30m`.
+How often to check if a flush is needed, randomized between the interval value
+and 2x the interval value. Defaults to `5s`.

-`index.translog.interval`::
+[float]
+=== Translog settings

-How often to check if a flush is needed, randomized
-between the interval value and 2x the interval value. Defaults to `5s`.
+The translog itself is only persisted to disk when it is ++fsync++ed.
Until
+then, data recently written to the translog may only exist in the file system
+cache and could potentially be lost in the event of hardware failure.
+
+The following <> settings
+control the behaviour of the transaction log:

 `index.translog.sync_interval`::

-How often the translog is ++fsync++ed to disk. Defaults to `5s`.
+How often the translog is ++fsync++ed to disk. Defaults to `5s`. Can be set to
+`0` to sync after each operation.

+`index.translog.fs.type`::

-Note: these parameters can be updated at runtime using the Index
-Settings Update API (for example, these number can be increased when
-executing bulk updates to support higher TPS)
+Either a `buffered` translog (default) which buffers 64kB in memory before
+writing to disk, or a `simple` translog which writes every entry to disk
+immediately. Whichever is used, these writes are only ++fsync++ed according
+to the `sync_interval`.
+
+The `buffered` translog is written to disk when it reaches 64kB in size, or
+whenever a `sync` is triggered by the `sync_interval`.
+
+.Why don't we `fsync` the translog after every write?
+******************************************************
+
+The disk is the slowest part of any server. An `fsync` ensures that data in
+the file system buffer has been physically written to disk, but this
+persistence comes with a performance cost.
+
+However, the translog is not the only persistence mechanism in Elasticsearch.
+Any index or update request is first written to the primary shard, then
+forwarded in parallel to any replica shards. The primary waits for the action
+to be completed on the replicas before returning success to the client.
+
+If the node holding the primary shard dies for some reason, its transaction
+log could be missing the last 5 seconds of data. However, that data should
+already be available on a replica shard on a different node. Of course, if
+the whole data centre loses power at the same time, then it is possible that
+you could lose the last 5 seconds (or `sync_interval`) of data.
+
+We are constantly monitoring the performance implications of better default
+translog sync semantics, so the default might change as time passes and
+hardware, virtualization, and other aspects improve.
+
+******************************************************
\ No newline at end of file
diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc
index 8f57613dbe1..1f51793b97b 100644
--- a/docs/reference/mapping/fields/source-field.asciidoc
+++ b/docs/reference/mapping/fields/source-field.asciidoc
@@ -8,15 +8,6 @@ actual JSON that was used as the indexed document. It is not indexed
 <>, the `_source` field is returned by default.

-Though very handy to have around, the source field does incur storage
-overhead within the index. For this reason, it can be disabled. For
-example:
-
-[source,js]
---------------------------------------------------
-{
-    "tweet" : {
-        "_source" : {"enabled" : false}
-    }
-}
---------------------------------------------------
+Many APIs may use the `_source` field. For example, the
+<>. To minimize the storage cost of
+`_source`, set `index.codec: best_compression` in index settings.
diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc
index b097543f656..ebfad2ada58 100644
--- a/docs/reference/migration/index.asciidoc
+++ b/docs/reference/migration/index.asciidoc
@@ -19,7 +19,8 @@ See <> for more info.
include::migrate_2_0.asciidoc[]

+include::migrate_1_6.asciidoc[]
+
 include::migrate_1_4.asciidoc[]

 include::migrate_1_0.asciidoc[]
-
diff --git a/docs/reference/migration/migrate_1_6.asciidoc b/docs/reference/migration/migrate_1_6.asciidoc
new file mode 100644
index 00000000000..9540d3b6759
--- /dev/null
+++ b/docs/reference/migration/migrate_1_6.asciidoc
@@ -0,0 +1,17 @@
+[[breaking-changes-1.6]]
+== Breaking changes in 1.6
+
+This section discusses the changes that you need to be aware of when migrating
+your application from Elasticsearch 1.x to Elasticsearch 1.6.
+
+[float]
+=== More Like This API
+
+The More Like This API has been deprecated and will be removed in 2.0. Instead use the <>.
+
+[float]
+=== `top_children` query
+
+The `top_children` query has been deprecated and will be removed in 2.0. Instead the `has_child` query should be used.
+The `top_children` query isn't always faster than the `has_child` query, and it is often inaccurate:
+the total hits and any aggregations in the same search request will likely be wrong.
diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc
index b9e84b85839..d4e16c56582 100644
--- a/docs/reference/migration/migrate_2_0.asciidoc
+++ b/docs/reference/migration/migrate_2_0.asciidoc
@@ -24,10 +24,10 @@ The following deprecated methods have been removed:

 Partial fields were deprecated since 1.0.0beta1 in favor of <>.

-=== More Like This Field
+=== More Like This

-The More Like This Field query has been removed in favor of the <>
-restrained set to a specific `field`.
+The More Like This API and the More Like This Field query have been removed in
+favor of the <>.

 === Routing

@@ -272,6 +272,15 @@ to provide special features. They now have limited configuration options.

 * `_field_names` configuration is limited to disabling the field.
 * `_size` configuration is limited to enabling the field.

+==== Source field limitations
+The `_source` field could previously be disabled dynamically. Since this field
+is a critical piece of many features like the Update API, it can no longer
+be disabled.
+
+The options for `compress` and `compress_threshold` have also been removed.
+The source field is already compressed. To minimize the storage cost,
+set `index.codec: best_compression` in index settings.
+
 ==== Boolean fields

 Boolean fields used to have a string fielddata with `F` meaning `false` and `T`
diff --git a/docs/reference/query-dsl/queries/top-children-query.asciidoc b/docs/reference/query-dsl/queries/top-children-query.asciidoc
index 4616d87676b..a4d74f62184 100644
--- a/docs/reference/query-dsl/queries/top-children-query.asciidoc
+++ b/docs/reference/query-dsl/queries/top-children-query.asciidoc
@@ -1,6 +1,8 @@
 [[query-dsl-top-children-query]]
 === Top Children Query

+deprecated[1.6.0, Use the `has_child` query instead]
+
 The `top_children` query runs the child query with an estimated hits size,
 and out of the hit docs, aggregates it into parent docs.
If there aren't enough parent docs matching the requested from/size search
diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc
index b71a0dfe466..002a3ad1282 100644
--- a/docs/reference/search.asciidoc
+++ b/docs/reference/search.asciidoc
@@ -101,7 +101,4 @@ include::search/explain.asciidoc[]

 include::search/percolate.asciidoc[]

-include::search/more-like-this.asciidoc[]
-
 include::search/field-stats.asciidoc[]
-
diff --git a/docs/reference/search/more-like-this.asciidoc b/docs/reference/search/more-like-this.asciidoc
deleted file mode 100644
index ffdad3a4d3d..00000000000
--- a/docs/reference/search/more-like-this.asciidoc
+++ /dev/null
@@ -1,32 +0,0 @@
-[[search-more-like-this]]
-== More Like This API
-
-The more like this (mlt) API allows to get documents that are "like" a
-specified document. Here is an example:
-
-[source,js]
---------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/tweet/1/_mlt?mlt_fields=tag,content&min_doc_freq=1'
---------------------------------------------------
-
-The API simply results in executing a search request with
-<> query (http
-parameters match the parameters to the `more_like_this` query). This
-means that the body of the request can optionally include all the
-request body options in the <> (aggs, from/to and so on). Internally, the more like this
-API is equivalent to performing a boolean query of `more_like_this_field`
-queries, with one query per specified `mlt_fields`.
-
-Rest parameters relating to search are also allowed, including
-`search_type`, `search_indices`, `search_types`, `search_scroll`,
-`search_size` and `search_from`.
-
-When no `mlt_fields` are specified, all the fields of the document will
-be used in the `more_like_this` query generated.
-
-By default, the queried document is excluded from the response (`include`
-set to false).
-
-Note: In order to use the `mlt` feature a `mlt_field` needs to be either
-be `stored`, store `term_vector` or `source` needs to be enabled.
diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc
index 3bf693d33ea..964913be94a 100644
--- a/docs/reference/setup/repositories.asciidoc
+++ b/docs/reference/setup/repositories.asciidoc
@@ -91,6 +91,9 @@ yum install elasticsearch
 Configure Elasticsearch to automatically start during bootup. If your
 distribution is using SysV init, then you will need to run:

+WARNING: The repositories do not work with older RPM-based distributions
+         that still use RPM v3, like CentOS 5.
+ [source,sh] -------------------------------------------------- chkconfig --add elasticsearch diff --git a/pom.xml b/pom.xml index 718e98e91ea..2b6ff1ccab0 100644 --- a/pom.xml +++ b/pom.xml @@ -66,11 +66,6 @@ - - codehaus-snapshots - Codehaus Snapshots - http://repository.codehaus.org/ - lucene-snapshots Lucene Snapshots diff --git a/rest-api-spec/api/mlt.json b/rest-api-spec/api/mlt.json deleted file mode 100644 index 0dc58782dff..00000000000 --- a/rest-api-spec/api/mlt.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "mlt": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-more-like-this.html", - "methods": ["GET", "POST"], - "url": { - "path": "/{index}/{type}/{id}/_mlt", - "paths": ["/{index}/{type}/{id}/_mlt"], - "parts": { - "id": { - "type" : "string", - "required" : true, - "description" : "The document ID" - }, - "index": { - "type" : "string", - "required" : true, - "description" : "The name of the index" - }, - "type": { - "type" : "string", - "required" : true, - "description" : "The type of the document (use `_all` to fetch the first document matching the ID across all types)" - } - }, - "params": { - "boost_terms": { - "type" : "number", - "description" : "The boost factor" - }, - "max_doc_freq": { - "type" : "number", - "description" : "The word occurrence frequency as count: words with higher occurrence in the corpus will be ignored" - }, - "max_query_terms": { - "type" : "number", - "description" : "The maximum query terms to be included in the generated query" - }, - "max_word_length": { - "type" : "number", - "description" : "The minimum length of the word: longer words will be ignored" - }, - "min_doc_freq": { - "type" : "number", - "description" : "The word occurrence frequency as count: words with lower occurrence in the corpus will be ignored" - }, - "min_term_freq": { - "type" : "number", - "description" : "The term frequency as percent: terms with lower occurrence in the source document will be ignored" - }, - "min_word_length": { - "type" : "number", - "description" : "The minimum length of the word: shorter words will be ignored" - }, - "mlt_fields": { - "type" : "list", - "description" : "Specific fields to perform the query against" - }, - "percent_terms_to_match": { - "type" : "number", - "description" : "How many terms have to match in order to consider the document a match (default: 0.3)" - }, - "routing": { - "type" : "string", - "description" : "Specific routing value" - }, - "search_from": { - "type" : "number", - "description" : "The offset from which to return results" - }, - "search_indices": { - "type" : "list", - "description" : "A comma-separated list of indices to perform the query against (default: the index containing the document)" - }, - "search_scroll": { - "type" : "string", - "description" : "A scroll search request definition" - }, - "search_size": { - "type" : "number", - "description" : "The number of documents to return (default: 10)" - }, - "search_source": { - "type" : "string", - "description" : "A specific search request definition (instead of using the request body)" - }, - "search_type": { - "type" : "string", - "description" : "Specific search type (eg. 
`dfs_then_fetch`, `scan`, etc)" - }, - "search_types": { - "type" : "list", - "description" : "A comma-separated list of types to perform the query against (default: the same type as the document)" - }, - "stop_words": { - "type" : "list", - "description" : "A list of stop words to be ignored" - } - } - }, - "body": { - "description" : "A specific search request definition" - } - } -} diff --git a/rest-api-spec/test/mlt/10_basic.yaml b/rest-api-spec/test/mlt/10_basic.yaml index f7f41b72b06..e67a629046a 100644 --- a/rest-api-spec/test/mlt/10_basic.yaml +++ b/rest-api-spec/test/mlt/10_basic.yaml @@ -30,11 +30,16 @@ wait_for_status: green - do: - mlt: - index: test_1 - type: test - id: 1 - mlt_fields: title - + search: + index: test_1 + type: test + body: + query: + more_like_this: + like: + - + _id: 1 + fields: ["title"] + - match: {hits.total: 0} diff --git a/src/main/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java b/src/main/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java deleted file mode 100644 index 4bf403bc24e..00000000000 --- a/src/main/java/org/apache/lucene/spatial/prefix/RecursivePrefixTreeStrategy.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.spatial.prefix; - -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Shape; -import org.apache.lucene.search.Filter; -import org.apache.lucene.spatial.prefix.tree.Cell; -import org.apache.lucene.spatial.prefix.tree.CellIterator; -import org.apache.lucene.spatial.prefix.tree.LegacyCell; -import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; -import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.apache.lucene.spatial.query.SpatialArgs; -import org.apache.lucene.spatial.query.SpatialOperation; -import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -/** - * A {@link PrefixTreeStrategy} which uses {@link AbstractVisitingPrefixTreeFilter}. - * This strategy has support for searching non-point shapes (note: not tested). - * Even a query shape with distErrPct=0 (fully precise to the grid) should have - * good performance for typical data, unless there is a lot of indexed data - * coincident with the shape's edge. - * - * @lucene.experimental - * - * NOTE: Will be removed upon commit of LUCENE-6422 - */ -public class RecursivePrefixTreeStrategy extends PrefixTreeStrategy { - /* Future potential optimizations: - - Each shape.relate(otherShape) result could be cached since much of the same relations will be invoked when - multiple segments are involved. 
Do this for "complex" shapes, not cheap ones, and don't cache when disjoint to - bbox because it's a cheap calc. This is one advantage TermQueryPrefixTreeStrategy has over RPT. - - */ - - protected int prefixGridScanLevel; - - //Formerly known as simplifyIndexedCells. Eventually will be removed. Only compatible with RPT - // and a LegacyPrefixTree. - protected boolean pruneLeafyBranches = true; - - protected boolean multiOverlappingIndexedShapes = true; - - public RecursivePrefixTreeStrategy(SpatialPrefixTree grid, String fieldName) { - super(grid, fieldName); - prefixGridScanLevel = grid.getMaxLevels() - 4;//TODO this default constant is dependent on the prefix grid size - } - - public int getPrefixGridScanLevel() { - return prefixGridScanLevel; - } - - /** - * Sets the grid level [1-maxLevels] at which indexed terms are scanned brute-force - * instead of by grid decomposition. By default this is maxLevels - 4. The - * final level, maxLevels, is always scanned. - * - * @param prefixGridScanLevel 1 to maxLevels - */ - public void setPrefixGridScanLevel(int prefixGridScanLevel) { - //TODO if negative then subtract from maxlevels - this.prefixGridScanLevel = prefixGridScanLevel; - } - - public boolean isMultiOverlappingIndexedShapes() { - return multiOverlappingIndexedShapes; - } - - /** See {@link ContainsPrefixTreeFilter#multiOverlappingIndexedShapes}. */ - public void setMultiOverlappingIndexedShapes(boolean multiOverlappingIndexedShapes) { - this.multiOverlappingIndexedShapes = multiOverlappingIndexedShapes; - } - - public boolean isPruneLeafyBranches() { - return pruneLeafyBranches; - } - - /** An optional hint affecting non-point shapes: it will - * simplify/aggregate sets of complete leaves in a cell to its parent, resulting in ~20-25% - * fewer indexed cells. However, it will likely be removed in the future. (default=true) - */ - public void setPruneLeafyBranches(boolean pruneLeafyBranches) { - this.pruneLeafyBranches = pruneLeafyBranches; - } - - @Override - public String toString() { - StringBuilder str = new StringBuilder(getClass().getSimpleName()).append('('); - str.append("SPG:(").append(grid.toString()).append(')'); - if (pointsOnly) - str.append(",pointsOnly"); - if (pruneLeafyBranches) - str.append(",pruneLeafyBranches"); - if (prefixGridScanLevel != grid.getMaxLevels() - 4) - str.append(",prefixGridScanLevel:").append(""+prefixGridScanLevel); - if (!multiOverlappingIndexedShapes) - str.append(",!multiOverlappingIndexedShapes"); - return str.append(')').toString(); - } - - @Override - protected Iterator createCellIteratorToIndex(Shape shape, int detailLevel, Iterator reuse) { - if (shape instanceof Point || !pruneLeafyBranches || grid instanceof PackedQuadPrefixTree) - return super.createCellIteratorToIndex(shape, detailLevel, reuse); - - List cells = new ArrayList<>(4096); - recursiveTraverseAndPrune(grid.getWorldCell(), shape, detailLevel, cells); - return cells.iterator(); - } - - /** Returns true if cell was added as a leaf. If it wasn't it recursively descends. */ - private boolean recursiveTraverseAndPrune(Cell cell, Shape shape, int detailLevel, List result) { - // Important: this logic assumes Cells don't share anything with other cells when - // calling cell.getNextLevelCells(). This is only true for LegacyCell. 
- if (!(cell instanceof LegacyCell)) - throw new IllegalStateException("pruneLeafyBranches must be disabled for use with grid "+grid); - - if (cell.getLevel() == detailLevel) { - cell.setLeaf();//FYI might already be a leaf - } - if (cell.isLeaf()) { - result.add(cell); - return true; - } - if (cell.getLevel() != 0) - result.add(cell); - - int leaves = 0; - CellIterator subCells = cell.getNextLevelCells(shape); - while (subCells.hasNext()) { - Cell subCell = subCells.next(); - if (recursiveTraverseAndPrune(subCell, shape, detailLevel, result)) - leaves++; - } - //can we prune? - if (leaves == ((LegacyCell)cell).getSubCellsSize() && cell.getLevel() != 0) { - //Optimization: substitute the parent as a leaf instead of adding all - // children as leaves - - //remove the leaves - do { - result.remove(result.size() - 1);//remove last - } while (--leaves > 0); - //add cell as the leaf - cell.setLeaf(); - return true; - } - return false; - } - - @Override - public Filter makeFilter(SpatialArgs args) { - final SpatialOperation op = args.getOperation(); - - Shape shape = args.getShape(); - int detailLevel = grid.getLevelForDistance(args.resolveDistErr(ctx, distErrPct)); - - if (op == SpatialOperation.Intersects) { - return new IntersectsPrefixTreeFilter( - shape, getFieldName(), grid, detailLevel, prefixGridScanLevel); - } else if (op == SpatialOperation.IsWithin) { - return new WithinPrefixTreeFilter( - shape, getFieldName(), grid, detailLevel, prefixGridScanLevel, - -1);//-1 flag is slower but ensures correct results - } else if (op == SpatialOperation.Contains) { - return new ContainsPrefixTreeFilter(shape, getFieldName(), grid, detailLevel, - multiOverlappingIndexedShapes); - } - throw new UnsupportedSpatialOperation(op); - } -} - - - - diff --git a/src/main/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java b/src/main/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java deleted file mode 100644 index fa7bf247786..00000000000 --- a/src/main/java/org/apache/lucene/spatial/prefix/tree/CellIterator.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.spatial.prefix.tree; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** - * An Iterator of SpatialPrefixTree Cells. The order is always sorted without duplicates. - * - * @lucene.experimental - * - * NOTE: Will be removed upon commit of LUCENE-6422 - */ -public abstract class CellIterator implements Iterator { - - //note: nextCell or thisCell can be non-null but neither at the same time. That's - // because they might return the same instance when re-used! - - protected Cell nextCell;//to be returned by next(), and null'ed after - protected Cell thisCell;//see next() & thisCell(). 
Should be cleared in hasNext(). - - /** Returns the cell last returned from {@link #next()}. It's cleared by hasNext(). */ - public Cell thisCell() { - assert thisCell != null : "Only call thisCell() after next(), not hasNext()"; - return thisCell; - } - - // Arguably this belongs here and not on Cell - //public SpatialRelation getShapeRel() - - /** - * Gets the next cell that is >= {@code fromCell}, compared using non-leaf bytes. If it returns null then - * the iterator is exhausted. - */ - public Cell nextFrom(Cell fromCell) { - while (true) { - if (!hasNext()) - return null; - Cell c = next();//will update thisCell - if (c.compareToNoLeaf(fromCell) >= 0) { - return c; - } - } - } - - /** This prevents sub-cells (those underneath the current cell) from being iterated to, - * if applicable, otherwise a NO-OP. */ - @Override - public void remove() { - assert thisCell != null; - } - - @Override - public Cell next() { - if (nextCell == null) { - if (!hasNext()) - throw new NoSuchElementException(); - } - thisCell = nextCell; - nextCell = null; - return thisCell; - } -} diff --git a/src/main/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java b/src/main/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java deleted file mode 100644 index 7900fd62bc4..00000000000 --- a/src/main/java/org/apache/lucene/spatial/prefix/tree/LegacyCell.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.spatial.prefix.tree; - -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.SpatialRelation; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; - -import java.util.Collection; - -/** The base for the original two SPT's: Geohash and Quad. Don't subclass this for new SPTs. - * @lucene.internal - * - * NOTE: Will be removed upon commit of LUCENE-6422 - */ -//public for RPT pruneLeafyBranches code -public abstract class LegacyCell implements Cell { - - // Important: A LegacyCell doesn't share state for getNextLevelCells(), and - // LegacySpatialPrefixTree assumes this in its simplify tree logic. - - private static final byte LEAF_BYTE = '+';//NOTE: must sort before letters & numbers - - //Arguably we could simply use a BytesRef, using an extra Object. - protected byte[] bytes;//generally bigger to potentially hold a leaf - protected int b_off; - protected int b_len;//doesn't reflect leaf; same as getLevel() - - protected boolean isLeaf; - - /** - * When set via getSubCells(filter), it is the relationship between this cell - * and the given shape filter. Doesn't participate in shape equality. 
- */ - protected SpatialRelation shapeRel; - - protected Shape shape;//cached - - /** Warning: Refers to the same bytes (no copy). If {@link #setLeaf()} is subsequently called then it - * may modify bytes. */ - protected LegacyCell(byte[] bytes, int off, int len) { - this.bytes = bytes; - this.b_off = off; - this.b_len = len; - readLeafAdjust(); - } - - protected void readCell(BytesRef bytes) { - shapeRel = null; - shape = null; - this.bytes = bytes.bytes; - this.b_off = bytes.offset; - this.b_len = (short) bytes.length; - readLeafAdjust(); - } - - protected void readLeafAdjust() { - isLeaf = (b_len > 0 && bytes[b_off + b_len - 1] == LEAF_BYTE); - if (isLeaf) - b_len--; - if (getLevel() == getMaxLevels()) - isLeaf = true; - } - - protected abstract SpatialPrefixTree getGrid(); - - protected abstract int getMaxLevels(); - - @Override - public SpatialRelation getShapeRel() { - return shapeRel; - } - - @Override - public void setShapeRel(SpatialRelation rel) { - this.shapeRel = rel; - } - - @Override - public boolean isLeaf() { - return isLeaf; - } - - @Override - public void setLeaf() { - isLeaf = true; - } - - @Override - public BytesRef getTokenBytesWithLeaf(BytesRef result) { - result = getTokenBytesNoLeaf(result); - if (!isLeaf || getLevel() == getMaxLevels()) - return result; - if (result.bytes.length < result.offset + result.length + 1) { - assert false : "Not supposed to happen; performance bug"; - byte[] copy = new byte[result.length + 1]; - System.arraycopy(result.bytes, result.offset, copy, 0, result.length - 1); - result.bytes = copy; - result.offset = 0; - } - result.bytes[result.offset + result.length++] = LEAF_BYTE; - return result; - } - - @Override - public BytesRef getTokenBytesNoLeaf(BytesRef result) { - if (result == null) - return new BytesRef(bytes, b_off, b_len); - result.bytes = bytes; - result.offset = b_off; - result.length = b_len; - return result; - } - - @Override - public int getLevel() { - return b_len; - } - - @Override - public CellIterator getNextLevelCells(Shape shapeFilter) { - assert getLevel() < getGrid().getMaxLevels(); - if (shapeFilter instanceof Point) { - LegacyCell cell = getSubCell((Point) shapeFilter); - cell.shapeRel = SpatialRelation.CONTAINS; - return new SingletonCellIterator(cell); - } else { - return new FilterCellIterator(getSubCells().iterator(), shapeFilter); - } - } - - /** - * Performant implementations are expected to implement this efficiently by - * considering the current cell's boundary. - *

- * Precondition: Never called when getLevel() == maxLevel. - * Precondition: this.getShape().relate(p) != DISJOINT. - */ - protected abstract LegacyCell getSubCell(Point p); - - /** - * Gets the cells at the next grid cell level that covers this cell. - * Precondition: Never called when getLevel() == maxLevel. - * - * @return A set of cells (no dups), sorted, modifiable, not empty, not null. - */ - protected abstract Collection getSubCells(); - - /** - * {@link #getSubCells()}.size() -- usually a constant. Should be >=2 - */ - public abstract int getSubCellsSize(); - - @Override - public boolean isPrefixOf(Cell c) { - //Note: this only works when each level uses a whole number of bytes. - LegacyCell cell = (LegacyCell)c; - boolean result = sliceEquals(cell.bytes, cell.b_off, cell.b_len, bytes, b_off, b_len); - assert result == StringHelper.startsWith(c.getTokenBytesNoLeaf(null), getTokenBytesNoLeaf(null)); - return result; - } - - /** Copied from {@link org.apache.lucene.util.StringHelper#startsWith(org.apache.lucene.util.BytesRef, org.apache.lucene.util.BytesRef)} - * which calls this. This is to avoid creating a BytesRef. */ - private static boolean sliceEquals(byte[] sliceToTest_bytes, int sliceToTest_offset, int sliceToTest_length, - byte[] other_bytes, int other_offset, int other_length) { - if (sliceToTest_length < other_length) { - return false; - } - int i = sliceToTest_offset; - int j = other_offset; - final int k = other_offset + other_length; - - while (j < k) { - if (sliceToTest_bytes[i++] != other_bytes[j++]) { - return false; - } - } - - return true; - } - - @Override - public int compareToNoLeaf(Cell fromCell) { - LegacyCell b = (LegacyCell) fromCell; - return compare(bytes, b_off, b_len, b.bytes, b.b_off, b.b_len); - } - - /** Copied from {@link org.apache.lucene.util.BytesRef#compareTo(org.apache.lucene.util.BytesRef)}. - * This is to avoid creating a BytesRef. */ - protected static int compare(byte[] aBytes, int aUpto, int a_length, byte[] bBytes, int bUpto, int b_length) { - final int aStop = aUpto + Math.min(a_length, b_length); - while(aUpto < aStop) { - int aByte = aBytes[aUpto++] & 0xff; - int bByte = bBytes[bUpto++] & 0xff; - - int diff = aByte - bByte; - if (diff != 0) { - return diff; - } - } - - // One is a prefix of the other, or, they are equal: - return a_length - b_length; - } - - @Override - public boolean equals(Object obj) { - //this method isn't "normally" called; just in asserts/tests - if (obj instanceof Cell) { - Cell cell = (Cell) obj; - return getTokenBytesWithLeaf(null).equals(cell.getTokenBytesWithLeaf(null)); - } else { - return false; - } - } - - @Override - public int hashCode() { - return getTokenBytesWithLeaf(null).hashCode(); - } - - @Override - public String toString() { - //this method isn't "normally" called; just in asserts/tests - return getTokenBytesWithLeaf(null).utf8ToString(); - } - -} diff --git a/src/main/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java b/src/main/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java deleted file mode 100644 index 65808c041e3..00000000000 --- a/src/main/java/org/apache/lucene/spatial/prefix/tree/PackedQuadPrefixTree.java +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.spatial.prefix.tree; - -import com.spatial4j.core.context.SpatialContext; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Rectangle; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.SpatialRelation; -import com.spatial4j.core.shape.impl.RectangleImpl; -import org.apache.lucene.util.BytesRef; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.NoSuchElementException; - -/** - * Subclassing QuadPrefixTree this {@link SpatialPrefixTree} uses the compact QuadCell encoding described in - * {@link PackedQuadCell} - * - * @lucene.experimental - * - * NOTE: Will be removed upon commit of LUCENE-6422 - */ -public class PackedQuadPrefixTree extends QuadPrefixTree { - public static final byte[] QUAD = new byte[] {0x00, 0x01, 0x02, 0x03}; - public static final int MAX_LEVELS_POSSIBLE = 29; - - private boolean leafyPrune = true; - - public static class Factory extends QuadPrefixTree.Factory { - @Override - protected SpatialPrefixTree newSPT() { - if (maxLevels > MAX_LEVELS_POSSIBLE) { - throw new IllegalArgumentException("maxLevels " + maxLevels + " exceeds maximum value " + MAX_LEVELS_POSSIBLE); - } - return new PackedQuadPrefixTree(ctx, maxLevels); - } - } - - public PackedQuadPrefixTree(SpatialContext ctx, int maxLevels) { - super(ctx, maxLevels); - } - - @Override - public Cell getWorldCell() { - return new PackedQuadCell(0x0L); - } - @Override - public Cell getCell(Point p, int level) { - List cells = new ArrayList<>(1); - build(xmid, ymid, 0, cells, 0x0L, ctx.makePoint(p.getX(),p.getY()), level); - return cells.get(0);//note cells could be longer if p on edge - } - - protected void build(double x, double y, int level, List matches, long term, Shape shape, int maxLevel) { - double w = levelW[level] / 2; - double h = levelH[level] / 2; - - // Z-Order - // http://en.wikipedia.org/wiki/Z-order_%28curve%29 - checkBattenberg(QUAD[0], x - w, y + h, level, matches, term, shape, maxLevel); - checkBattenberg(QUAD[1], x + w, y + h, level, matches, term, shape, maxLevel); - checkBattenberg(QUAD[2], x - w, y - h, level, matches, term, shape, maxLevel); - checkBattenberg(QUAD[3], x + w, y - h, level, matches, term, shape, maxLevel); - } - - protected void checkBattenberg(byte quad, double cx, double cy, int level, List matches, - long term, Shape shape, int maxLevel) { - // short-circuit if we find a match for the point (no need to continue recursion) - if (shape instanceof Point && !matches.isEmpty()) - return; - double w = levelW[level] / 2; - double h = levelH[level] / 2; - - SpatialRelation v = shape.relate(ctx.makeRectangle(cx - w, cx + w, cy - h, cy + h)); - - if (SpatialRelation.DISJOINT == v) { - return; - } - - // set bits for next level - term |= (((long)(quad))<<(64-(++level<<1))); - // increment level - term = ((term>>>1)+1)<<1; - - if (SpatialRelation.CONTAINS == v || (level >= 
maxLevel)) { - matches.add(new PackedQuadCell(term, v.transpose())); - } else {// SpatialRelation.WITHIN, SpatialRelation.INTERSECTS - build(cx, cy, level, matches, term, shape, maxLevel); - } - } - - @Override - public Cell readCell(BytesRef term, Cell scratch) { - PackedQuadCell cell = (PackedQuadCell) scratch; - if (cell == null) - cell = (PackedQuadCell) getWorldCell(); - cell.readCell(term); - return cell; - } - - @Override - public CellIterator getTreeCellIterator(Shape shape, int detailLevel) { - return new PrefixTreeIterator(shape); - } - - public void setPruneLeafyBranches( boolean pruneLeafyBranches ) { - this.leafyPrune = pruneLeafyBranches; - } - - /** - * PackedQuadCell Binary Representation is as follows - * CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCDDDDDL - * - * Where C = Cell bits (2 per quad) - * D = Depth bits (5 with max of 29 levels) - * L = isLeaf bit - */ - public class PackedQuadCell extends QuadCell { - private long term; - - PackedQuadCell(long term) { - super(null, 0, 0); - this.term = term; - this.b_off = 0; - this.bytes = longToByteArray(this.term); - this.b_len = 8; - readLeafAdjust(); - } - - PackedQuadCell(long term, SpatialRelation shapeRel) { - this(term); - this.shapeRel = shapeRel; - } - - @Override - protected void readCell(BytesRef bytes) { - shapeRel = null; - shape = null; - this.bytes = bytes.bytes; - this.b_off = bytes.offset; - this.b_len = (short) bytes.length; - this.term = longFromByteArray(this.bytes, bytes.offset); - readLeafAdjust(); - } - - private final int getShiftForLevel(final int level) { - return 64 - (level<<1); - } - - public boolean isEnd(final int level, final int shift) { - return (term != 0x0L && ((((0x1L<<(level<<1))-1)-(term>>>shift)) == 0x0L)); - } - - /** - * Get the next cell in the tree without using recursion. descend parameter requests traversal to the child nodes, - * setting this to false will step to the next sibling. - * Note: This complies with lexicographical ordering, once you've moved to the next sibling there is no backtracking. 
- */ - public PackedQuadCell nextCell(boolean descend) { - final int level = getLevel(); - final int shift = getShiftForLevel(level); - // base case: can't go further - if ( (!descend && isEnd(level, shift)) || isEnd(maxLevels, getShiftForLevel(maxLevels))) { - return null; - } - long newTerm; - final boolean isLeaf = (term&0x1L)==0x1L; - // if descend requested && we're not at the maxLevel - if ((descend && !isLeaf && (level != maxLevels)) || level == 0) { - // simple case: increment level bits (next level) - newTerm = ((term>>>1)+0x1L)<<1; - } else { // we're not descending or we can't descend - newTerm = term + (0x1L<>>shift)&0x3L) == 0x3L) { - // adjust level for number popping up - newTerm = ((newTerm>>>1) - (Long.numberOfTrailingZeros(newTerm>>>shift)>>>1))<<1; - } - } - return new PackedQuadCell(newTerm); - } - - @Override - protected void readLeafAdjust() { - isLeaf = ((0x1L)&term) == 0x1L; - if (getLevel() == getMaxLevels()) { - isLeaf = true; - } - } - - @Override - public BytesRef getTokenBytesWithLeaf(BytesRef result) { - if (isLeaf) { - term |= 0x1L; - } - return getTokenBytesNoLeaf(result); - } - - @Override - public BytesRef getTokenBytesNoLeaf(BytesRef result) { - if (result == null) - return new BytesRef(bytes, b_off, b_len); - result.bytes = longToByteArray(this.term); - result.offset = 0; - result.length = result.bytes.length; - return result; - } - - @Override - public int compareToNoLeaf(Cell fromCell) { - PackedQuadCell b = (PackedQuadCell) fromCell; - final long thisTerm = (((0x1L)&term) == 0x1L) ? term-1 : term; - final long fromTerm = (((0x1L)&b.term) == 0x1L) ? b.term-1 : b.term; - final int result = compare(longToByteArray(thisTerm), 0, 8, longToByteArray(fromTerm), 0, 8); - return result; - } - - @Override - public int getLevel() { - int l = (int)((term >>> 1)&0x1FL); - return l; - } - - @Override - protected Collection getSubCells() { - List cells = new ArrayList<>(4); - PackedQuadCell pqc = (PackedQuadCell)(new PackedQuadCell(((term&0x1)==0x1) ? this.term-1 : this.term)) - .nextCell(true); - cells.add(pqc); - cells.add((pqc = (PackedQuadCell) (pqc.nextCell(false)))); - cells.add((pqc = (PackedQuadCell) (pqc.nextCell(false)))); - cells.add(pqc.nextCell(false)); - return cells; - } - - @Override - protected QuadCell getSubCell(Point p) { - return (PackedQuadCell) PackedQuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant! - } - - @Override - public boolean isPrefixOf(Cell c) { - PackedQuadCell cell = (PackedQuadCell)c; - return (this.term==0x0L) ? 
true : isInternalPrefix(cell); - } - - protected boolean isInternalPrefix(PackedQuadCell c) { - final int shift = 64 - (getLevel()<<1); - return ((term>>>shift)-(c.term>>>shift)) == 0x0L; - } - - protected long concat(byte postfix) { - // extra leaf bit - return this.term | (((long)(postfix))<<((getMaxLevels()-getLevel()<<1)+6)); - } - - /** - * Constructs a bounding box shape out of the encoded cell - */ - @Override - protected Rectangle makeShape() { - double xmin = PackedQuadPrefixTree.this.xmin; - double ymin = PackedQuadPrefixTree.this.ymin; - int level = getLevel(); - - byte b; - for (short l=0, i=1; l<level; ++l, ++i) { - b = (byte) ((term>>>(64-(i<<1))) & 0x3L); - - switch (b) { - case 0x00: - ymin += levelH[l]; - break; - case 0x01: - xmin += levelW[l]; - ymin += levelH[l]; - break; - case 0x02: - break;//nothing really - case 0x03: - xmin += levelW[l]; - break; - default: - throw new RuntimeException("unexpected quadrant"); - } - } - - double width, height; - if (level > 0) { - width = levelW[level - 1]; - height = levelH[level - 1]; - } else { - width = gridW; - height = gridH; - } - return new RectangleImpl(xmin, xmin + width, ymin, ymin + height, ctx); - } - - private long fromBytes(byte b1, byte b2, byte b3, byte b4, byte b5, byte b6, byte b7, byte b8) { - return ((long)b1 & 255L) << 56 | ((long)b2 & 255L) << 48 | ((long)b3 & 255L) << 40 - | ((long)b4 & 255L) << 32 | ((long)b5 & 255L) << 24 | ((long)b6 & 255L) << 16 - | ((long)b7 & 255L) << 8 | (long)b8 & 255L; - } - - private byte[] longToByteArray(long value) { - byte[] result = new byte[8]; - for(int i = 7; i >= 0; --i) { - result[i] = (byte)((int)(value & 255L)); - value >>= 8; - } - return result; - } - - private long longFromByteArray(byte[] bytes, int ofs) { - assert bytes.length >= 8; - return fromBytes(bytes[0+ofs], bytes[1+ofs], bytes[2+ofs], bytes[3+ofs], - bytes[4+ofs], bytes[5+ofs], bytes[6+ofs], bytes[7+ofs]); - } - - /** - * Used for debugging, this will print the bits of the cell - */ - @Override - public String toString() { - String s = ""; - for(int i = 0; i < Long.numberOfLeadingZeros(term); i++) { - s+='0'; - } - if (term != 0) - s += Long.toBinaryString(term); - return s; - } - } // PackedQuadCell - - protected class PrefixTreeIterator extends CellIterator { - private Shape shape; - private PackedQuadCell thisCell; - private PackedQuadCell nextCell; - - private short leaves; - private short level; - private final short maxLevels; - private CellIterator pruneIter; - - PrefixTreeIterator(Shape shape) { - this.shape = shape; - this.thisCell = ((PackedQuadCell)(getWorldCell())).nextCell(true); - this.maxLevels = (short)thisCell.getMaxLevels(); - this.nextCell = null; - } - - @Override - public boolean hasNext() { - if (nextCell != null) { - return true; - } - SpatialRelation rel; - // loop until we're at the end of the quad tree or we hit a relation - while (thisCell != null) { - rel = thisCell.getShape().relate(shape); - if (rel == SpatialRelation.DISJOINT) { - thisCell = thisCell.nextCell(false); - } else { // within || intersects || contains - thisCell.setShapeRel(rel); - nextCell = thisCell; - if (rel == SpatialRelation.WITHIN) { - thisCell.setLeaf(); - thisCell = thisCell.nextCell(false); - } else { // intersects || contains - level = (short) (thisCell.getLevel()); - if (level == maxLevels || pruned(rel)) { - thisCell.setLeaf(); - if (shape instanceof Point) { - thisCell.setShapeRel(SpatialRelation.WITHIN); - thisCell = null; - } else { - thisCell = thisCell.nextCell(false); - } - break; - } - thisCell = thisCell.nextCell(true); - } - break; -
} - } - return nextCell != null; - } - - private boolean pruned(SpatialRelation rel) { - if (rel == SpatialRelation.INTERSECTS && leafyPrune && level == maxLevels-1) { - for (leaves=0, pruneIter=thisCell.getNextLevelCells(shape); pruneIter.hasNext(); pruneIter.next(), ++leaves); - return leaves == 4; - } - return false; - } - - @Override - public Cell next() { - if (nextCell == null) { - if (!hasNext()) { - throw new NoSuchElementException(); - } - } - // overriding since this implementation sets thisCell in hasNext - Cell temp = nextCell; - nextCell = null; - return temp; - } - - @Override - public void remove() { - //no-op - } - } -} diff --git a/src/main/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/src/main/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java deleted file mode 100644 index 489816ddf3c..00000000000 --- a/src/main/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.spatial.prefix.tree; - -import com.spatial4j.core.context.SpatialContext; -import com.spatial4j.core.shape.Point; -import com.spatial4j.core.shape.Rectangle; -import com.spatial4j.core.shape.Shape; -import com.spatial4j.core.shape.SpatialRelation; -import org.apache.lucene.util.BytesRef; - -import java.io.PrintStream; -import java.text.NumberFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Locale; - -/** - * A {@link SpatialPrefixTree} which uses a - * quad tree in which an - * indexed term will be generated for each cell, 'A', 'B', 'C', 'D'. - * - * @lucene.experimental - * - * NOTE: Will be removed upon commit of LUCENE-6422 - */ -public class QuadPrefixTree extends LegacyPrefixTree { - - /** - * Factory for creating {@link QuadPrefixTree} instances with useful defaults - */ - public static class Factory extends SpatialPrefixTreeFactory { - - @Override - protected int getLevelForDistance(double degrees) { - QuadPrefixTree grid = new QuadPrefixTree(ctx, MAX_LEVELS_POSSIBLE); - return grid.getLevelForDistance(degrees); - } - - @Override - protected SpatialPrefixTree newSPT() { - return new QuadPrefixTree(ctx, - maxLevels != null ? 
maxLevels : MAX_LEVELS_POSSIBLE); - } - } - - public static final int MAX_LEVELS_POSSIBLE = 50;//not really sure how big this should be - - public static final int DEFAULT_MAX_LEVELS = 12; - protected final double xmin; - protected final double xmax; - protected final double ymin; - protected final double ymax; - protected final double xmid; - protected final double ymid; - - protected final double gridW; - public final double gridH; - - final double[] levelW; - final double[] levelH; - final int[] levelS; // side - final int[] levelN; // number - - public QuadPrefixTree( - SpatialContext ctx, Rectangle bounds, int maxLevels) { - super(ctx, maxLevels); - this.xmin = bounds.getMinX(); - this.xmax = bounds.getMaxX(); - this.ymin = bounds.getMinY(); - this.ymax = bounds.getMaxY(); - - levelW = new double[maxLevels]; - levelH = new double[maxLevels]; - levelS = new int[maxLevels]; - levelN = new int[maxLevels]; - - gridW = xmax - xmin; - gridH = ymax - ymin; - this.xmid = xmin + gridW/2.0; - this.ymid = ymin + gridH/2.0; - levelW[0] = gridW/2.0; - levelH[0] = gridH/2.0; - levelS[0] = 2; - levelN[0] = 4; - - for (int i = 1; i < levelW.length; i++) { - levelW[i] = levelW[i - 1] / 2.0; - levelH[i] = levelH[i - 1] / 2.0; - levelS[i] = levelS[i - 1] * 2; - levelN[i] = levelN[i - 1] * 4; - } - } - - public QuadPrefixTree(SpatialContext ctx) { - this(ctx, DEFAULT_MAX_LEVELS); - } - - public QuadPrefixTree( - SpatialContext ctx, int maxLevels) { - this(ctx, ctx.getWorldBounds(), maxLevels); - } - - @Override - public Cell getWorldCell() { - return new QuadCell(BytesRef.EMPTY_BYTES, 0, 0); - } - - public void printInfo(PrintStream out) { - NumberFormat nf = NumberFormat.getNumberInstance(Locale.ROOT); - nf.setMaximumFractionDigits(5); - nf.setMinimumFractionDigits(5); - nf.setMinimumIntegerDigits(3); - - for (int i = 0; i < maxLevels; i++) { - out.println(i + "]\t" + nf.format(levelW[i]) + "\t" + nf.format(levelH[i]) + "\t" + - levelS[i] + "\t" + (levelS[i] * levelS[i])); - } - } - - @Override - public int getLevelForDistance(double dist) { - if (dist == 0)//short circuit - return maxLevels; - for (int i = 0; i < maxLevels-1; i++) { - //note: level[i] is actually a lookup for level i+1 - if(dist > levelW[i] && dist > levelH[i]) { - return i+1; - } - } - return maxLevels; - } - - @Override - public Cell getCell(Point p, int level) { - List cells = new ArrayList<>(1); - build(xmid, ymid, 0, cells, new BytesRef(maxLevels+1), ctx.makePoint(p.getX(),p.getY()), level); - return cells.get(0);//note cells could be longer if p on edge - } - - private void build( - double x, - double y, - int level, - List matches, - BytesRef str, - Shape shape, - int maxLevel) { - assert str.length == level; - double w = levelW[level] / 2; - double h = levelH[level] / 2; - - // Z-Order - // http://en.wikipedia.org/wiki/Z-order_%28curve%29 - checkBattenberg('A', x - w, y + h, level, matches, str, shape, maxLevel); - checkBattenberg('B', x + w, y + h, level, matches, str, shape, maxLevel); - checkBattenberg('C', x - w, y - h, level, matches, str, shape, maxLevel); - checkBattenberg('D', x + w, y - h, level, matches, str, shape, maxLevel); - - // possibly consider hilbert curve - // http://en.wikipedia.org/wiki/Hilbert_curve - // http://blog.notdot.net/2009/11/Damn-Cool-Algorithms-Spatial-indexing-with-Quadtrees-and-Hilbert-Curves - // if we actually use the range property in the query, this could be useful - } - - protected void checkBattenberg( - char c, - double cx, - double cy, - int level, - List matches, - BytesRef str, - 
Shape shape, - int maxLevel) { - assert str.length == level; - assert str.offset == 0; - double w = levelW[level] / 2; - double h = levelH[level] / 2; - - int strlen = str.length; - Rectangle rectangle = ctx.makeRectangle(cx - w, cx + w, cy - h, cy + h); - SpatialRelation v = shape.relate(rectangle); - if (SpatialRelation.CONTAINS == v) { - str.bytes[str.length++] = (byte)c;//append - //str.append(SpatialPrefixGrid.COVER); - matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose())); - } else if (SpatialRelation.DISJOINT == v) { - // nothing - } else { // SpatialRelation.WITHIN, SpatialRelation.INTERSECTS - str.bytes[str.length++] = (byte)c;//append - - int nextLevel = level+1; - if (nextLevel >= maxLevel) { - //str.append(SpatialPrefixGrid.INTERSECTS); - matches.add(new QuadCell(BytesRef.deepCopyOf(str), v.transpose())); - } else { - build(cx, cy, nextLevel, matches, str, shape, maxLevel); - } - } - str.length = strlen; - } - - protected class QuadCell extends LegacyCell { - - QuadCell(byte[] bytes, int off, int len) { - super(bytes, off, len); - } - - QuadCell(BytesRef str, SpatialRelation shapeRel) { - this(str.bytes, str.offset, str.length); - this.shapeRel = shapeRel; - } - - @Override - protected QuadPrefixTree getGrid() { return QuadPrefixTree.this; } - - @Override - protected int getMaxLevels() { return maxLevels; } - - @Override - protected Collection getSubCells() { - BytesRef source = getTokenBytesNoLeaf(null); - - List cells = new ArrayList<>(4); - cells.add(new QuadCell(concat(source, (byte)'A'), null)); - cells.add(new QuadCell(concat(source, (byte)'B'), null)); - cells.add(new QuadCell(concat(source, (byte)'C'), null)); - cells.add(new QuadCell(concat(source, (byte)'D'), null)); - return cells; - } - - protected BytesRef concat(BytesRef source, byte b) { - //+2 for new char + potential leaf - final byte[] buffer = Arrays.copyOfRange(source.bytes, source.offset, source.offset + source.length + 2); - BytesRef target = new BytesRef(buffer); - target.length = source.length; - target.bytes[target.length++] = b; - return target; - } - - @Override - public int getSubCellsSize() { - return 4; - } - - @Override - protected QuadCell getSubCell(Point p) { - return (QuadCell) QuadPrefixTree.this.getCell(p, getLevel() + 1);//not performant! 
- } - - @Override - public Shape getShape() { - if (shape == null) - shape = makeShape(); - return shape; - } - - protected Rectangle makeShape() { - BytesRef token = getTokenBytesNoLeaf(null); - double xmin = QuadPrefixTree.this.xmin; - double ymin = QuadPrefixTree.this.ymin; - - for (int i = 0; i < token.length; i++) { - byte c = token.bytes[token.offset + i]; - switch (c) { - case 'A': - ymin += levelH[i]; - break; - case 'B': - xmin += levelW[i]; - ymin += levelH[i]; - break; - case 'C': - break;//nothing really - case 'D': - xmin += levelW[i]; - break; - default: - throw new RuntimeException("unexpected char: " + c); - } - } - int len = token.length; - double width, height; - if (len > 0) { - width = levelW[len-1]; - height = levelH[len-1]; - } else { - width = gridW; - height = gridH; - } - return ctx.makeRectangle(xmin, xmin + width, ymin, ymin + height); - } - }//QuadCell -} diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 3046d15418f..c529a3e876f 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -139,8 +139,6 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptAction; import org.elasticsearch.action.indexedscripts.get.TransportGetIndexedScriptAction; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction; import org.elasticsearch.action.indexedscripts.put.TransportPutIndexedScriptAction; -import org.elasticsearch.action.mlt.MoreLikeThisAction; -import org.elasticsearch.action.mlt.TransportMoreLikeThisAction; import org.elasticsearch.action.percolate.*; import org.elasticsearch.action.search.*; import org.elasticsearch.action.search.type.*; @@ -293,7 +291,6 @@ public class ActionModule extends AbstractModule { TransportSearchScrollQueryAndFetchAction.class ); registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); - registerAction(MoreLikeThisAction.INSTANCE, TransportMoreLikeThisAction.class); registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class); registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class); registerAction(ExplainAction.INSTANCE, TransportExplainAction.class); diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index bbf9af5b834..91e650bb15b 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -148,4 +148,4 @@ public class BulkRequestBuilder extends ActionRequestBuilder shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) { final BulkShardRequest request = shardRequest.request; - IndexService indexService = indicesService.indexServiceSafe(request.index()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); + final IndexService indexService = indicesService.indexServiceSafe(request.index()); + final IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); long[] preVersions = new long[request.items().length]; VersionType[] preVersionTypes = new VersionType[request.items().length]; + Translog.Location location = null; for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) { BulkItemRequest item = request.items()[requestIndex]; if (item.request() 
instanceof IndexRequest) { @@ -129,7 +126,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation preVersions[requestIndex] = indexRequest.version(); preVersionTypes[requestIndex] = indexRequest.versionType(); try { - WriteResult result = shardIndexOperation(request, indexRequest, clusterState, indexShard, indexService, true); + WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, clusterState, indexShard, true); + location = locationToSync(location, result.location); // add the response IndexResponse indexResponse = result.response(); setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); @@ -164,7 +162,9 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation try { // add the response - DeleteResponse deleteResponse = shardDeleteOperation(request, deleteRequest, indexShard).response(); + final WriteResult<DeleteResponse> writeResult = shardDeleteOperation(request, deleteRequest, indexShard); + DeleteResponse deleteResponse = writeResult.response(); + location = locationToSync(location, writeResult.location); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); } catch (Throwable e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it @@ -198,15 +198,18 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) { UpdateResult updateResult; try { - updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard, indexService); + updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard); } catch (Throwable t) { updateResult = new UpdateResult(null, null, false, t, null); } if (updateResult.success()) { + if (updateResult.writeResult != null) { + location = locationToSync(location, updateResult.writeResult.location); + } switch (updateResult.result.operation()) { case UPSERT: case INDEX: - WriteResult result = updateResult.writeResult; + WriteResult<IndexResponse> result = updateResult.writeResult; IndexRequest indexRequest = updateResult.request(); BytesReference indexSourceAsBytes = indexRequest.source(); // add the response @@ -220,7 +223,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse)); break; case DELETE: - DeleteResponse response = updateResult.writeResult.response(); + WriteResult<DeleteResponse> writeResult = updateResult.writeResult; + DeleteResponse response = writeResult.response(); DeleteRequest deleteRequest = updateResult.request(); updateResponse = new UpdateResponse(response.getShardInfo(), response.getIndex(), response.getType(), response.getId(), response.getVersion(), false); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, shardRequest.request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); @@ -298,13 +302,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation assert preVersionTypes[requestIndex] != null; } - if (request.refresh()) { - try { - indexShard.refresh("refresh_flag_bulk"); - } catch (Throwable e) { - // ignore - } - } + processAfter(request, indexShard, location); BulkItemResponse[] responses = new BulkItemResponse[request.items().length]; BulkItemRequest[] items = request.items(); for
(int i = 0; i < items.length; i++) { @@ -320,28 +318,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } - static class WriteResult { - - final ActionWriteResponse response; - final Engine.IndexingOperation op; - - WriteResult(ActionWriteResponse response, Engine.IndexingOperation op) { - this.response = response; - this.op = op; - } - - @SuppressWarnings("unchecked") - <T extends ActionWriteResponse> T response() { - // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica - // request and not use it - response.setShardInfo(new ActionWriteResponse.ShardInfo()); - return (T) response; - } - - } - - private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState, - IndexShard indexShard, IndexService indexService, boolean processed) throws Throwable { + private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState, + IndexShard indexShard, boolean processed) throws Throwable { // validate, if routing is required, that we got routing MappingMetaData mappingMd = clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type()); @@ -355,52 +333,10 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index()); } - SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()) - .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl()); - - final Engine.IndexingOperation operation; - if (indexRequest.opType() == IndexRequest.OpType.INDEX) { - operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - } else { - assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType(); - operation = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, - request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - } - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final boolean created; - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update.
Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); - created = operation.execute(indexShard); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); - created = operation.execute(indexShard); - } - } else { - created = operation.execute(indexShard); - } - - // update the version on request so it will happen on the replicas - final long version = operation.version(); - indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); - indexRequest.version(version); - - assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); - - IndexResponse indexResponse = new IndexResponse(request.index(), indexRequest.type(), indexRequest.id(), version, created); - return new WriteResult(indexResponse, operation); + return executeIndexRequestOnPrimary(request, indexRequest, indexShard); } - private WriteResult shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) { + private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) { Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version(), deleteRequest.versionType(), Engine.Operation.Origin.PRIMARY); indexShard.delete(delete); // update the request with the version so it will go to the replicas @@ -410,7 +346,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version()); DeleteResponse deleteResponse = new DeleteResponse(request.index(), deleteRequest.type(), deleteRequest.id(), delete.version(), delete.found()); - return new WriteResult(deleteResponse, null); + return new WriteResult(deleteResponse, delete.getTranslogLocation()); } static class UpdateResult { @@ -466,14 +402,14 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } - private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard, IndexService indexService) { + private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) { UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard); switch (translate.operation()) { case UPSERT: case INDEX: IndexRequest indexRequest = translate.action(); try { - WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, indexService, false); + WriteResult<IndexResponse> result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false); return new UpdateResult(translate, indexRequest, result); } catch (Throwable t) { t = ExceptionsHelper.unwrapCause(t); @@ -510,6 +446,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) { IndexService
indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.shardSafe(shardId.id()); + Translog.Location location = null; for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; if (item == null || item.isIgnoreOnReplica()) { @@ -535,6 +472,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } operation.execute(indexShard); + location = locationToSync(location, operation.getTranslogLocation()); } catch (Throwable e) { // if its not an ignore replica failure, we need to make sure to bubble up the failure // so we will fail the shard @@ -547,6 +485,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation try { Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version(), deleteRequest.versionType(), Engine.Operation.Origin.REPLICA); indexShard.delete(delete); + location = locationToSync(location, delete.getTranslogLocation()); } catch (Throwable e) { // if its not an ignore replica failure, we need to make sure to bubble up the failure // so we will fail the shard @@ -559,6 +498,10 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } + processAfter(request, indexShard, location); + } + + private void processAfter(BulkShardRequest request, IndexShard indexShard, Translog.Location location) { if (request.refresh()) { try { indexShard.refresh("refresh_flag_bulk"); @@ -566,6 +509,10 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation // ignore } } + + if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) { + indexShard.sync(location); + } } private void applyVersion(BulkItemRequest item, long version, VersionType versionType) { @@ -579,4 +526,15 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation // log? } } + + private Translog.Location locationToSync(Translog.Location current, Translog.Location next) { + /* here we are moving forward in the translog with each operation. Under the hood + * this might cross translog files which is ok since from the user perspective + * the translog is like a tape where only the highest location needs to be fsynced + * in order to sync all previous locations even though they are not in the same file. 
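A minimal sketch of the bookkeeping this comment describes, assuming only that translog locations are totally ordered; `Loc` here is a stand-in for `Translog.Location`, not the real class:

[source,java]
----
import java.util.List;

// Stand-in for Translog.Location: ordered by file generation, then offset.
record Loc(long generation, long offset) implements Comparable<Loc> {
    @Override
    public int compareTo(Loc o) {
        int c = Long.compare(generation, o.generation);
        return c != 0 ? c : Long.compare(offset, o.offset);
    }
}

class SyncDemo {
    // Mirrors locationToSync: each write lands at a strictly higher location,
    // so remembering only the newest one suffices to fsync everything before it.
    static Loc locationToSync(Loc current, Loc next) {
        assert next != null : "next operation can't be null";
        assert current == null || current.compareTo(next) < 0;
        return next;
    }

    public static void main(String[] args) {
        Loc last = null;
        for (Loc op : List.of(new Loc(1, 100), new Loc(1, 250), new Loc(2, 10))) {
            last = locationToSync(last, op);
        }
        System.out.println("one fsync up to " + last + " covers all three writes");
    }
}
----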
+ * When the translog rolls over files the previous file is fsynced on after closing if needed.*/ + assert next != null : "next operation can't be null"; + assert current == null || current.compareTo(next) < 0 : "translog locations are not increasing"; + return next; + } } diff --git a/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 5aa929913f2..7fdde8f9754 100644 --- a/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -19,17 +19,20 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.ShardIterator; @@ -40,11 +43,14 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Performs the delete operation. 
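A hedged sketch of the post-write hook this patch threads through the bulk, delete, and index paths (`processAfter`): optionally refresh, then fsync the translog when durability is per-request. `Shard`, `Durability`, and the method bodies below are illustrative stand-ins, not the actual Elasticsearch classes:

[source,java]
----
class Shard {
    enum Durability { REQUEST, ASYNC }
    Durability durability = Durability.REQUEST;
    void refresh(String reason) { /* make recent writes visible to search */ }
    void sync(long location)    { /* fsync the translog up to this location */ }
}

class ProcessAfterDemo {
    // Mirrors the processAfter(...) shape: refresh failures are swallowed,
    // but the translog sync is what makes the acknowledged write durable.
    static void processAfter(boolean refresh, Shard shard, Long location) {
        if (refresh) {
            try {
                shard.refresh("refresh_flag_demo");
            } catch (Throwable t) {
                // ignore: a failed refresh should not fail the write itself
            }
        }
        if (shard.durability == Shard.Durability.REQUEST && location != null) {
            shard.sync(location); // ack only after the operation is on disk
        }
    }
}
----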
*/ @@ -56,8 +62,10 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct @Inject public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - TransportCreateIndexAction createIndexAction, ActionFilters actionFilters) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + TransportCreateIndexAction createIndexAction, ActionFilters actionFilters, + MappingUpdatedAction mappingUpdatedAction) { + super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, + mappingUpdatedAction, actionFilters, DeleteRequest.class, DeleteRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.autoCreateIndex = new AutoCreateIndex(settings); @@ -137,13 +145,7 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct assert request.versionType().validateVersionForWrites(request.version()); - if (request.refresh()) { - try { - indexShard.refresh("refresh_flag_delete"); - } catch (Exception e) { - // ignore - } - } + processAfter(request, indexShard, delete.getTranslogLocation()); DeleteResponse response = new DeleteResponse(shardRequest.shardId.getIndex(), request.type(), request.id(), delete.version(), delete.found()); return new Tuple<>(response, shardRequest.request); @@ -155,14 +157,7 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA); indexShard.delete(delete); - - if (request.refresh()) { - try { - indexShard.refresh("refresh_flag_delete"); - } catch (Exception e) { - // ignore - } - } + processAfter(request, indexShard, delete.getTranslogLocation()); } @Override @@ -170,4 +165,18 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct return clusterService.operationRouting() .deleteShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing()); } + + private void processAfter(DeleteRequest request, IndexShard indexShard, Translog.Location location) { + if (request.refresh()) { + try { + indexShard.refresh("refresh_flag_delete"); + } catch (Throwable e) { + // ignore + } + } + + if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) { + indexShard.sync(location); + } + } } diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 8e81009b653..c9455d8783a 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -19,13 +19,13 @@ package org.elasticsearch.action.index; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; -import 
org.elasticsearch.action.index.IndexRequest.OpType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; @@ -38,22 +38,22 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.river.RiverIndexName; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; + /** * Performs the index operation. *

@@ -69,7 +69,6 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; private final TransportCreateIndexAction createIndexAction; - private final MappingUpdatedAction mappingUpdatedAction; private final ClusterService clusterService; @@ -77,10 +76,9 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters) { - super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - IndexRequest.class, IndexRequest.class, ThreadPool.Names.INDEX); + super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, + actionFilters, IndexRequest.class, IndexRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; - this.mappingUpdatedAction = mappingUpdatedAction; this.autoCreateIndex = new AutoCreateIndex(settings); this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); this.clusterService = clusterService; @@ -171,56 +169,12 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).type(request.type()).id(request.id()) - .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - final Engine.IndexingOperation operation; - if (request.opType() == IndexRequest.OpType.INDEX) { - operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); - } else { - assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); - operation = indexShard.prepareCreate(sourceToParse, - request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId()); - } - - final boolean created; - Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - if (update != null) { - final String indexName = indexService.index().name(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - // With rivers, we have a chicken and egg problem if indexing - // the _meta document triggers a mapping update. 
Because we would - // like to validate the mapping update first, but on the other - // hand putting the mapping would start the river, which expects - // to find a _meta document - // So we have no choice but to index first and send mappings afterwards - MapperService mapperService = indexService.mapperService(); - mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); - created = operation.execute(indexShard); - mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); - created = operation.execute(indexShard); - } - } else { - created = operation.execute(indexShard); - } - - if (request.refresh()) { - try { - indexShard.refresh("refresh_flag_index"); - } catch (Throwable e) { - // ignore - } - } - - // update the version on the request, so it will be used for the replicas - final long version = operation.version(); - request.version(version); - request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); - - assert request.versionType().validateVersionForWrites(request.version()); - return new Tuple<>(new IndexResponse(shardRequest.shardId.getIndex(), request.type(), request.id(), version, created), shardRequest.request); + final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard); + final IndexResponse response = result.response; + final Translog.Location location = result.location; + processAfter(request, indexShard, location); + return new Tuple<>(response, shardRequest.request); } @Override @@ -242,12 +196,20 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); } operation.execute(indexShard); + processAfter(request, indexShard, operation.getTranslogLocation()); + } + + private void processAfter(IndexRequest request, IndexShard indexShard, Translog.Location location) { if (request.refresh()) { try { indexShard.refresh("refresh_flag_index"); - } catch (Exception e) { + } catch (Throwable e) { // ignore } } + + if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST && location != null) { + indexShard.sync(location); + } } } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java deleted file mode 100644 index b0d6bbb6c41..00000000000 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java +++ /dev/null @@ -1,710 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
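The removed block above encodes an ordering decision worth spelling out: a dynamic mapping update is normally validated on the master before the operation executes, while the special rivers `_meta` case indexes first and publishes the mapping afterwards. A rough sketch under those assumptions; every name here is hypothetical, not the ES API:

[source,java]
----
interface Master {
    void updateMappingSynchronously(String index, String type, String mappingUpdate);
    void updateMappingAsynchronously(String index, String type, String mappingUpdate);
}

class MappingOrderingDemo {
    static void execute(Master master, String index, String type,
                        String dynamicMappingUpdate, Runnable indexOp, boolean riverIndex) {
        if (dynamicMappingUpdate == null) {
            indexOp.run(); // no new fields: nothing to publish
        } else if (riverIndex) {
            // rivers: the _meta document must exist before the river starts,
            // so index first and ship the mapping to the master after the fact
            indexOp.run();
            master.updateMappingAsynchronously(index, type, dynamicMappingUpdate);
        } else {
            // default: let the master validate and ack the mapping first, so a
            // rejected mapping update never leaves an unmapped document behind
            master.updateMappingSynchronously(index, type, dynamicMappingUpdate);
            indexOp.run();
        }
    }
}
----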
- */ - -package org.elasticsearch.action.mlt; - -import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.action.*; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.search.Scroll.readScroll; - -/** - * A more like this request allowing to search for documents that are "like" the provided document. The document - * to check against is fetched based on the index, type and id provided. Best created with {@link org.elasticsearch.client.Requests#moreLikeThisRequest(String)}.
- * <p/>
Note, the {@link #index()}, {@link #type(String)} and {@link #id(String)} are required. - * - * @see org.elasticsearch.client.Client#moreLikeThis(MoreLikeThisRequest) - * @see org.elasticsearch.client.Requests#moreLikeThisRequest(String) - * @see org.elasticsearch.action.search.SearchResponse - */ -public class MoreLikeThisRequest extends ActionRequest implements CompositeIndicesRequest { - - private String index; - - private String type; - - private String id; - - private String routing; - - private String[] fields; - - private String minimumShouldMatch = "0%"; - private int minTermFreq = -1; - private int maxQueryTerms = -1; - private String[] stopWords = null; - private int minDocFreq = -1; - private int maxDocFreq = -1; - private int minWordLength = -1; - private int maxWordLength = -1; - private float boostTerms = -1; - private boolean include = false; - - private SearchType searchType = SearchType.DEFAULT; - private int searchSize = 0; - private int searchFrom = 0; - private String[] searchIndices; - private String[] searchTypes; - private Scroll searchScroll; - - private BytesReference searchSource; - - MoreLikeThisRequest() { - } - - /** - * Constructs a new more like this request for a document that will be fetch from the provided index. - * Use {@link #type(String)} and {@link #id(String)} to specify the document to load. - */ - public MoreLikeThisRequest(String index) { - this.index = index; - } - - /** - * The index to load the document from which the "like" query will run with. - */ - public String index() { - return index; - } - - /** - * The type of document to load from which the "like" query will run with. - */ - public String type() { - return type; - } - - void index(String index) { - this.index = index; - } - - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public List subRequests() { - //we create two fake indices subrequests as we don't have the actual ones yet - //since they get created later on in TransportMoreLikeThisAction - List requests = Lists.newArrayList(); - requests.add(new IndicesRequest() { - @Override - public String[] indices() { - return new String[]{index}; - } - - @Override - public IndicesOptions indicesOptions() { - return MoreLikeThisRequest.this.indicesOptions(); - } - }); - requests.add(new IndicesRequest.Replaceable() { - @Override - public String[] indices() { - if (searchIndices != null) { - return searchIndices; - } - return new String[]{index}; - } - - @Override - public IndicesRequest indices(String[] indices) { - searchIndices = indices; - return this; - } - - @Override - public IndicesOptions indicesOptions() { - return SearchRequest.DEFAULT_INDICES_OPTIONS; - } - }); - return requests; - } - - /** - * The type of document to load from which the "like" query will execute with. - */ - public MoreLikeThisRequest type(String type) { - this.type = type; - return this; - } - - /** - * The id of document to load from which the "like" query will execute with. - */ - public String id() { - return id; - } - - /** - * The id of document to load from which the "like" query will execute with. - */ - public MoreLikeThisRequest id(String id) { - this.id = id; - return this; - } - - /** - * @return The routing for this request. This used for the `get` part of the mlt request. 
- */ - public String routing() { - return routing; - } - - public void routing(String routing) { - this.routing = routing; - } - - /** - * The fields of the document to use in order to find documents "like" this one. Defaults to run - * against all the document fields. - */ - public String[] fields() { - return this.fields; - } - - /** - * The fields of the document to use in order to find documents "like" this one. Defaults to run - * against all the document fields. - */ - public MoreLikeThisRequest fields(String... fields) { - this.fields = fields; - return this; - } - - /** - * Number of terms that must match the generated query expressed in the - * common syntax for minimum should match. Defaults to 30%. - * - * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) - */ - public MoreLikeThisRequest minimumShouldMatch(String minimumShouldMatch) { - this.minimumShouldMatch = minimumShouldMatch; - return this; - } - - /** - * Number of terms that must match the generated query expressed in the - * common syntax for minimum should match. - * - * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) - */ - public String minimumShouldMatch() { - return this.minimumShouldMatch; - } - - /** - * The percent of the terms to match for each field. Defaults to 0.3f. - */ - @Deprecated - public MoreLikeThisRequest percentTermsToMatch(float percentTermsToMatch) { - return minimumShouldMatch(Math.round(percentTermsToMatch * 100) + "%"); - } - - /** - * The percent of the terms to match for each field. Defaults to 0.3f. - */ - @Deprecated - public float percentTermsToMatch() { - if (minimumShouldMatch.endsWith("%")) { - return Float.parseFloat(minimumShouldMatch.substring(0, minimumShouldMatch.indexOf("%"))) / 100; - } else { - return -1; - } - } - - /** - * The frequency below which terms will be ignored in the source doc. Defaults to 2. - */ - public MoreLikeThisRequest minTermFreq(int minTermFreq) { - this.minTermFreq = minTermFreq; - return this; - } - - /** - * The frequency below which terms will be ignored in the source doc. Defaults to 2. - */ - public int minTermFreq() { - return this.minTermFreq; - } - - /** - * The maximum number of query terms that will be included in any generated query. Defaults to 25. - */ - public MoreLikeThisRequest maxQueryTerms(int maxQueryTerms) { - this.maxQueryTerms = maxQueryTerms; - return this; - } - - /** - * The maximum number of query terms that will be included in any generated query. Defaults to 25. - */ - public int maxQueryTerms() { - return this.maxQueryTerms; - } - - /** - * Any word in this set is considered "uninteresting" and ignored. - *
<p/>
Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as - * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting". - *
<p/>
Defaults to no stop words. - */ - public MoreLikeThisRequest stopWords(String... stopWords) { - this.stopWords = stopWords; - return this; - } - - /** - * Any word in this set is considered "uninteresting" and ignored. - *
<p/>
Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as - * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting". - *
<p/>
Defaults to no stop words. - */ - public String[] stopWords() { - return this.stopWords; - } - - /** - * The frequency at which words will be ignored which do not occur in at least this - * many docs. Defaults to 5. - */ - public MoreLikeThisRequest minDocFreq(int minDocFreq) { - this.minDocFreq = minDocFreq; - return this; - } - - /** - * The frequency at which words will be ignored which do not occur in at least this - * many docs. Defaults to 5. - */ - public int minDocFreq() { - return this.minDocFreq; - } - - /** - * The maximum frequency in which words may still appear. Words that appear - * in more than this many docs will be ignored. Defaults to unbounded. - */ - public MoreLikeThisRequest maxDocFreq(int maxDocFreq) { - this.maxDocFreq = maxDocFreq; - return this; - } - - /** - * The maximum frequency in which words may still appear. Words that appear - * in more than this many docs will be ignored. Defaults to unbounded. - */ - public int maxDocFreq() { - return this.maxDocFreq; - } - - /** - * The minimum word length below which words will be ignored. Defaults to 0. - */ - public MoreLikeThisRequest minWordLength(int minWordLength) { - this.minWordLength = minWordLength; - return this; - } - - /** - * The minimum word length below which words will be ignored. Defaults to 0. - */ - public int minWordLength() { - return this.minWordLength; - } - - /** - * The maximum word length above which words will be ignored. Defaults to unbounded. - */ - public MoreLikeThisRequest maxWordLength(int maxWordLength) { - this.maxWordLength = maxWordLength; - return this; - } - - /** - * The maximum word length above which words will be ignored. Defaults to unbounded. - */ - public int maxWordLength() { - return this.maxWordLength; - } - - /** - * The boost factor to use when boosting terms. Defaults to 1. - */ - public MoreLikeThisRequest boostTerms(float boostTerms) { - this.boostTerms = boostTerms; - return this; - } - - /** - * The boost factor to use when boosting terms. Defaults to 1. - */ - public float boostTerms() { - return this.boostTerms; - } - - /** - * Whether to include the queried document. Defaults to false. - */ - public MoreLikeThisRequest include(boolean include) { - this.include = include; - return this; - } - - /** - * Whether to include the queried document. Defaults to false. - */ - public boolean include() { - return this.include; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequest searchSource(SearchSourceBuilder sourceBuilder) { - this.searchSource = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequest searchSource(String searchSource) { - this.searchSource = new BytesArray(searchSource); - return this; - } - - public MoreLikeThisRequest searchSource(Map searchSource) { - try { - XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE); - builder.map(searchSource); - return searchSource(builder); - } catch (IOException e) { - throw new ElasticsearchGenerationException("Failed to generate [" + searchSource + "]", e); - } - } - - public MoreLikeThisRequest searchSource(XContentBuilder builder) { - this.searchSource = builder.bytes(); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. 
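A small sketch of the overload "funnel" visible in the `searchSource(...)` setters above: every convenience overload converts its argument and delegates to one canonical setter, so validation and storage live in a single place. The class and field names below are illustrative only, and a plain `toString` stands in for the real XContentBuilder serialization:

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.util.Map;

class SearchSourceFunnel {
    private byte[] searchSource; // single canonical representation

    SearchSourceFunnel searchSource(String source) {
        return searchSource(source.getBytes(StandardCharsets.UTF_8));
    }

    SearchSourceFunnel searchSource(Map<String, Object> source) {
        // the real request serializes the map through an XContentBuilder first
        return searchSource(source.toString());
    }

    SearchSourceFunnel searchSource(byte[] source) { // everything ends up here
        this.searchSource = source;
        return this;
    }
}
----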
- */ - public MoreLikeThisRequest searchSource(byte[] searchSource) { - return searchSource(searchSource, 0, searchSource.length); - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequest searchSource(byte[] searchSource, int offset, int length) { - return searchSource(new BytesArray(searchSource, offset, length)); - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequest searchSource(BytesReference searchSource) { - this.searchSource = searchSource; - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public BytesReference searchSource() { - return this.searchSource; - } - - /** - * The search type of the mlt search query. - */ - public MoreLikeThisRequest searchType(SearchType searchType) { - this.searchType = searchType; - return this; - } - - /** - * The search type of the mlt search query. - */ - public MoreLikeThisRequest searchType(String searchType) { - return searchType(SearchType.fromString(searchType)); - } - - /** - * The search type of the mlt search query. - */ - public SearchType searchType() { - return this.searchType; - } - - /** - * The indices the resulting mlt query will run against. If not set, will run - * against the index the document was fetched from. - */ - public MoreLikeThisRequest searchIndices(String... searchIndices) { - this.searchIndices = searchIndices; - return this; - } - - /** - * The indices the resulting mlt query will run against. If not set, will run - * against the index the document was fetched from. - */ - public String[] searchIndices() { - return this.searchIndices; - } - - /** - * The types the resulting mlt query will run against. If not set, will run - * against the type of the document fetched. - */ - public MoreLikeThisRequest searchTypes(String... searchTypes) { - this.searchTypes = searchTypes; - return this; - } - - /** - * The types the resulting mlt query will run against. If not set, will run - * against the type of the document fetched. - */ - public String[] searchTypes() { - return this.searchTypes; - } - - /** - * An optional search scroll request to be able to continue and scroll the search - * operation. - */ - public MoreLikeThisRequest searchScroll(Scroll searchScroll) { - this.searchScroll = searchScroll; - return this; - } - - /** - * An optional search scroll request to be able to continue and scroll the search - * operation. - */ - public Scroll searchScroll() { - return this.searchScroll; - } - - /** - * The number of documents to return, defaults to 10. - */ - public MoreLikeThisRequest searchSize(int size) { - this.searchSize = size; - return this; - } - - public int searchSize() { - return this.searchSize; - } - - /** - * From which search result set to return. 
- */ - public MoreLikeThisRequest searchFrom(int from) { - this.searchFrom = from; - return this; - } - - public int searchFrom() { - return this.searchFrom; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (index == null) { - validationException = ValidateActions.addValidationError("index is missing", validationException); - } - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); - } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); - } - return validationException; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - type = in.readString(); - id = in.readString(); - // no need to pass threading over the network, they are always false when coming throw a thread pool - int size = in.readVInt(); - if (size == 0) { - fields = Strings.EMPTY_ARRAY; - } else { - fields = new String[size]; - for (int i = 0; i < size; i++) { - fields[i] = in.readString(); - } - } - - minimumShouldMatch(in.readString()); - - minTermFreq = in.readVInt(); - maxQueryTerms = in.readVInt(); - size = in.readVInt(); - if (size > 0) { - stopWords = new String[size]; - for (int i = 0; i < size; i++) { - stopWords[i] = in.readString(); - } - } - minDocFreq = in.readVInt(); - maxDocFreq = in.readVInt(); - minWordLength = in.readVInt(); - maxWordLength = in.readVInt(); - boostTerms = in.readFloat(); - include = in.readBoolean(); - - searchType = SearchType.fromId(in.readByte()); - size = in.readVInt(); - if (size == 0) { - searchIndices = null; - } else if (size == 1) { - searchIndices = Strings.EMPTY_ARRAY; - } else { - searchIndices = new String[size - 1]; - for (int i = 0; i < searchIndices.length; i++) { - searchIndices[i] = in.readString(); - } - } - size = in.readVInt(); - if (size == 0) { - searchTypes = null; - } else if (size == 1) { - searchTypes = Strings.EMPTY_ARRAY; - } else { - searchTypes = new String[size - 1]; - for (int i = 0; i < searchTypes.length; i++) { - searchTypes[i] = in.readString(); - } - } - if (in.readBoolean()) { - searchScroll = readScroll(in); - } - - searchSource = in.readBytesReference(); - - searchSize = in.readVInt(); - searchFrom = in.readVInt(); - routing = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - out.writeString(type); - out.writeString(id); - if (fields == null) { - out.writeVInt(0); - } else { - out.writeVInt(fields.length); - for (String field : fields) { - out.writeString(field); - } - } - - out.writeString(minimumShouldMatch); - - out.writeVInt(minTermFreq); - out.writeVInt(maxQueryTerms); - if (stopWords == null) { - out.writeVInt(0); - } else { - out.writeVInt(stopWords.length); - for (String stopWord : stopWords) { - out.writeString(stopWord); - } - } - out.writeVInt(minDocFreq); - out.writeVInt(maxDocFreq); - out.writeVInt(minWordLength); - out.writeVInt(maxWordLength); - out.writeFloat(boostTerms); - out.writeBoolean(include); - - out.writeByte(searchType.id()); - if (searchIndices == null) { - out.writeVInt(0); - } else { - out.writeVInt(searchIndices.length + 1); - for (String index : searchIndices) { - out.writeString(index); - } - } - if (searchTypes == null) { - out.writeVInt(0); - } else { - out.writeVInt(searchTypes.length + 1); - for (String type : 
searchTypes) { - out.writeString(type); - } - } - if (searchScroll == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - searchScroll.writeTo(out); - } - out.writeBytesReference(searchSource); - - out.writeVInt(searchSize); - out.writeVInt(searchFrom); - out.writeOptionalString(routing); - } -} diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java deleted file mode 100644 index 2ff9f0d4cae..00000000000 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.mlt; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.util.Map; - -/** - */ -public class MoreLikeThisRequestBuilder extends ActionRequestBuilder { - - public MoreLikeThisRequestBuilder(ElasticsearchClient client, MoreLikeThisAction action) { - super(client, action, new MoreLikeThisRequest()); - } - - public MoreLikeThisRequestBuilder(ElasticsearchClient client, MoreLikeThisAction action, String index, String type, String id) { - super(client, action, new MoreLikeThisRequest(index).type(type).id(id)); - } - - /** - * The fields of the document to use in order to find documents "like" this one. Defaults to run - * against all the document fields. - */ - public MoreLikeThisRequestBuilder setField(String... fields) { - request.fields(fields); - return this; - } - - /** - * Sets the routing. Required if routing isn't id based. - */ - public MoreLikeThisRequestBuilder setRouting(String routing) { - request.routing(routing); - return this; - } - - /** - * Number of terms that must match the generated query expressed in the - * common syntax for minimum should match. Defaults to 30%. - * - * @see org.elasticsearch.common.lucene.search.Queries#calculateMinShouldMatch(int, String) - */ - public MoreLikeThisRequestBuilder setMinimumShouldMatch(String minimumShouldMatch) { - request.minimumShouldMatch(minimumShouldMatch); - return this; - } - - /** - * The percent of the terms to match for each field. Defaults to 0.3f. - */ - public MoreLikeThisRequestBuilder setPercentTermsToMatch(float percentTermsToMatch) { - return setMinimumShouldMatch(Math.round(percentTermsToMatch * 100) + "%"); - } - - /** - * The frequency below which terms will be ignored in the source doc. Defaults to 2. 
- */ - public MoreLikeThisRequestBuilder setMinTermFreq(int minTermFreq) { - request.minTermFreq(minTermFreq); - return this; - } - - /** - * The maximum number of query terms that will be included in any generated query. Defaults to 25. - */ - public MoreLikeThisRequestBuilder maxQueryTerms(int maxQueryTerms) { - request.maxQueryTerms(maxQueryTerms); - return this; - } - - /** - * Any word in this set is considered "uninteresting" and ignored. - *
Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as - * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting". - *
Defaults to no stop words. - */ - public MoreLikeThisRequestBuilder setStopWords(String... stopWords) { - request.stopWords(stopWords); - return this; - } - - /** - * The frequency at which words will be ignored which do not occur in at least this - * many docs. Defaults to 5. - */ - public MoreLikeThisRequestBuilder setMinDocFreq(int minDocFreq) { - request.minDocFreq(minDocFreq); - return this; - } - - /** - * The maximum frequency in which words may still appear. Words that appear - * in more than this many docs will be ignored. Defaults to unbounded. - */ - public MoreLikeThisRequestBuilder setMaxDocFreq(int maxDocFreq) { - request.maxDocFreq(maxDocFreq); - return this; - } - - /** - * The minimum word length below which words will be ignored. Defaults to 0. - */ - public MoreLikeThisRequestBuilder setMinWordLen(int minWordLen) { - request.minWordLength(minWordLen); - return this; - } - - /** - * The maximum word length above which words will be ignored. Defaults to unbounded. - */ - public MoreLikeThisRequestBuilder setMaxWordLen(int maxWordLen) { - request().maxWordLength(maxWordLen); - return this; - } - - /** - * The boost factor to use when boosting terms. Defaults to 1. - */ - public MoreLikeThisRequestBuilder setBoostTerms(float boostTerms) { - request.boostTerms(boostTerms); - return this; - } - - /** - * Whether to include the queried document. Defaults to false. - */ - public MoreLikeThisRequestBuilder setInclude(boolean include) { - request.include(include); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequestBuilder setSearchSource(SearchSourceBuilder sourceBuilder) { - request.searchSource(sourceBuilder); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequestBuilder setSearchSource(String searchSource) { - request.searchSource(searchSource); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequestBuilder setSearchSource(Map searchSource) { - request.searchSource(searchSource); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequestBuilder setSearchSource(XContentBuilder builder) { - request.searchSource(builder); - return this; - } - - /** - * An optional search source request allowing to control the search request for the - * more like this documents. - */ - public MoreLikeThisRequestBuilder setSearchSource(byte[] searchSource) { - request.searchSource(searchSource); - return this; - } - - /** - * The search type of the mlt search query. - */ - public MoreLikeThisRequestBuilder setSearchType(SearchType searchType) { - request.searchType(searchType); - return this; - } - - /** - * The search type of the mlt search query. - */ - public MoreLikeThisRequestBuilder setSearchType(String searchType) { - request.searchType(searchType); - return this; - } - - /** - * The indices the resulting mlt query will run against. If not set, will run - * against the index the document was fetched from. - */ - public MoreLikeThisRequestBuilder setSearchIndices(String... searchIndices) { - request.searchIndices(searchIndices); - return this; - } - - /** - * The types the resulting mlt query will run against. 
If not set, will run - * against the type of the document fetched. - */ - public MoreLikeThisRequestBuilder setSearchTypes(String... searchTypes) { - request.searchTypes(searchTypes); - return this; - } - - /** - * An optional search scroll request to be able to continue and scroll the search - * operation. - */ - public MoreLikeThisRequestBuilder setSearchScroll(Scroll searchScroll) { - request.searchScroll(searchScroll); - return this; - } - - /** - * The number of documents to return, defaults to 10. - */ - public MoreLikeThisRequestBuilder setSearchSize(int size) { - request.searchSize(size); - return this; - } - - /** - * From which search result set to return. - */ - public MoreLikeThisRequestBuilder setSearchFrom(int from) { - request.searchFrom(from); - return this; - } -} diff --git a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java b/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java deleted file mode 100644 index 679ef533307..00000000000 --- a/src/main/java/org/elasticsearch/action/mlt/TransportMoreLikeThisAction.java +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.mlt; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.Term; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.TransportGetAction; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.MutableShardRouting; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.DocumentMissingException; -import org.elasticsearch.index.get.GetField; -import org.elasticsearch.index.mapper.*; -import org.elasticsearch.index.mapper.internal.SourceFieldMapper; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.util.Collections; -import java.util.Iterator; -import java.util.Set; - -import static com.google.common.collect.Sets.newHashSet; -import static org.elasticsearch.index.query.QueryBuilders.*; -import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; - -/** - * The more like this action. 
- */ -public class TransportMoreLikeThisAction extends HandledTransportAction { - - private final TransportSearchAction searchAction; - private final TransportGetAction getAction; - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; - - @Inject - public TransportMoreLikeThisAction(Settings settings, ThreadPool threadPool, TransportSearchAction searchAction, TransportGetAction getAction, - ClusterService clusterService, IndicesService indicesService, TransportService transportService, ActionFilters actionFilters) { - super(settings, MoreLikeThisAction.NAME, threadPool, transportService, actionFilters, MoreLikeThisRequest.class); - this.searchAction = searchAction; - this.getAction = getAction; - this.indicesService = indicesService; - this.clusterService = clusterService; - this.transportService = transportService; - } - - @Override - protected void doExecute(final MoreLikeThisRequest request, final ActionListener listener) { - // update to actual index name - ClusterState clusterState = clusterService.state(); - // update to the concrete index - final String concreteIndex = clusterState.metaData().concreteSingleIndex(request.index(), request.indicesOptions()); - - Iterable routingNode = clusterState.getRoutingNodes().routingNodeIter(clusterService.localNode().getId()); - if (routingNode == null) { - redirect(request, concreteIndex, listener, clusterState); - return; - } - boolean hasIndexLocally = false; - for (MutableShardRouting shardRouting : routingNode) { - if (concreteIndex.equals(shardRouting.index())) { - hasIndexLocally = true; - break; - } - } - if (!hasIndexLocally) { - redirect(request, concreteIndex, listener, clusterState); - return; - } - Set getFields = newHashSet(); - if (request.fields() != null) { - Collections.addAll(getFields, request.fields()); - } - // add the source, in case we need to parse it to get fields - getFields.add(SourceFieldMapper.NAME); - - GetRequest getRequest = new GetRequest(request, request.index()) - .fields(getFields.toArray(new String[getFields.size()])) - .type(request.type()) - .id(request.id()) - .routing(request.routing()) - .operationThreaded(true); - - getAction.execute(getRequest, new ActionListener() { - @Override - public void onResponse(GetResponse getResponse) { - if (!getResponse.isExists()) { - listener.onFailure(new DocumentMissingException(null, request.type(), request.id())); - return; - } - final BoolQueryBuilder boolBuilder = boolQuery(); - try { - final DocumentMapper docMapper = indicesService.indexServiceSafe(concreteIndex).mapperService().documentMapper(request.type()); - if (docMapper == null) { - throw new ElasticsearchException("No DocumentMapper found for type [" + request.type() + "]"); - } - final Set fields = newHashSet(); - if (request.fields() != null) { - for (String field : request.fields()) { - FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field); - if (fieldMapper != null) { - fields.add(fieldMapper.names().indexName()); - } else { - fields.add(field); - } - } - } - - if (!fields.isEmpty()) { - // if fields are not empty, see if we got them in the response - for (Iterator it = fields.iterator(); it.hasNext(); ) { - String field = it.next(); - GetField getField = getResponse.getField(field); - if (getField != null) { - for (Object value : getField.getValues()) { - addMoreLikeThis(request, boolBuilder, getField.getName(), value.toString(), true); - } - it.remove(); - } - } - if (!fields.isEmpty()) { - 
// if we don't get all the fields in the get response, see if we can parse the source - parseSource(getResponse, boolBuilder, docMapper, fields, request); - } - } else { - // we did not ask for any fields, try and get it from the source - parseSource(getResponse, boolBuilder, docMapper, fields, request); - } - - if (!boolBuilder.hasClauses()) { - // no field added, fail - listener.onFailure(new ElasticsearchException("No fields found to fetch the 'likeText' from")); - return; - } - - // exclude myself - if (!request.include()) { - Term uidTerm = docMapper.uidMapper().term(request.type(), request.id()); - boolBuilder.mustNot(termQuery(uidTerm.field(), uidTerm.text())); - boolBuilder.adjustPureNegative(false); - } - } catch (Throwable e) { - listener.onFailure(e); - return; - } - - String[] searchIndices = request.searchIndices(); - if (searchIndices == null) { - searchIndices = new String[]{request.index()}; - } - String[] searchTypes = request.searchTypes(); - if (searchTypes == null) { - searchTypes = new String[]{request.type()}; - } - - SearchRequest searchRequest = new SearchRequest(request).indices(searchIndices) - .types(searchTypes) - .searchType(request.searchType()) - .scroll(request.searchScroll()); - - SearchSourceBuilder extraSource = searchSource().query(boolBuilder); - if (request.searchFrom() != 0) { - extraSource.from(request.searchFrom()); - } - if (request.searchSize() != 0) { - extraSource.size(request.searchSize()); - } - searchRequest.extraSource(extraSource); - - if (request.searchSource() != null) { - searchRequest.source(request.searchSource()); - } - - searchAction.execute(searchRequest, new ActionListener() { - @Override - public void onResponse(SearchResponse response) { - listener.onResponse(response); - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }); - - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }); - } - - // Redirects the request to a data node, that has the index meta data locally available. 
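- // (doExecute above falls through to redirect() only when the local node holds no shard copy of the
- // concrete index: the request is re-sent, unchanged, to a node that currently holds one, and that
- // node's SearchResponse is relayed straight back to the original listener.)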
- private void redirect(MoreLikeThisRequest request, String concreteIndex, final ActionListener listener, ClusterState clusterState) { - ShardIterator shardIterator = clusterService.operationRouting().getShards(clusterState, concreteIndex, request.type(), request.id(), request.routing(), null); - ShardRouting shardRouting = shardIterator.nextOrNull(); - if (shardRouting == null) { - throw new ElasticsearchException("No shards for index " + request.index()); - } - String nodeId = shardRouting.currentNodeId(); - DiscoveryNode discoveryNode = clusterState.nodes().get(nodeId); - transportService.sendRequest(discoveryNode, MoreLikeThisAction.NAME, request, new TransportResponseHandler() { - - @Override - public SearchResponse newInstance() { - return new SearchResponse(); - } - - @Override - public void handleResponse(SearchResponse response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - - private void parseSource(GetResponse getResponse, final BoolQueryBuilder boolBuilder, DocumentMapper docMapper, final Set fields, final MoreLikeThisRequest request) { - if (getResponse.isSourceEmpty()) { - return; - } - docMapper.parse(SourceToParse.source(getResponse.getSourceAsBytesRef()).type(request.type()).id(request.id()), new DocumentMapper.ParseListenerAdapter() { - @Override - public boolean beforeFieldAdded(FieldMapper fieldMapper, Field field, Object parseContext) { - if (field.fieldType().indexOptions() == IndexOptions.NONE) { - return false; - } - if (fieldMapper instanceof InternalMapper) { - return true; - } - String value = fieldMapper.value(convertField(field)).toString(); - if (value == null) { - return false; - } - - if (fields.isEmpty() || fields.contains(field.name())) { - addMoreLikeThis(request, boolBuilder, fieldMapper, field, !fields.isEmpty()); - } - - return false; - } - }); - } - - private Object convertField(Field field) { - if (field.stringValue() != null) { - return field.stringValue(); - } else if (field.binaryValue() != null) { - return BytesRef.deepCopyOf(field.binaryValue()).bytes; - } else if (field.numericValue() != null) { - return field.numericValue(); - } else { - throw new IllegalStateException("Field should have either a string, numeric or binary value"); - } - } - - private void addMoreLikeThis(MoreLikeThisRequest request, BoolQueryBuilder boolBuilder, FieldMapper fieldMapper, Field field, boolean failOnUnsupportedField) { - addMoreLikeThis(request, boolBuilder, field.name(), fieldMapper.value(convertField(field)).toString(), failOnUnsupportedField); - } - - private void addMoreLikeThis(MoreLikeThisRequest request, BoolQueryBuilder boolBuilder, String fieldName, String likeText, boolean failOnUnsupportedField) { - MoreLikeThisQueryBuilder mlt = moreLikeThisQuery(fieldName) - .likeText(likeText) - .minimumShouldMatch(request.minimumShouldMatch()) - .boostTerms(request.boostTerms()) - .minDocFreq(request.minDocFreq()) - .maxDocFreq(request.maxDocFreq()) - .minWordLength(request.minWordLength()) - .maxWordLength(request.maxWordLength()) - .minTermFreq(request.minTermFreq()) - .maxQueryTerms(request.maxQueryTerms()) - .stopWords(request.stopWords()) - .failOnUnsupportedField(failOnUnsupportedField); - boolBuilder.should(mlt); - } -} diff --git a/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java 
index 4a6e073e288..c090c3e6d72 100644 --- a/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -124,7 +124,7 @@ public class ShardSearchFailure implements ShardOperationFailedException, ToXCon @Override public String toString() { - return "shard [" + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + reason + "]"; + return "shard [" + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + reason + "], cause [" + (cause == null ? "_na" : ExceptionsHelper.stackTrace(cause)) + "]"; } public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws IOException { diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index c45a3798318..0136bf9b2a3 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -25,12 +25,17 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.index.IndexRequest.OpType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -39,6 +44,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.settings.Settings; @@ -48,13 +54,19 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.RefCounted; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; +import 
org.elasticsearch.river.RiverIndexName; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -76,6 +88,7 @@ public abstract class TransportShardReplicationOperationAction request, Class replicaRequest, String executor) { super(settings, actionName, threadPool, actionFilters); this.transportService = transportService; this.clusterService = clusterService; this.indicesService = indicesService; this.shardStateAction = shardStateAction; + this.mappingUpdatedAction = mappingUpdatedAction; this.transportReplicaAction = actionName + "[r]"; this.executor = executor; @@ -145,7 +160,8 @@ public abstract class TransportShardReplicationOperationAction { + + public final T response; + public final Translog.Location location; + + public WriteResult(T response, Translog.Location location) { + this.response = response; + this.location = location; + } + + @SuppressWarnings("unchecked") + public T response() { + // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica + // request and not use it + response.setShardInfo(new ActionWriteResponse.ShardInfo()); + return (T) response; + } + + } + class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { @@ -278,9 +314,6 @@ public abstract class TransportShardReplicationOperationAction executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable { + Engine.IndexingOperation operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + final boolean created; + final ShardId shardId = indexShard.shardId(); + if (update != null) { + final String indexName = shardId.getIndex(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. 
Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexShard.indexService().mapperService(); + mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); + created = operation.execute(indexShard); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); + operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); + update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnPrimaryException(shardId, + "Dynamics mappings are not available on the node that holds the primary yet"); + } + created = operation.execute(indexShard); + } + } else { + created = operation.execute(indexShard); + } + + // update the version on request so it will happen on the replicas + final long version = operation.version(); + request.version(version); + request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); + + assert request.versionType().validateVersionForWrites(request.version()); + + return new WriteResult(new IndexResponse(shardId.getIndex(), request.type(), request.id(), request.version(), created), operation.getTranslogLocation()); + } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 74e8ec62cbc..07e9769d7c2 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -90,6 +90,15 @@ public class Bootstrap { if (mlockAll) { Natives.tryMlockall(); } + + // check if the user is running as root, and bail + if (Natives.definitelyRunningAsRoot()) { + if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) { + Loggers.getLogger(Bootstrap.class).warn("running as ROOT user. this is a bad idea!"); + } else { + throw new RuntimeException("don't run elasticsearch as root."); + } + } // listener for windows close event if (ctrlHandler) { @@ -107,7 +116,13 @@ public class Bootstrap { } }); } - Kernel32Library.getInstance(); + + // force remainder of JNA to be loaded (if available). + try { + Kernel32Library.getInstance(); + } catch (Throwable ignored) { + // we've already logged this. 
+ } // initialize sigar explicitly try { @@ -187,32 +202,31 @@ public class Bootstrap { public static void main(String[] args) { System.setProperty("es.logger.prefix", ""); INSTANCE = new Bootstrap(); - final String pidFile = System.getProperty("es.pidfile", System.getProperty("es-pidfile")); - if (pidFile != null) { - try { - PidFile.create(PathUtils.get(pidFile), true); - } catch (Exception e) { - String errorMessage = buildErrorMessage("pid", e); - sysError(errorMessage, true); - System.exit(3); - } - } boolean foreground = System.getProperty("es.foreground", System.getProperty("es-foreground")) != null; // handle the wrapper system property, if its a service, don't run as a service if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) { foreground = false; } + String stage = "Settings"; + Settings settings = null; Environment environment = null; try { Tuple tuple = initialSettings(); settings = tuple.v1(); environment = tuple.v2(); + + if (environment.pidFile() != null) { + stage = "Pid"; + PidFile.create(environment.pidFile(), true); + } + + stage = "Logging"; setupLogging(settings, environment); } catch (Exception e) { - String errorMessage = buildErrorMessage("Setup", e); + String errorMessage = buildErrorMessage(stage, e); sysError(errorMessage, true); System.exit(3); } @@ -228,7 +242,7 @@ public class Bootstrap { logger.warn("jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line"); } - String stage = "Initialization"; + stage = "Initialization"; try { if (!foreground) { Loggers.disableConsoleLogging(); diff --git a/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index befef74251b..cbe00b252a4 100644 --- a/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -19,6 +19,8 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.common.SuppressForbidden; + import java.net.URI; import java.security.Permission; import java.security.PermissionCollection; @@ -41,8 +43,12 @@ public class ESPolicy extends Policy { this.dynamic = dynamic; } - @Override + @Override @SuppressForbidden(reason = "fast equals check is desired") public boolean implies(ProtectionDomain domain, Permission permission) { + // run groovy scripts with no permissions + if ("/groovy/script".equals(domain.getCodeSource().getLocation().getFile())) { + return false; + } return template.implies(domain, permission) || dynamic.implies(permission); } } diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java index e90b162c7e3..07496a64c68 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -67,7 +67,9 @@ public class Security { for (Path path : environment.dataWithClusterFiles()) { addPath(policy, path, "read,readlink,write,delete"); } - + if (environment.pidFile() != null) { + addPath(policy, environment.pidFile().getParent(), "read,readlink,write,delete"); + } return policy; } diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index e356244db1a..5c5714bbb6f 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -19,7 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.action.*; +import 
org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -51,8 +52,6 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequestBuilder; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; -import org.elasticsearch.action.mlt.MoreLikeThisRequest; -import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder; import org.elasticsearch.action.percolate.*; import org.elasticsearch.action.search.*; import org.elasticsearch.action.suggest.SuggestRequest; @@ -467,32 +466,7 @@ public interface Client extends ElasticsearchClient, Releasable { * Performs multiple search requests. */ MultiSearchRequestBuilder prepareMultiSearch(); - - /** - * A more like this action to search for documents that are "like" a specific document. - * - * @param request The more like this request - * @return The response future - */ - ActionFuture moreLikeThis(MoreLikeThisRequest request); - - /** - * A more like this action to search for documents that are "like" a specific document. - * - * @param request The more like this request - * @param listener A listener to be notified of the result - */ - void moreLikeThis(MoreLikeThisRequest request, ActionListener listener); - - /** - * A more like this action to search for documents that are "like" a specific document. - * - * @param index The index to load the document from - * @param type The type of the document - * @param id The id of the document - */ - MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id); - + /** * An action that returns the term vectors for a specific document. * diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index 13fce3326fe..bc2a778f570 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -55,7 +55,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.mlt.MoreLikeThisRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.suggest.SuggestRequest; @@ -156,19 +155,7 @@ public class Requests { public static SuggestRequest suggestRequest(String... indices) { return new SuggestRequest(indices); } - - /** - * More like this request represents a request to search for documents that are "like" the provided (fetched) - * document. - * - * @param index The index to load the document from - * @return The more like this request - * @see org.elasticsearch.client.Client#moreLikeThis(org.elasticsearch.action.mlt.MoreLikeThisRequest) - */ - public static MoreLikeThisRequest moreLikeThisRequest(String index) { - return new MoreLikeThisRequest(index); - } - + /** * Creates a search request against one or more indices. Note, the search source must be set either using the * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}. 
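
With `Requests.moreLikeThisRequest` and the `Client` entry points gone (the `AbstractClient` implementations are removed in the next hunk), the equivalent lookup goes through the regular search API with a `more_like_this` query. A minimal sketch of the replacement call follows; the index, type, and field names are illustrative, and the builder setters mirror the ones the deleted transport action applied internally (defaults taken from the removed javadoc: `minimumShouldMatch` 30%, `minTermFreq` 2, `maxQueryTerms` 25):

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.index.query.QueryBuilders;

    public class MoreLikeThisMigrationSketch {
        // The caller now supplies the text to match; the removed action used to
        // fetch the source document and extract this text per field itself.
        public static SearchResponse findSimilar(Client client, String likeText) {
            return client.prepareSearch("my_index")          // illustrative index
                    .setTypes("my_type")                     // illustrative type
                    .setQuery(QueryBuilders.moreLikeThisQuery("title", "body") // illustrative fields
                            .likeText(likeText)
                            .minimumShouldMatch("30%")
                            .minTermFreq(2)
                            .maxQueryTerms(25))
                    .execute().actionGet();
        }
    }

One behavioral difference worth noting: because the dedicated action fetched the document and built per-field clauses on a node holding the index, callers of the plain search API must now extract and pass the "like" text themselves.
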
diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 6ac9a0202ab..edec9af848a 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -249,9 +249,6 @@ import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequestBuilder; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; -import org.elasticsearch.action.mlt.MoreLikeThisAction; -import org.elasticsearch.action.mlt.MoreLikeThisRequest; -import org.elasticsearch.action.mlt.MoreLikeThisRequestBuilder; import org.elasticsearch.action.percolate.*; import org.elasticsearch.action.search.*; import org.elasticsearch.action.suggest.SuggestAction; @@ -636,21 +633,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new SuggestRequestBuilder(this, SuggestAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture moreLikeThis(final MoreLikeThisRequest request) { - return execute(MoreLikeThisAction.INSTANCE, request); - } - - @Override - public void moreLikeThis(final MoreLikeThisRequest request, final ActionListener listener) { - execute(MoreLikeThisAction.INSTANCE, request, listener); - } - - @Override - public MoreLikeThisRequestBuilder prepareMoreLikeThis(String index, String type, String id) { - return new MoreLikeThisRequestBuilder(this, MoreLikeThisAction.INSTANCE, index, type, id); - } - @Override public ActionFuture termVectors(final TermVectorsRequest request) { return execute(TermVectorsAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/common/jna/CLibrary.java b/src/main/java/org/elasticsearch/common/jna/CLibrary.java index eda896040f7..d3e2c19188d 100644 --- a/src/main/java/org/elasticsearch/common/jna/CLibrary.java +++ b/src/main/java/org/elasticsearch/common/jna/CLibrary.java @@ -48,7 +48,7 @@ public class CLibrary { public static native int mlockall(int flags); - public static native int munlockall(); + public static native int geteuid(); private CLibrary() { } diff --git a/src/main/java/org/elasticsearch/common/jna/Natives.java b/src/main/java/org/elasticsearch/common/jna/Natives.java index d6c6838842a..b1cea21a954 100644 --- a/src/main/java/org/elasticsearch/common/jna/Natives.java +++ b/src/main/java/org/elasticsearch/common/jna/Natives.java @@ -61,6 +61,19 @@ public class Natives { } } } + + /** Returns true if user is root, false if not, or if we don't know */ + public static boolean definitelyRunningAsRoot() { + if (Constants.WINDOWS) { + return false; // don't know + } + try { + return CLibrary.geteuid() == 0; + } catch (Throwable error) { + logger.warn("unable to determine euid", error); + return false; // don't know + } + } public static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) { // The console Ctrl handler is necessary on Windows platforms only. 
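
Taken together with the `Bootstrap.init` hunk earlier in this patch, the new helper gates startup roughly as follows (condensed from the patch itself; `definitelyRunningAsRoot` deliberately answers false on Windows or when `geteuid` cannot be resolved, so the check only trips when root is certain):

    // Condensed from the Bootstrap.init() change above.
    if (Natives.definitelyRunningAsRoot()) {
        if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) {
            Loggers.getLogger(Bootstrap.class).warn("running as ROOT user. this is a bad idea!");
        } else {
            throw new RuntimeException("don't run elasticsearch as root.");
        }
    }

Operators who genuinely must run as root can opt out by starting with `-Des.insecure.allow.root=true`, assuming the launcher script passes `-D` options through to the JVM.
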
diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index 46947c5667c..c3bdb260855 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -52,6 +52,9 @@ public class Environment { private final Path logsFile; + /** Path to the PID file (can be null if no PID file is configured) **/ + private final Path pidFile; + /** List of filestores on the system */ private static final FileStore[] fileStores; @@ -106,6 +109,12 @@ public class Environment { } else { logsFile = homeFile.resolve("logs"); } + + if (settings.get("pidfile") != null) { + pidFile = PathUtils.get(cleanPath(settings.get("pidfile"))); + } else { + pidFile = null; + } } /** @@ -151,6 +160,13 @@ public class Environment { return logsFile; } + /** + * The PID file location (can be null if no PID file is configured) + */ + public Path pidFile() { + return pidFile; + } + /** * Looks up the filestore associated with a Path. *

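
Downstream, a `null` return from the new accessor means "no PID file configured"; the `Bootstrap` and `Security` hunks above consume it along these lines (a condensed sketch, with `environment` and `policy` assumed in scope and both calls copied from those hunks):

    Path pidFile = environment.pidFile();
    if (pidFile != null) {
        PidFile.create(pidFile, true);  // as called from Bootstrap.main() above
        addPath(policy, pidFile.getParent(), "read,readlink,write,delete");  // as in the Security hunk
    }

Note the ordering in `Bootstrap.main`: the PID file is now created before logging is set up, with the `stage` variable distinguishing which startup step failed.
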
diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index 52002ecb34f..1ac08f94cf1 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -625,6 +625,7 @@ public abstract class Engine implements Closeable { private final VersionType versionType; private final Origin origin; private final boolean canHaveDuplicates; + private Translog.Location location; private final long startTime; private long endTime; @@ -690,6 +691,14 @@ public abstract class Engine implements Closeable { this.doc.version().setLongValue(version); } + public void setTranslogLocation(Translog.Location location) { + this.location = location; + } + + public Translog.Location getTranslogLocation() { + return this.location; + } + public VersionType versionType() { return this.versionType; } @@ -805,6 +814,7 @@ public abstract class Engine implements Closeable { private final long startTime; private long endTime; + private Translog.Location location; public Delete(String type, String id, Term uid, long version, VersionType versionType, Origin origin, long startTime, boolean found) { this.type = type; @@ -884,6 +894,14 @@ public abstract class Engine implements Closeable { public long endTime() { return this.endTime; } + + public void setTranslogLocation(Translog.Location location) { + this.location = location; + } + + public Translog.Location getTranslogLocation() { + return this.location; + } } public static class DeleteByQuery { diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index bb8006ad6b9..bf192c10612 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.codec.CodecService; @@ -41,11 +40,9 @@ import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.threadpool.ThreadPool; -import java.io.IOException; import java.nio.file.Path; import java.util.concurrent.TimeUnit; diff --git a/src/main/java/org/elasticsearch/index/engine/EngineFactory.java b/src/main/java/org/elasticsearch/index/engine/EngineFactory.java index b29148edff5..77bcc3b28e4 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineFactory.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.index.engine; -import org.elasticsearch.index.translog.fs.FsTranslog; - /** * Simple Engine Factory */ diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 44fe5df2643..4e9453ad547 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ 
b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.engine; -import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriter.IndexReaderWarmer; @@ -50,7 +49,6 @@ import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider; import org.elasticsearch.index.search.nested.IncludeNestedDocsQuery; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -79,7 +77,7 @@ public class InternalEngine extends Engine { private final ShardIndexingService indexingService; @Nullable private final IndicesWarmer warmer; - private final FsTranslog translog; + private final Translog translog; private final MergePolicyProvider mergePolicyProvider; private final MergeSchedulerProvider mergeScheduler; @@ -111,7 +109,7 @@ public class InternalEngine extends Engine { this.versionMap = new LiveVersionMap(); store.incRef(); IndexWriter writer = null; - FsTranslog translog = null; + Translog translog = null; SearcherManager manager = null; boolean success = false; try { @@ -131,7 +129,7 @@ public class InternalEngine extends Engine { try { writer = createWriter(); indexWriter = writer; - translog = new FsTranslog(engineConfig.getShardId(), engineConfig.getIndesSettingService(), engineConfig.getBigArrays(), engineConfig.getTranslogPath(), engineConfig.getThreadPool()); + translog = new Translog(engineConfig.getShardId(), engineConfig.getIndesSettingService(), engineConfig.getBigArrays(), engineConfig.getTranslogPath(), engineConfig.getThreadPool()); committedTranslogId = loadCommittedTranslogId(writer, translog); } catch (IOException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); @@ -403,7 +401,7 @@ public class InternalEngine extends Engine { Translog.Location translogLocation = translog.add(new Translog.Create(create)); versionMap.putUnderLock(create.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); - + create.setTranslogLocation(translogLocation); indexingService.postCreateUnderLock(create); } @@ -506,7 +504,7 @@ public class InternalEngine extends Engine { Translog.Location translogLocation = translog.add(new Translog.Index(index)); versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation)); - + index.setTranslogLocation(translogLocation); indexingService.postIndexUnderLock(index); return created; } @@ -576,7 +574,7 @@ public class InternalEngine extends Engine { delete.updateVersion(updatedVersion, found); Translog.Location translogLocation = translog.add(new Translog.Delete(delete)); versionMap.putUnderLock(delete.uid().bytes(), new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis(), translogLocation)); - + delete.setTranslogLocation(translogLocation); indexingService.postDeleteUnderLock(delete); } } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java b/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java index c9c13e3d879..fdf708cfd51 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngineFactory.java @@ -18,8 +18,6 @@ 
*/ package org.elasticsearch.index.engine; -import org.elasticsearch.index.translog.fs.FsTranslog; - public class InternalEngineFactory implements EngineFactory { @Override public Engine newReadWriteEngine(EngineConfig config, boolean skipTranslogRecovery) { diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index fa459d36b13..01a70b5fe1c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -585,7 +585,9 @@ public class DocumentMapper implements ToXContent { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("script", script); - builder.field("lang", language); + if (language != null) { + builder.field("lang", language); + } if (parameters != null) { builder.field("params", parameters); } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 5ab0049178b..5b8c5e8941f 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -471,17 +471,18 @@ public class DateFieldMapper extends NumberFieldMapper { context.allEntries().addText(names.fullName(), dateAsString, boost); } value = parseStringValue(dateAsString); + } else if (value != null) { + value = timeUnit.toMillis(value); } if (value != null) { - final long timestamp = timeUnit.toMillis(value); if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { - CustomLongNumericField field = new CustomLongNumericField(this, timestamp, fieldType); + CustomLongNumericField field = new CustomLongNumericField(this, value, fieldType); field.setBoost(boost); fields.add(field); } if (hasDocValues()) { - addDocValue(context, fields, timestamp); + addDocValue(context, fields, value); } } } @@ -549,7 +550,7 @@ public class DateFieldMapper extends NumberFieldMapper { return dateTimeFormatter.parser().parseMillis(value); } catch (RuntimeException e) { try { - return Long.parseLong(value); + return timeUnit.toMillis(Long.parseLong(value)); } catch (NumberFormatException e1) { throw new MapperParsingException("failed to parse date field [" + value + "], tried both date format [" + dateTimeFormatter.format() + "], and timestamp number with locale [" + dateTimeFormatter.locale() + "]", e); } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index e1315d3e0c4..4df729c10a5 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -145,15 +145,15 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("enabled")) { + if (fieldName.equals("enabled") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("compress")) { + } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { if (fieldNode != null) { 
builder.compress(nodeBooleanValue(fieldNode)); } iterator.remove(); - } else if (fieldName.equals("compress_threshold")) { + } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { if (fieldNode != null) { if (fieldNode instanceof Number) { builder.compressThreshold(((Number) fieldNode).longValue()); diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index f383dc33ac6..12f5607cfc9 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -475,6 +475,7 @@ public abstract class QueryBuilders { * @param type The child type. * @param query The query. */ + @Deprecated public static TopChildrenQueryBuilder topChildrenQuery(String type, QueryBuilder query) { return new TopChildrenQueryBuilder(type, query); } diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java index a8174230db9..011f6817227 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryBuilder.java @@ -25,6 +25,7 @@ import java.io.IOException; /** * */ +@Deprecated public class TopChildrenQueryBuilder extends BaseQueryBuilder implements BoostableQueryBuilder { private final QueryBuilder queryBuilder; diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index 6c1b0e45aaa..7ca32527b02 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -38,6 +38,7 @@ import java.io.IOException; /** * */ +@Deprecated public class TopChildrenQueryParser implements QueryParser { public static final String NAME = "top_children"; diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 4fc233b21b9..a211621f69a 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -54,6 +54,7 @@ import java.util.Set; * This query is most of the times faster than the {@link ChildrenQuery}. Usually enough parent documents can be returned * in the first child document query round. 
*/ +@Deprecated public class TopChildrenQuery extends IndexCacheableQuery { private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator(); diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index ff502ea27bc..0bd73d92143 100644 --- a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -38,7 +38,7 @@ import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.TranslogService; -import org.elasticsearch.index.translog.fs.FsTranslog; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.ttl.IndicesTTLService; @@ -64,7 +64,7 @@ public class IndexDynamicSettingsModule extends AbstractModule { indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION); indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION); indexDynamicSettings.addDynamicSetting(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION); - indexDynamicSettings.addDynamicSetting(FsTranslog.INDEX_TRANSLOG_FS_TYPE); + indexDynamicSettings.addDynamicSetting(Translog.INDEX_TRANSLOG_FS_TYPE); indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, Validator.NON_NEGATIVE_INTEGER); indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS); indexDynamicSettings.addDynamicSetting(IndexMetaData.SETTING_READ_ONLY); @@ -118,6 +118,7 @@ public class IndexDynamicSettingsModule extends AbstractModule { indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE); indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, Validator.TIME); indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH); + indexDynamicSettings.addDynamicSetting(Translog.INDEX_TRANSLOG_DURABILITY); indexDynamicSettings.addDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED); indexDynamicSettings.addDynamicSetting(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, Validator.BOOLEAN); } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 8a084927c62..27b348890c0 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -116,6 +116,8 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; +import java.util.Arrays; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; @@ -454,7 +456,12 @@ public class IndexShard extends AbstractIndexShardComponent { } public Engine.Create prepareCreate(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) { - return prepareCreate(docMapper(source.type()), source, version, 
versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates, autoGeneratedId); + try { + return prepareCreate(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates, autoGeneratedId); + } catch (Throwable t) { + verifyNotClosed(t); + throw t; + } } static Engine.Create prepareCreate(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) { @@ -484,7 +491,12 @@ public class IndexShard extends AbstractIndexShardComponent { } public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) { - return prepareIndex(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates); + try { + return prepareIndex(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates); + } catch (Throwable t) { + verifyNotClosed(t); + throw t; + } } static Engine.Index prepareIndex(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) { @@ -943,9 +955,17 @@ public class IndexShard extends AbstractIndexShardComponent { } private void verifyNotClosed() throws IllegalIndexShardStateException { + verifyNotClosed(null); + } + + private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read if (state == IndexShardState.CLOSED) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed"); + final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed"); + if (suppressed != null) { + exc.addSuppressed(suppressed); + } + throw exc; } } @@ -1346,4 +1366,24 @@ public class IndexShard extends AbstractIndexShardComponent { public int getOperationsCount() { return indexShardOperationCounter.refCount(); } + + /** + * Syncs the given location with the underlying storage unless already synced. + */ + public void sync(Translog.Location location) { + final Engine engine = engine(); + try { + engine.getTranslog().ensureSynced(location); + } catch (IOException ex) { // if this fails we are in serious trouble - fail the request + logger.debug("failed to sync translog", ex); + throw new ElasticsearchException("failed to sync translog", ex); + } + } + + /** + * Returns the current translog durability mode. + */ + public Translog.Durabilty getTranslogDurability() { + return engine().getTranslog().getDurabilty(); + } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/BufferingTranslogFile.java similarity index 58% rename from src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java rename to src/main/java/org/elasticsearch/index/translog/BufferingTranslogFile.java index 7236d19f654..b7eb4dc9f2a 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/BufferingTranslogFile.java @@ -17,15 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogException; -import org.elasticsearch.index.translog.TranslogStream; import java.io.IOException; import java.io.OutputStream; @@ -33,68 +30,51 @@ import java.nio.ByteBuffer; /** */ -public final class BufferingFsTranslogFile extends FsTranslogFile { - - private volatile int operationCounter; - private volatile long lastPosition; - private volatile long lastWrittenPosition; - - private volatile long lastSyncPosition = 0; +public final class BufferingTranslogFile extends TranslogFile { private byte[] buffer; private int bufferCount; private WrapperOutputStream bufferOs = new WrapperOutputStream(); - public BufferingFsTranslogFile(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { + /* the total offset of this file including the bytes written to the file as well as into the buffer */ + private volatile long totalOffset; + + public BufferingTranslogFile(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { super(shardId, id, channelReference); this.buffer = new byte[bufferSize]; - final TranslogStream stream = this.channelReference.stream(); - int headerSize = stream.writeHeader(channelReference.channel()); - this.lastPosition += headerSize; - this.lastWrittenPosition += headerSize; - this.lastSyncPosition += headerSize; - } - - @Override - public int totalOperations() { - return operationCounter; - } - - @Override - public long sizeInBytes() { - return lastWrittenPosition; + this.totalOffset = writtenOffset; } @Override public Translog.Location add(BytesReference data) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { operationCounter++; - long position = lastPosition; + final long offset = totalOffset; if (data.length() >= buffer.length) { - flushBuffer(); + flush(); // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel data.writeTo(channelReference.channel()); - lastWrittenPosition += data.length(); - lastPosition += data.length(); - return new Translog.Location(id, position, data.length()); + writtenOffset += data.length(); + totalOffset += data.length(); + return new Translog.Location(id, offset, data.length()); } if (data.length() > buffer.length - bufferCount) { - flushBuffer(); + flush(); } data.writeTo(bufferOs); - lastPosition += data.length(); - return new Translog.Location(id, position, data.length()); + totalOffset += data.length(); + return new Translog.Location(id, offset, data.length()); } } - private void flushBuffer() throws IOException { + protected final void flush() throws IOException { assert writeLock.isHeldByCurrentThread(); if (bufferCount > 0) { // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel Channels.writeToChannel(buffer, 0, bufferCount, channelReference.channel()); - lastWrittenPosition += bufferCount; + writtenOffset += bufferCount; bufferCount = 0; } } @@ -102,8 +82,8 @@ public final class BufferingFsTranslogFile extends FsTranslogFile { @Override protected void readBytes(ByteBuffer targetBuffer, long 
position) throws IOException { try (ReleasableLock lock = readLock.acquire()) { - if (position >= lastWrittenPosition) { - System.arraycopy(buffer, (int) (position - lastWrittenPosition), + if (position >= writtenOffset) { + System.arraycopy(buffer, (int) (position - writtenOffset), targetBuffer.array(), targetBuffer.position(), targetBuffer.limit()); return; } @@ -113,26 +93,9 @@ public final class BufferingFsTranslogFile extends FsTranslogFile { Channels.readFromFileChannelWithEofException(channelReference.channel(), position, targetBuffer); } - public FsChannelImmutableReader immutableReader() throws TranslogException { - if (channelReference.tryIncRef()) { - try (ReleasableLock lock = writeLock.acquire()) { - flushBuffer(); - FsChannelImmutableReader reader = new FsChannelImmutableReader(this.id, channelReference, lastWrittenPosition, operationCounter); - channelReference.incRef(); // for new reader - return reader; - } catch (Exception e) { - throw new TranslogException(shardId, "exception while creating an immutable reader", e); - } finally { - channelReference.decRef(); - } - } else { - throw new TranslogException(shardId, "can't increment channel [" + channelReference + "] ref count"); - } - } - @Override public boolean syncNeeded() { - return lastPosition != lastSyncPosition; + return totalOffset != lastSyncedOffset; } @Override @@ -141,30 +104,21 @@ public final class BufferingFsTranslogFile extends FsTranslogFile { return; } try (ReleasableLock lock = writeLock.acquire()) { - flushBuffer(); - lastSyncPosition = lastPosition; + flush(); + lastSyncedOffset = totalOffset; } channelReference.channel().force(false); } @Override - protected void doClose() throws IOException { - try { - sync(); - } finally { - super.doClose(); - } - } - - @Override - public void reuse(FsTranslogFile other) { - if (!(other instanceof BufferingFsTranslogFile)) { + public void reuse(TranslogFile other) { + if (!(other instanceof BufferingTranslogFile)) { return; } try (ReleasableLock lock = writeLock.acquire()) { try { - flushBuffer(); - this.buffer = ((BufferingFsTranslogFile) other).buffer; + flush(); + this.buffer = ((BufferingTranslogFile) other).buffer; } catch (IOException e) { throw new TranslogException(shardId, "failed to flush", e); } @@ -176,7 +130,7 @@ public final class BufferingFsTranslogFile extends FsTranslogFile { if (this.buffer.length == bufferSize) { return; } - flushBuffer(); + flush(); this.buffer = new byte[bufferSize]; } catch (IOException e) { throw new TranslogException(shardId, "failed to flush", e); @@ -197,5 +151,4 @@ public final class BufferingFsTranslogFile extends FsTranslogFile { bufferCount += len; } } - } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java b/src/main/java/org/elasticsearch/index/translog/ChannelImmutableReader.java similarity index 83% rename from src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java rename to src/main/java/org/elasticsearch/index/translog/ChannelImmutableReader.java index 7e4bc1172d1..61ee844a1bc 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelImmutableReader.java +++ b/src/main/java/org/elasticsearch/index/translog/ChannelImmutableReader.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.elasticsearch.common.io.Channels; @@ -28,7 +28,7 @@ import java.nio.ByteBuffer; /** * a channel reader which is fixed in length */ -public final class FsChannelImmutableReader extends FsChannelReader { +public final class ChannelImmutableReader extends ChannelReader { private final int totalOperations; private final long length; @@ -37,17 +37,17 @@ public final class FsChannelImmutableReader extends FsChannelReader { * Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point * at the end of the last operation in this snapshot. */ - public FsChannelImmutableReader(long id, ChannelReference channelReference, long length, int totalOperations) { + public ChannelImmutableReader(long id, ChannelReference channelReference, long length, int totalOperations) { super(id, channelReference); this.length = length; this.totalOperations = totalOperations; } - public FsChannelImmutableReader clone() { + public ChannelImmutableReader clone() { if (channelReference.tryIncRef()) { try { - FsChannelImmutableReader reader = new FsChannelImmutableReader(id, channelReference, length, totalOperations); + ChannelImmutableReader reader = new ChannelImmutableReader(id, channelReference, length, totalOperations); channelReference.incRef(); // for the new object return reader; } finally { @@ -80,7 +80,7 @@ public final class FsChannelImmutableReader extends FsChannelReader { } @Override - public FsChannelSnapshot newSnapshot() { - return new FsChannelSnapshot(clone()); + public ChannelSnapshot newSnapshot() { + return new ChannelSnapshot(clone()); } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java b/src/main/java/org/elasticsearch/index/translog/ChannelReader.java similarity index 92% rename from src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java rename to src/main/java/org/elasticsearch/index/translog/ChannelReader.java index 31ec0a07209..c4769d8698e 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelReader.java +++ b/src/main/java/org/elasticsearch/index/translog/ChannelReader.java @@ -17,11 +17,10 @@ * under the License. */ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.index.translog.Translog; import java.io.Closeable; import java.io.IOException; @@ -32,7 +31,7 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * A base class for all classes that allow reading ops from translog files */ -public abstract class FsChannelReader implements Closeable, Comparable<FsChannelReader> { +public abstract class ChannelReader implements Closeable, Comparable<ChannelReader> { public static final int UNKNOWN_OP_COUNT = -1; @@ -41,7 +40,7 @@ public abstract class FsChannelReader implements Closeable, Comparable<FsChannelReader> { private final AtomicBoolean closed = new AtomicBoolean(false); - public FsChannelReader(long id, ChannelReference channelReference) { + public ChannelReader(long id, ChannelReference channelReference) { this.id = id; this.channelReference = channelReference; } diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java -public interface Translog extends IndexShardComponent { +public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable { + public static final String INDEX_TRANSLOG_DURABILITY = "index.translog.durability"; + public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type"; + public static final String INDEX_TRANSLOG_BUFFER_SIZE = "index.translog.fs.buffer_size"; + public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval"; + public static final String TRANSLOG_FILE_PREFIX = "translog-"; + static final Pattern PARSE_ID_PATTERN = Pattern.compile(TRANSLOG_FILE_PREFIX + "(\\d+)(\\.recovering)?$"); + private final TimeValue syncInterval; + private volatile ScheduledFuture<?> syncScheduler; + private volatile Durabilty durabilty = Durabilty.REQUEST; - void updateBuffer(ByteSizeValue bufferSize); - /** - * Returns the id of the current transaction log. - */ - long currentId(); + // this is a concurrent set and is not protected by any of the locks. 
The main reason + // is that it is being accessed by two separate classes (additions & reading are done by Translog, removal by FsView when closed) + private final Set<FsView> outstandingViews = ConcurrentCollections.newConcurrentSet(); - /** - * Returns the number of operations in the transaction files that aren't committed to lucene.. - * Note: may return -1 if unknown - */ - int totalOperations(); - /** - * Returns the size in bytes of the translog files that aren't committed to lucene. - */ - long sizeInBytes(); + class ApplySettings implements IndexSettingsService.Listener { + @Override + public void onRefreshSettings(Settings settings) { + TranslogFile.Type type = TranslogFile.Type.fromString(settings.get(INDEX_TRANSLOG_FS_TYPE, Translog.this.type.name())); + if (type != Translog.this.type) { + logger.info("updating type from [{}] to [{}]", Translog.this.type, type); + Translog.this.type = type; + } - /** - * Creates a new transaction log file internally. That new file will be visible to all outstanding views. - * The id of the new translog file is returned. - */ - long newTranslog() throws TranslogException, IOException; + final Durabilty durabilty = Durabilty.getFromSettings(logger, settings, Translog.this.durabilty); + if (durabilty != Translog.this.durabilty) { + logger.info("updating durability from [{}] to [{}]", Translog.this.durabilty, durabilty); + Translog.this.durabilty = durabilty; + } + } + } - /** - * Adds a create operation to the transaction log. - */ - Location add(Operation operation) throws TranslogException; + private final IndexSettingsService indexSettingsService; + private final BigArrays bigArrays; + private final ThreadPool threadPool; - Translog.Operation read(Location location); + protected final ReleasableLock readLock; + protected final ReleasableLock writeLock; - /** - * Snapshots the current transaction log allowing to safely iterate over the snapshot. - * Snapshots are fixed in time and will not be updated with future operations. - */ - Snapshot newSnapshot() throws TranslogException; + private final Path location; - /** - * Returns a view into the current translog that is guaranteed to retain all current operations - * while receiving future ones as well - */ - View newView(); + // protected by the write lock + private long idGenerator = 1; + private TranslogFile current; + // ordered by age + private final List<ChannelImmutableReader> uncommittedTranslogs = new ArrayList<>(); + private long lastCommittedTranslogId = -1; // -1 is safe as it will not cause a translog deletion. - /** - * Sync's the translog. 
- */ - void sync() throws IOException; + private TranslogFile.Type type; - boolean syncNeeded(); + private boolean syncOnEachOperation = false; - void syncOnEachOperation(boolean syncOnEachOperation); + private volatile int bufferSize; + + private final ApplySettings applySettings = new ApplySettings(); + + private final AtomicBoolean closed = new AtomicBoolean(); + + public Translog(ShardId shardId, IndexSettingsService indexSettingsService, + BigArrays bigArrays, Path location, ThreadPool threadPool) throws IOException { + this(shardId, indexSettingsService.getSettings(), indexSettingsService, bigArrays, location, threadPool); + } + + public Translog(ShardId shardId, @IndexSettings Settings indexSettings, + BigArrays bigArrays, Path location) throws IOException { + this(shardId, indexSettings, null, bigArrays, location, null); + } + + private Translog(ShardId shardId, @IndexSettings Settings indexSettings, @Nullable IndexSettingsService indexSettingsService, + BigArrays bigArrays, Path location, @Nullable ThreadPool threadPool) throws IOException { + super(shardId, indexSettings); + ReadWriteLock rwl = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(rwl.readLock()); + writeLock = new ReleasableLock(rwl.writeLock()); + this.durabilty = Durabilty.getFromSettings(logger, indexSettings, durabilty); + this.indexSettingsService = indexSettingsService; + this.bigArrays = bigArrays; + this.location = location; + Files.createDirectories(this.location); + this.threadPool = threadPool; + + this.type = TranslogFile.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogFile.Type.BUFFERED.name())); + this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController... + + syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5)); + if (syncInterval.millis() > 0 && threadPool != null) { + this.syncOnEachOperation = false; + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync()); + } else if (syncInterval.millis() == 0) { + this.syncOnEachOperation = true; + } + + if (indexSettingsService != null) { + indexSettingsService.addListener(applySettings); + } + try { + recoverFromFiles(); + // now that we know which files are there, create a new current one. + current = createTranslogFile(null); + } catch (Throwable t) { + // close the opened translog files if we fail to create a new translog... 
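+ // (at this point only the readers opened by recoverFromFiles() can be live; 'current' was never assigned)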
+ IOUtils.closeWhileHandlingException(uncommittedTranslogs); + throw t; + } + } + + /** recover all translog files found on disk */ + private void recoverFromFiles() throws IOException { + boolean success = false; + ArrayList<ChannelImmutableReader> foundTranslogs = new ArrayList<>(); + try (ReleasableLock lock = writeLock.acquire()) { + try (DirectoryStream<Path> stream = Files.newDirectoryStream(location, TRANSLOG_FILE_PREFIX + "[0-9]*")) { + for (Path file : stream) { + final long id = parseIdFromFileName(file); + if (id < 0) { + throw new TranslogException(shardId, "failed to parse id from file name matching pattern " + file); + } + idGenerator = Math.max(idGenerator, id + 1); + final ChannelReference raf = new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ); + foundTranslogs.add(new ChannelImmutableReader(id, raf, raf.channel().size(), ChannelReader.UNKNOWN_OP_COUNT)); + logger.debug("found local translog with id [{}]", id); + } + } + CollectionUtil.timSort(foundTranslogs); + uncommittedTranslogs.addAll(foundTranslogs); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(foundTranslogs); + } + } + } + + /* extracts the translog id from a file name. returns -1 upon failure */ + public static long parseIdFromFileName(Path translogFile) { + final String fileName = translogFile.getFileName().toString(); + final Matcher matcher = PARSE_ID_PATTERN.matcher(fileName); + if (matcher.matches()) { + try { + return Long.parseLong(matcher.group(1)); + } catch (NumberFormatException e) { + throw new ElasticsearchException("number formatting issue in a file that passed PARSE_ID_PATTERN: [" + fileName + "]", e); + } + } + return -1; + } + + public void updateBuffer(ByteSizeValue bufferSize) { + this.bufferSize = bufferSize.bytesAsInt(); + try (ReleasableLock lock = writeLock.acquire()) { + current.updateBufferSize(this.bufferSize); + } + } + + boolean isOpen() { + return closed.get() == false; + } + + @Override + public void close() throws IOException { + if (closed.compareAndSet(false, true)) { + if (indexSettingsService != null) { + indexSettingsService.removeListener(applySettings); + } + + try (ReleasableLock lock = writeLock.acquire()) { + try { + IOUtils.close(this.current); + } finally { + IOUtils.close(uncommittedTranslogs); + } + } finally { + FutureUtils.cancel(syncScheduler); + logger.debug("translog closed"); + } + } + } /** * Returns all translog locations as absolute paths. * These paths don't contain actual translog files they are * directories holding the transaction logs. */ - public Path location(); + public Path location() { + return location; + } /** - * return stats + * Returns the id of the current transaction log. */ - TranslogStats stats(); + public long currentId() { + try (ReleasableLock lock = readLock.acquire()) { + return current.translogId(); + } + } + + /** + * Returns the number of operations in the transaction files that aren't committed to lucene. + * Note: may return -1 if unknown + */ + public int totalOperations() { + int ops = 0; + try (ReleasableLock lock = readLock.acquire()) { + ops += current.totalOperations(); + for (ChannelReader translog : uncommittedTranslogs) { + int tops = translog.totalOperations(); + if (tops == ChannelReader.UNKNOWN_OP_COUNT) { + return ChannelReader.UNKNOWN_OP_COUNT; + } + ops += tops; + } + } + return ops; + } + + /** + * Returns the size in bytes of the translog files that aren't committed to lucene. 
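+ * Note: computed under the read lock, summing the size of the current file and all as-yet uncommitted files.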
+ */ + public long sizeInBytes() { + long size = 0; + try (ReleasableLock lock = readLock.acquire()) { + size += current.sizeInBytes(); + for (ChannelReader translog : uncommittedTranslogs) { + size += translog.sizeInBytes(); + } + } + return size; + } /** * notifies the translog that translogId was committed as part of the commit data in lucene, together @@ -123,10 +310,396 @@ public interface Translog extends IndexShardComponent { * * @throws FileNotFoundException if the given translog id can not be found. */ - void markCommitted(long translogId) throws FileNotFoundException; + public void markCommitted(final long translogId) throws FileNotFoundException { + try (ReleasableLock lock = writeLock.acquire()) { + logger.trace("updating translogs on commit of [{}]", translogId); + if (translogId < lastCommittedTranslogId) { + throw new IllegalArgumentException("committed translog id can only go up (current [" + + lastCommittedTranslogId + "], got [" + translogId + "])"); + } + boolean found = false; + if (current.translogId() == translogId) { + found = true; + } else { + if (translogId > current.translogId()) { + throw new IllegalArgumentException("committed translog id must be lower or equal to current id (current [" + + current.translogId() + "], got [" + translogId + "])"); + } + } + if (found == false) { + // try to find it in uncommittedTranslogs + for (ChannelImmutableReader translog : uncommittedTranslogs) { + if (translog.translogId() == translogId) { + found = true; + break; + } + } + } + if (found == false) { + ArrayList<Long> currentIds = new ArrayList<>(); + for (ChannelReader translog : Iterables.concat(uncommittedTranslogs, Collections.singletonList(current))) { + currentIds.add(translog.translogId()); + } + throw new FileNotFoundException("committed translog id can not be found (current [" + + Strings.collectionToCommaDelimitedString(currentIds) + "], got [" + translogId + "])"); + } + lastCommittedTranslogId = translogId; + while (uncommittedTranslogs.isEmpty() == false && uncommittedTranslogs.get(0).translogId() < translogId) { + ChannelReader old = uncommittedTranslogs.remove(0); + logger.trace("removed [{}] from uncommitted translog list", old.translogId()); + try { + old.close(); + } catch (IOException e) { + logger.error("failed to close old translog [{}] (committed id [{}])", e, old, translogId); + } + } + } + } + + /** + * Creates a new transaction log file internally. That new file will be visible to all outstanding views. + * The id of the new translog file is returned. + */ + public long newTranslog() throws TranslogException, IOException { + try (ReleasableLock lock = writeLock.acquire()) { + final TranslogFile old = current; + final TranslogFile newFile = createTranslogFile(old); + current = newFile; + ChannelImmutableReader reader = old.immutableReader(); + uncommittedTranslogs.add(reader); + // notify all outstanding views of the new translog (no views are created now as + // we hold a write lock). 
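+ // each outstanding view trades its live reader for the old file for a frozen, fixed-length one + // and receives a new live reader that follows the new current file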
+ for (FsView view : outstandingViews) { + view.onNewTranslog(old.immutableReader(), current.reader()); + } + IOUtils.close(old); + logger.trace("current translog set to [{}]", current.translogId()); + return current.translogId(); + } + } + + protected TranslogFile createTranslogFile(@Nullable TranslogFile reuse) throws IOException { + TranslogFile newFile; + long size = Long.MAX_VALUE; + try { + long id = idGenerator++; + newFile = type.create(shardId, id, new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), bufferSize); + } catch (IOException e) { + throw new TranslogException(shardId, "failed to create new translog file", e); + } + if (reuse != null) { + newFile.reuse(reuse); + } + return newFile; + } - static class Location implements Accountable { + /** + * Read the Operation object from the given location; returns null if the + * Operation could not be read. + */ + public Translog.Operation read(Location location) { + try (ReleasableLock lock = readLock.acquire()) { + ChannelReader reader = null; + if (current.translogId() == location.translogId) { + reader = current; + } else { + for (ChannelReader translog : uncommittedTranslogs) { + if (translog.translogId() == location.translogId) { + reader = translog; + break; + } + } + } + return reader == null ? null : reader.read(location); + } catch (IOException e) { + throw new ElasticsearchException("failed to read source from translog location " + location, e); + } + } + + /** + * Adds an operation to the transaction log. + */ + public Location add(Operation operation) throws TranslogException { + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); + try { + TranslogStreams.writeTranslogOperation(out, operation); + ReleasablePagedBytesReference bytes = out.bytes(); + try (ReleasableLock lock = readLock.acquire()) { + Location location = current.add(bytes); + if (syncOnEachOperation) { + current.sync(); + } + + assert current.assertBytesAtLocation(location, bytes); + return location; + } + } catch (Throwable e) { + throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); + } finally { + Releasables.close(out.bytes()); + } + } + + /** + * Snapshots the current transaction log, allowing safe iteration over the snapshot. + * Snapshots are fixed in time and will not be updated with future operations. + */ + public Snapshot newSnapshot() { + try (ReleasableLock lock = readLock.acquire()) { + // leave one place for current. + final ChannelReader[] readers = uncommittedTranslogs.toArray(new ChannelReader[uncommittedTranslogs.size() + 1]); + readers[readers.length - 1] = current; + return createdSnapshot(readers); + } + } + + private Snapshot createdSnapshot(ChannelReader... 
translogs) { + ArrayList<ChannelSnapshot> channelSnapshots = new ArrayList<>(); + boolean success = false; + try { + for (ChannelReader translog : translogs) { + channelSnapshots.add(translog.newSnapshot()); + } + Snapshot snapshot = new TranslogSnapshot(channelSnapshots, logger); + success = true; + return snapshot; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(channelSnapshots); + } + } + } + + /** + * Returns a view into the current translog that is guaranteed to retain all current operations + * while receiving future ones as well + */ + public Translog.View newView() { + // we need to acquire the read lock to make sure no new translog is created + // and thus missed by the view we're making + try (ReleasableLock lock = readLock.acquire()) { + ArrayList<ChannelReader> translogs = new ArrayList<>(); + try { + for (ChannelImmutableReader translog : uncommittedTranslogs) { + translogs.add(translog.clone()); + } + translogs.add(current.reader()); + FsView view = new FsView(translogs); + // this is safe as we know that no new translog is being made at the moment + // (we hold a read lock) and the view will be notified of any future one + outstandingViews.add(view); + translogs.clear(); + return view; + } finally { + // close if anything happened and we didn't reach the clear + IOUtils.closeWhileHandlingException(translogs); + } + } + } + + /** + * Syncs the translog. + */ + public void sync() throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + if (closed.get() == false) { + current.sync(); + } + } + } + + public boolean syncNeeded() { + try (ReleasableLock lock = readLock.acquire()) { + return current.syncNeeded(); + } + } + + /** package private for testing */ + String getFilename(long translogId) { + return TRANSLOG_FILE_PREFIX + translogId; + } + + + /** + * Ensures that the given location has been synced / written to the underlying storage. + * @return Returns true iff this call caused an actual sync operation, otherwise false + */ + public boolean ensureSynced(Location location) throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + if (location.translogId == current.id) { // if we have a new one it's already synced + return current.syncUpTo(location.translogLocation + location.size); + } + } + return false; + } + + /** + * return stats + */ + public TranslogStats stats() { + // acquire lock to make the two numbers roughly consistent (no file change half way) + try (ReleasableLock lock = readLock.acquire()) { + return new TranslogStats(totalOperations(), sizeInBytes()); + } + } + + private boolean isReferencedTranslogId(long translogId) { + return translogId >= lastCommittedTranslogId; + } + + private final class InternalChannelReference extends ChannelReference { + final long translogId; + + public InternalChannelReference(long translogId, Path file, OpenOption... openOptions) throws IOException { + super(file, openOptions); + this.translogId = translogId; + } + + @Override + protected void closeInternal() { + super.closeInternal(); + try (ReleasableLock lock = writeLock.acquire()) { + if (isReferencedTranslogId(translogId) == false) { + // if the given path is not the current we can safely delete the file since all references are released + logger.trace("delete translog file - not referenced and not current anymore {}", file()); + IOUtils.deleteFilesIgnoringExceptions(file()); + } + } + } + } + + + /** + * a view into the translog, capturing all translog files at the moment of creation + * and updated with any future translog. 
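+ * <p>Typical usage, as a sketch (assuming a {@code translog} reference to the owning Translog): + * <pre> + * Translog.View view = translog.newView(); + * try { + * Translog.Snapshot snapshot = view.snapshot(); // may be called repeatedly; later snapshots observe newly added operations + * } finally { + * view.close(); // releases the translog files retained by this view + * } + * </pre>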
+ */ + class FsView implements View { + + boolean closed; + // last in this list is always Translog.current + final List<ChannelReader> orderedTranslogs; + + FsView(List<ChannelReader> orderedTranslogs) { + assert orderedTranslogs.isEmpty() == false; + // clone so we can safely mutate. + this.orderedTranslogs = new ArrayList<>(orderedTranslogs); + } + + /** + * Called by the parent class whenever the current translog changes + * + * @param oldCurrent a new read-only reader for the old current (should replace the previous reference) + * @param newCurrent a reader into the new current. + */ + synchronized void onNewTranslog(ChannelReader oldCurrent, ChannelReader newCurrent) throws IOException { + // even though the close method removes this view from outstandingViews, there is no synchronisation in place + // between that operation and an ongoing addition of a new translog, already having an iterator. + // As such, this method can be called despite the fact that we are closed. We need to check and ignore. + if (closed) { + // we have to close the new references created for us as we will not hold them + IOUtils.close(oldCurrent, newCurrent); + return; + } + orderedTranslogs.remove(orderedTranslogs.size() - 1).close(); + orderedTranslogs.add(oldCurrent); + orderedTranslogs.add(newCurrent); + } + + @Override + public synchronized long minTranslogId() { + ensureOpen(); + return orderedTranslogs.get(0).translogId(); + } + + @Override + public synchronized int totalOperations() { + int ops = 0; + for (ChannelReader translog : orderedTranslogs) { + int tops = translog.totalOperations(); + if (tops == ChannelReader.UNKNOWN_OP_COUNT) { + return -1; + } + ops += tops; + } + return ops; + } + + @Override + public synchronized long sizeInBytes() { + long size = 0; + for (ChannelReader translog : orderedTranslogs) { + size += translog.sizeInBytes(); + } + return size; + } + + public synchronized Snapshot snapshot() { + ensureOpen(); + return createdSnapshot(orderedTranslogs.toArray(new ChannelReader[orderedTranslogs.size()])); + } + + + void ensureOpen() { + if (closed) { + throw new ElasticsearchException("View is already closed"); + } + } + + @Override + public void close() { + List<ChannelReader> toClose = new ArrayList<>(); + try { + synchronized (this) { + if (closed == false) { + logger.trace("closing view starting at translog [{}]", minTranslogId()); + closed = true; + outstandingViews.remove(this); + toClose.addAll(orderedTranslogs); + orderedTranslogs.clear(); + } + } + } finally { + try { + // Close out of lock to prevent deadlocks between channel close which checks for + // references in InternalChannelReference.closeInternal (waiting on a read lock) + // and other Translog#newTranslog calling FsView.onNewTranslog (while having a write lock) + IOUtils.close(toClose); + } catch (Exception e) { + throw new ElasticsearchException("failed to close view", e); + } + } + } + } + + class Sync implements Runnable { + @Override + public void run() { + // don't re-schedule if it's closed... we are done + if (closed.get()) { + return; + } + if (syncNeeded()) { + threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() { + @Override + public void run() { + try { + sync(); + } catch (Exception e) { + logger.warn("failed to sync translog", e); + } + if (closed.get() == false) { + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); + } + } + }); + } else { + syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); + } + } + } + + public static class Location implements 
Accountable, Comparable<Location> { public final long translogId; public final long translogLocation; @@ -152,12 +725,41 @@ public interface Translog extends IndexShardComponent { public String toString() { return "[id: " + translogId + ", location: " + translogLocation + ", size: " + size + "]"; } + + @Override + public int compareTo(Location o) { + if (translogId == o.translogId) { + return Long.compare(translogLocation, o.translogLocation); + } + return Long.compare(translogId, o.translogId); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Location location = (Location) o; + + if (translogId != location.translogId) return false; + if (translogLocation != location.translogLocation) return false; + return size == location.size; + + } + + @Override + public int hashCode() { + int result = (int) (translogId ^ (translogId >>> 32)); + result = 31 * result + (int) (translogLocation ^ (translogLocation >>> 32)); + result = 31 * result + size; + return result; + } } /** * A snapshot of the transaction log, allows to iterate over all the transaction log operations. */ - static interface Snapshot extends Releasable { + public interface Snapshot extends Releasable { /** * The total number of operations in the translog. @@ -172,7 +774,7 @@ public interface Translog extends IndexShardComponent { } /** a view into the current translog that receives all operations from the moment created */ - interface View extends Releasable { + public interface View extends Releasable { /** * The total number of operations in the view. @@ -196,8 +798,8 @@ public interface Translog extends IndexShardComponent { * A generic interface representing an operation performed on the transaction log. * Each is associated with a type. */ - static interface Operation extends Streamable { - static enum Type { + public interface Operation extends Streamable { + enum Type { CREATE((byte) 1), SAVE((byte) 2), DELETE((byte) 3), @@ -237,7 +839,7 @@ public interface Translog extends IndexShardComponent { } - static class Source { + public static class Source { public final BytesReference source; public final String routing; public final String parent; @@ -253,7 +855,7 @@ public interface Translog extends IndexShardComponent { } } - static class Create implements Operation { + public static class Create implements Operation { public static final int SERIALIZATION_FORMAT = 6; private String id; @@ -446,7 +1048,7 @@ public interface Translog extends IndexShardComponent { } } - static class Index implements Operation { + public static class Index implements Operation { public static final int SERIALIZATION_FORMAT = 6; private String id; @@ -641,7 +1243,7 @@ public interface Translog extends IndexShardComponent { } } - static class Delete implements Operation { + public static class Delete implements Operation { public static final int SERIALIZATION_FORMAT = 2; private Term uid; @@ -751,7 +1353,7 @@ public interface Translog extends IndexShardComponent { /** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */ @Deprecated - static class DeleteByQuery implements Operation { + public static class DeleteByQuery implements Operation { public static final int SERIALIZATION_FORMAT = 2; private BytesReference source; @@ -880,4 +1482,32 @@ public interface Translog extends IndexShardComponent { '}'; } } + + /** + * Returns the current durability mode of this translog. 
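+ * <p>Sketch of how a caller can act on the mode (assuming an {@code indexShard} reference and the {@code location} returned when the operation was added to the translog): + * <pre> + * if (indexShard.getTranslogDurability() == Translog.Durabilty.REQUEST) { + * indexShard.sync(location); // fsync up to this operation's location before acknowledging the request + * } + * </pre>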
+ */ + public Durabilty getDurabilty() { + return durabilty; + } + + public enum Durabilty { + /** + * Async durability - translogs are synced based on a time interval. + */ + ASYNC, + /** + * Request durability - translogs are synced for each high level request (bulk, index, delete). + */ + REQUEST; + + public static Durabilty getFromSettings(ESLogger logger, Settings settings, Durabilty defaultValue) { + final String value = settings.get(INDEX_TRANSLOG_DURABILITY, defaultValue.name()); + try { + return valueOf(value.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException ex) { + logger.warn("Can't apply {}: illegal value [{}], using [{}] instead; valid values: {}", INDEX_TRANSLOG_DURABILITY, value, defaultValue, Arrays.toString(values())); + return defaultValue; + } + } + } } diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogFile.java b/src/main/java/org/elasticsearch/index/translog/TranslogFile.java new file mode 100644 index 00000000000..3f767752c53 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/translog/TranslogFile.java @@ -0,0 +1,240 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class TranslogFile extends ChannelReader { + + protected final ShardId shardId; + protected final ReleasableLock readLock; + protected final ReleasableLock writeLock; + /* the offset in bytes that was written when the file was last synced*/ + protected volatile long lastSyncedOffset; + /* the number of translog operations written to this file */ + protected volatile int operationCounter; + /* the offset in bytes written to the file */ + protected volatile long writtenOffset; + + public TranslogFile(ShardId shardId, long id, ChannelReference channelReference) throws IOException { + super(id, channelReference); + this.shardId = shardId; + ReadWriteLock rwl = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(rwl.readLock()); + writeLock = new ReleasableLock(rwl.writeLock()); + final TranslogStream stream = this.channelReference.stream(); + int headerSize = stream.writeHeader(channelReference.channel()); + this.writtenOffset += headerSize; + this.lastSyncedOffset += headerSize; + } + + + public enum Type { + + SIMPLE() { + @Override + public TranslogFile create(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { + return new TranslogFile(shardId, id, channelReference); + } + }, + BUFFERED() { + @Override + public TranslogFile create(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { + return new BufferingTranslogFile(shardId, id, channelReference, bufferSize); + } + }; + + public abstract TranslogFile create(ShardId shardId, long id, ChannelReference raf, int bufferSize) throws IOException; + + public static Type fromString(String type) { + if (SIMPLE.name().equalsIgnoreCase(type)) { + return SIMPLE; + } else if (BUFFERED.name().equalsIgnoreCase(type)) { + return BUFFERED; + } + throw new IllegalArgumentException("No translog fs type [" + type + "]"); + } + } + + + /** add the given bytes to the translog and return the location they were written at */ + public Translog.Location add(BytesReference data) throws IOException { + try (ReleasableLock lock = writeLock.acquire()) { + long position = writtenOffset; + data.writeTo(channelReference.channel()); + writtenOffset = writtenOffset + data.length(); + operationCounter = operationCounter + 1; + return new Translog.Location(id, position, data.length()); + } + } + + /** reuse resources from another translog file, which is guaranteed not to be used anymore */ + public void reuse(TranslogFile other) throws TranslogException {} + + /** change the size of the internal buffer if relevant */ + public void updateBufferSize(int bufferSize) throws TranslogException {} + + /** write all buffered ops to disk and fsync file */ + public void sync() throws IOException { + // check if we really need to sync here... 
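+ // (syncNeeded() is a volatile read; an add() racing past this check is simply picked up by the next sync)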
+ if (syncNeeded()) { + try (ReleasableLock lock = writeLock.acquire()) { + lastSyncedOffset = writtenOffset; + channelReference.channel().force(false); + } + } + } + + /** returns true if there are ops that are not yet synced */ + public boolean syncNeeded() { + return writtenOffset != lastSyncedOffset; // by default nothing is buffered + } + + @Override + public int totalOperations() { + return operationCounter; + } + + @Override + public long sizeInBytes() { + return writtenOffset; + } + + @Override + public ChannelSnapshot newSnapshot() { + return new ChannelSnapshot(immutableReader()); + } + + /** + * Flushes the buffer if the translog is buffered. + */ + protected void flush() throws IOException {} + + /** + * returns a new reader that follows the current writes (most importantly allows making + * repeated snapshots that include new content) + */ + public ChannelReader reader() { + channelReference.incRef(); + boolean success = false; + try { + ChannelReader reader = new InnerReader(this.id, channelReference); + success = true; + return reader; + } finally { + if (!success) { + channelReference.decRef(); + } + } + } + + + /** returns a new immutable reader which only exposes the currently written operations */ + public ChannelImmutableReader immutableReader() throws TranslogException { + if (channelReference.tryIncRef()) { + try (ReleasableLock lock = writeLock.acquire()) { + flush(); + ChannelImmutableReader reader = new ChannelImmutableReader(this.id, channelReference, writtenOffset, operationCounter); + channelReference.incRef(); // for new reader + return reader; + } catch (Exception e) { + throw new TranslogException(shardId, "exception while creating an immutable reader", e); + } finally { + channelReference.decRef(); + } + } else { + throw new TranslogException(shardId, "can't increment channel [" + channelReference + "] ref count"); + } + } + + boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws IOException { + ByteBuffer buffer = ByteBuffer.allocate(location.size); + readBytes(buffer, location.translogLocation); + return new BytesArray(buffer.array()).equals(expectedBytes); + } + + /** + * this class is used when one wants a reference to this file which exposes all recently written operations. 
+ * as such it needs access to the internals of the current reader + */ + final class InnerReader extends ChannelReader { + + public InnerReader(long id, ChannelReference channelReference) { + super(id, channelReference); + } + + @Override + public long sizeInBytes() { + return TranslogFile.this.sizeInBytes(); + } + + @Override + public int totalOperations() { + return TranslogFile.this.totalOperations(); + } + + @Override + protected void readBytes(ByteBuffer buffer, long position) throws IOException { + TranslogFile.this.readBytes(buffer, position); + } + + @Override + public ChannelSnapshot newSnapshot() { + return TranslogFile.this.newSnapshot(); + } + } + + /** + * Syncs the translog up to at least the given offset unless already synced + * @return true if this call caused an actual sync operation + */ + public boolean syncUpTo(long offset) throws IOException { + if (lastSyncedOffset < offset) { + sync(); + return true; + } + return false; + } + + @Override + protected final void doClose() throws IOException { + try { + sync(); + } finally { + super.doClose(); + } + } + + @Override + protected void readBytes(ByteBuffer buffer, long position) throws IOException { + try (ReleasableLock lock = readLock.acquire()) { + Channels.readFromFileChannelWithEofException(channelReference.channel(), position, buffer); + } + } +} diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogService.java b/src/main/java/org/elasticsearch/index/translog/TranslogService.java index 633aeae2e83..1aa7184a4d9 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogService.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogService.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.translog; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -33,6 +34,8 @@ import org.elasticsearch.index.shard.*; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; +import java.util.Arrays; +import java.util.Locale; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ThreadLocalRandom; @@ -53,7 +56,6 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos private final ThreadPool threadPool; private final IndexSettingsService indexSettingsService; private final IndexShard indexShard; - private volatile Translog translog; private volatile TimeValue interval; private volatile int flushThresholdOperations; @@ -75,7 +77,6 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos this.flushThresholdPeriod = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(30)); this.interval = indexSettings.getAsTime(INDEX_TRANSLOG_FLUSH_INTERVAL, timeValueMillis(5000)); this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false); - logger.debug("interval [{}], flush_threshold_ops [{}], flush_threshold_size [{}], flush_threshold_period [{}]", interval, flushThresholdOperations, flushThresholdSize, flushThresholdPeriod); this.future = threadPool.schedule(interval, ThreadPool.Names.SAME, new TranslogBasedFlush()); @@ -141,12 +142,11 @@ public class TranslogService extends AbstractIndexShardComponent implements Clos reschedule(); return; } - - if (indexShard.engine().getTranslog() == null) { 
+ Translog translog = indexShard.engine().getTranslog(); + if (translog == null) { reschedule(); return; } - int currentNumberOfOperations = translog.totalOperations(); if (currentNumberOfOperations == 0) { reschedule(); diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java b/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java similarity index 84% rename from src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java rename to src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index f771bcaef5a..9c0f5561143 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogSnapshot.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -17,23 +17,21 @@ * under the License. */ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TruncatedTranslogException; import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -public class FsTranslogSnapshot implements Translog.Snapshot { +public class TranslogSnapshot implements Translog.Snapshot { - private final List<FsChannelSnapshot> orderedTranslogs; + private final List<ChannelSnapshot> orderedTranslogs; private final ESLogger logger; private final ByteBuffer cacheBuffer; private AtomicBoolean closed = new AtomicBoolean(false); @@ -44,15 +42,15 @@ public class FsTranslogSnapshot implements Translog.Snapshot { * Create a snapshot of translog file channel. The length parameter should be consistent with totalOperations and point * at the end of the last operation in this snapshot. */ - public FsTranslogSnapshot(List<FsChannelSnapshot> orderedTranslogs, ESLogger logger) { + public TranslogSnapshot(List<ChannelSnapshot> orderedTranslogs, ESLogger logger) { this.orderedTranslogs = orderedTranslogs; this.logger = logger; int ops = 0; - for (FsChannelSnapshot translog : orderedTranslogs) { + for (ChannelSnapshot translog : orderedTranslogs) { final int tops = translog.estimatedTotalOperations(); if (tops < 0) { - ops = FsChannelReader.UNKNOWN_OP_COUNT; + ops = ChannelReader.UNKNOWN_OP_COUNT; break; } ops += tops; @@ -72,7 +70,7 @@ public class FsTranslogSnapshot implements Translog.Snapshot { public Translog.Operation next() throws IOException { ensureOpen(); for (; currentTranslog < orderedTranslogs.size(); currentTranslog++) { - final FsChannelSnapshot current = orderedTranslogs.get(currentTranslog); + final ChannelSnapshot current = orderedTranslogs.get(currentTranslog); Translog.Operation op = null; try { op = current.next(cacheBuffer); diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java deleted file mode 100644 index 314e73a97a7..00000000000 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ /dev/null @@ -1,650 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.translog.fs; - -import com.google.common.collect.Iterables; -import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.ReleasableLock; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.settings.IndexSettingsService; -import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogException; -import org.elasticsearch.index.translog.TranslogStats; -import org.elasticsearch.index.translog.TranslogStreams; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.*; -import java.util.*; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * - */ -public class FsTranslog extends AbstractIndexShardComponent implements Translog, Closeable { - - public static final String INDEX_TRANSLOG_FS_TYPE = "index.translog.fs.type"; - public static final String INDEX_TRANSLOG_BUFFER_SIZE = "index.translog.fs.buffer_size"; - public static final String INDEX_TRANSLOG_SYNC_INTERVAL = "index.translog.sync_interval"; - public static final String TRANSLOG_FILE_PREFIX = "translog-"; - static final Pattern PARSE_ID_PATTERN = Pattern.compile(TRANSLOG_FILE_PREFIX + "(\\d+)(\\.recovering)?$"); - private final TimeValue syncInterval; - private volatile ScheduledFuture syncScheduler; - - - // this is a concurrent set and is not protected by any of the locks. 
The main reason - // is that is being accessed by two separate classes (additions & reading are done by FsTranslog, remove by FsView when closed) - private final Set outstandingViews = ConcurrentCollections.newConcurrentSet(); - - - class ApplySettings implements IndexSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - FsTranslogFile.Type type = FsTranslogFile.Type.fromString(settings.get(INDEX_TRANSLOG_FS_TYPE, FsTranslog.this.type.name())); - if (type != FsTranslog.this.type) { - logger.info("updating type from [{}] to [{}]", FsTranslog.this.type, type); - FsTranslog.this.type = type; - } - } - } - - private final IndexSettingsService indexSettingsService; - private final BigArrays bigArrays; - private final ThreadPool threadPool; - - protected final ReleasableLock readLock; - protected final ReleasableLock writeLock; - - private final Path location; - - // protected by the write lock - private long idGenerator = 1; - private FsTranslogFile current; - // ordered by age - private final List uncommittedTranslogs = new ArrayList<>(); - private long lastCommittedTranslogId = -1; // -1 is safe as it will not cause an translog deletion. - - private FsTranslogFile.Type type; - - private boolean syncOnEachOperation = false; - - private volatile int bufferSize; - - private final ApplySettings applySettings = new ApplySettings(); - - private final AtomicBoolean closed = new AtomicBoolean(); - - public FsTranslog(ShardId shardId, IndexSettingsService indexSettingsService, - BigArrays bigArrays, Path location, ThreadPool threadPool) throws IOException { - this(shardId, indexSettingsService.getSettings(), indexSettingsService, bigArrays, location, threadPool); - } - - public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, - BigArrays bigArrays, Path location) throws IOException { - this(shardId, indexSettings, null, bigArrays, location, null); - } - - private FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, @Nullable IndexSettingsService indexSettingsService, - BigArrays bigArrays, Path location, @Nullable ThreadPool threadPool) throws IOException { - super(shardId, indexSettings); - ReadWriteLock rwl = new ReentrantReadWriteLock(); - readLock = new ReleasableLock(rwl.readLock()); - writeLock = new ReleasableLock(rwl.writeLock()); - - this.indexSettingsService = indexSettingsService; - this.bigArrays = bigArrays; - this.location = location; - Files.createDirectories(this.location); - this.threadPool = threadPool; - - this.type = FsTranslogFile.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, FsTranslogFile.Type.BUFFERED.name())); - this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, ByteSizeValue.parseBytesSizeValue("64k")).bytes(); // Not really interesting, updated by IndexingMemoryController... - - syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5)); - if (syncInterval.millis() > 0 && threadPool != null) { - syncOnEachOperation(false); - syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, new Sync()); - } else if (syncInterval.millis() == 0) { - syncOnEachOperation(true); - } - - if (indexSettingsService != null) { - indexSettingsService.addListener(applySettings); - } - try { - recoverFromFiles(); - // now that we know which files are there, create a new current one. - current = createTranslogFile(null); - } catch (Throwable t) { - // close the opened translog files if we fail to create a new translog... 
- IOUtils.closeWhileHandlingException(uncommittedTranslogs); - throw t; - } - } - - /** recover all translog files found on disk */ - private void recoverFromFiles() throws IOException { - boolean success = false; - ArrayList<FsChannelImmutableReader> foundTranslogs = new ArrayList<>(); - try (ReleasableLock lock = writeLock.acquire()) { - try (DirectoryStream<Path> stream = Files.newDirectoryStream(location, TRANSLOG_FILE_PREFIX + "[0-9]*")) { - for (Path file : stream) { - final long id = parseIdFromFileName(file); - if (id < 0) { - throw new TranslogException(shardId, "failed to parse id from file name matching pattern " + file); - } - idGenerator = Math.max(idGenerator, id + 1); - final ChannelReference raf = new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ); - foundTranslogs.add(new FsChannelImmutableReader(id, raf, raf.channel().size(), FsChannelReader.UNKNOWN_OP_COUNT)); - logger.debug("found local translog with id [{}]", id); - } - } - CollectionUtil.timSort(foundTranslogs); - uncommittedTranslogs.addAll(foundTranslogs); - success = true; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(foundTranslogs); - } - } - } - - /* extracts the translog id from a file name. returns -1 upon failure */ - public static long parseIdFromFileName(Path translogFile) { - final String fileName = translogFile.getFileName().toString(); - final Matcher matcher = PARSE_ID_PATTERN.matcher(fileName); - if (matcher.matches()) { - try { - return Long.parseLong(matcher.group(1)); - } catch (NumberFormatException e) { - throw new ElasticsearchException("number formatting issue in a file that passed PARSE_ID_PATTERN: [" + fileName + "]", e); - } - } - return -1; - } - - @Override - public void updateBuffer(ByteSizeValue bufferSize) { - this.bufferSize = bufferSize.bytesAsInt(); - try (ReleasableLock lock = writeLock.acquire()) { - current.updateBufferSize(this.bufferSize); - } - } - - boolean isOpen() { - return closed.get() == false; - } - - @Override - public void close() throws IOException { - if (closed.compareAndSet(false, true)) { - if (indexSettingsService != null) { - indexSettingsService.removeListener(applySettings); - } - - try (ReleasableLock lock = writeLock.acquire()) { - try { - IOUtils.close(this.current); - } finally { - IOUtils.close(uncommittedTranslogs); - } - } finally { - FutureUtils.cancel(syncScheduler); - logger.debug("translog closed"); - } - } - } - - @Override - public Path location() { - return location; - } - - @Override - public long currentId() { - try (ReleasableLock lock = readLock.acquire()) { - return current.translogId(); - } - } - - @Override - public int totalOperations() { - int ops = 0; - try (ReleasableLock lock = readLock.acquire()) { - ops += current.totalOperations(); - for (FsChannelReader translog : uncommittedTranslogs) { - int tops = translog.totalOperations(); - if (tops == FsChannelReader.UNKNOWN_OP_COUNT) { - return FsChannelReader.UNKNOWN_OP_COUNT; - } - ops += tops; - } - } - return ops; - } - - @Override - public long sizeInBytes() { - long size = 0; - try (ReleasableLock lock = readLock.acquire()) { - size += current.sizeInBytes(); - for (FsChannelReader translog : uncommittedTranslogs) { - size += translog.sizeInBytes(); - } - } - return size; - } - - @Override - public void markCommitted(final long translogId) throws FileNotFoundException { - try (ReleasableLock lock = writeLock.acquire()) { - logger.trace("updating translogs on commit of [{}]", translogId); - if (translogId < lastCommittedTranslogId) { - throw new
IllegalArgumentException("committed translog id can only go up (current [" - + lastCommittedTranslogId + "], got [" + translogId + "]"); - } - boolean found = false; - if (current.translogId() == translogId) { - found = true; - } else { - if (translogId > current.translogId()) { - throw new IllegalArgumentException("committed translog id must be lower or equal to current id (current [" - + current.translogId() + "], got [" + translogId + "]"); - } - } - if (found == false) { - // try to find it in uncommittedTranslogs - for (FsChannelImmutableReader translog : uncommittedTranslogs) { - if (translog.translogId() == translogId) { - found = true; - break; - } - } - } - if (found == false) { - ArrayList currentIds = new ArrayList<>(); - for (FsChannelReader translog : Iterables.concat(uncommittedTranslogs, Collections.singletonList(current))) { - currentIds.add(translog.translogId()); - } - throw new FileNotFoundException("committed translog id can not be found (current [" - + Strings.collectionToCommaDelimitedString(currentIds) + "], got [" + translogId + "]"); - } - lastCommittedTranslogId = translogId; - while (uncommittedTranslogs.isEmpty() == false && uncommittedTranslogs.get(0).translogId() < translogId) { - FsChannelReader old = uncommittedTranslogs.remove(0); - logger.trace("removed [{}] from uncommitted translog list", old.translogId()); - try { - old.close(); - } catch (IOException e) { - logger.error("failed to closed old translog [{}] (committed id [{}])", e, old, translogId); - } - } - } - } - - @Override - public long newTranslog() throws TranslogException, IOException { - try (ReleasableLock lock = writeLock.acquire()) { - final FsTranslogFile old = current; - final FsTranslogFile newFile = createTranslogFile(old); - current = newFile; - FsChannelImmutableReader reader = old.immutableReader(); - uncommittedTranslogs.add(reader); - // notify all outstanding views of the new translog (no views are created now as - // we hold a write lock). - for (FsView view : outstandingViews) { - view.onNewTranslog(old.immutableReader(), current.reader()); - } - IOUtils.close(old); - logger.trace("current translog set to [{}]", current.translogId()); - return current.translogId(); - } - } - - protected FsTranslogFile createTranslogFile(@Nullable FsTranslogFile reuse) throws IOException { - FsTranslogFile newFile; - long size = Long.MAX_VALUE; - try { - long id = idGenerator++; - newFile = type.create(shardId, id, new InternalChannelReference(id, location.resolve(getFilename(id)), StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW), bufferSize); - } catch (IOException e) { - throw new TranslogException(shardId, "failed to create new translog file", e); - } - if (reuse != null) { - newFile.reuse(reuse); - } - return newFile; - } - - - /** - * Read the Operation object from the given location, returns null if the - * Operation could not be read. - */ - @Override - public Translog.Operation read(Location location) { - try (ReleasableLock lock = readLock.acquire()) { - FsChannelReader reader = null; - if (current.translogId() == location.translogId) { - reader = current; - } else { - for (FsChannelReader translog : uncommittedTranslogs) { - if (translog.translogId() == location.translogId) { - reader = translog; - break; - } - } - } - return reader == null ? 
null : reader.read(location); - } catch (IOException e) { - throw new ElasticsearchException("failed to read source from translog location " + location, e); - } - } - - @Override - public Location add(Operation operation) throws TranslogException { - ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); - try { - TranslogStreams.writeTranslogOperation(out, operation); - ReleasablePagedBytesReference bytes = out.bytes(); - try (ReleasableLock lock = readLock.acquire()) { - Location location = current.add(bytes); - if (syncOnEachOperation) { - current.sync(); - } - - assert current.assertBytesAtLocation(location, bytes); - return location; - } - } catch (Throwable e) { - throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); - } finally { - Releasables.close(out.bytes()); - } - } - - @Override - public Snapshot newSnapshot() { - try (ReleasableLock lock = readLock.acquire()) { - // leave one place for current. - final FsChannelReader[] readers = uncommittedTranslogs.toArray(new FsChannelReader[uncommittedTranslogs.size() + 1]); - readers[readers.length - 1] = current; - return createdSnapshot(readers); - } - } - - private Snapshot createdSnapshot(FsChannelReader... translogs) { - ArrayList<FsChannelSnapshot> channelSnapshots = new ArrayList<>(); - boolean success = false; - try { - for (FsChannelReader translog : translogs) { - channelSnapshots.add(translog.newSnapshot()); - } - Snapshot snapshot = new FsTranslogSnapshot(channelSnapshots, logger); - success = true; - return snapshot; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(channelSnapshots); - } - } - } - - @Override - public Translog.View newView() { - // we need to acquire the read lock to make sure no new translog is created - // and missed by the view we're making - try (ReleasableLock lock = readLock.acquire()) { - ArrayList<FsChannelReader> translogs = new ArrayList<>(); - try { - for (FsChannelImmutableReader translog : uncommittedTranslogs) { - translogs.add(translog.clone()); - } - translogs.add(current.reader()); - FsView view = new FsView(translogs); - // this is safe as we know that no new translog is being made at the moment - // (we hold a read lock) and the view will be notified of any future one - outstandingViews.add(view); - translogs.clear(); - return view; - } finally { - // close if anything happened and we didn't reach the clear - IOUtils.closeWhileHandlingException(translogs); - } - } - } - - @Override - public void sync() throws IOException { - try (ReleasableLock lock = readLock.acquire()) { - if (closed.get()) { - return; - } - current.sync(); - } - } - - @Override - public boolean syncNeeded() { - try (ReleasableLock lock = readLock.acquire()) { - return current.syncNeeded(); - } - } - - @Override - public void syncOnEachOperation(boolean syncOnEachOperation) { - this.syncOnEachOperation = syncOnEachOperation; - if (syncOnEachOperation) { - type = FsTranslogFile.Type.SIMPLE; - } else { - type = FsTranslogFile.Type.BUFFERED; - } - } - - /** package private for testing */ - String getFilename(long translogId) { - return TRANSLOG_FILE_PREFIX + translogId; - } - - @Override - public TranslogStats stats() { - // acquire lock to make the two numbers roughly consistent (no file change half way) - try (ReleasableLock lock = readLock.acquire()) { - return new TranslogStats(totalOperations(), sizeInBytes()); - } - } - - private boolean isReferencedTranslogId(long translogId) { - return translogId >= lastCommittedTranslogId; - } - - private final class
InternalChannelReference extends ChannelReference { - final long translogId; - - public InternalChannelReference(long translogId, Path file, OpenOption... openOptions) throws IOException { - super(file, openOptions); - this.translogId = translogId; - } - - @Override - protected void closeInternal() { - super.closeInternal(); - try (ReleasableLock lock = writeLock.acquire()) { - if (isReferencedTranslogId(translogId) == false) { - // if the given path is not the current we can safely delete the file since all references are released - logger.trace("delete translog file - not referenced and not current anymore {}", file()); - IOUtils.deleteFilesIgnoringExceptions(file()); - } - } - } - } - - /** - * a view into the translog, capturing all translog files at the moment of creation - * and updated with any future translog. - */ - class FsView implements View { - - boolean closed; - // last in this list is always FsTranslog.current - final List<FsChannelReader> orderedTranslogs; - - FsView(List<FsChannelReader> orderedTranslogs) { - assert orderedTranslogs.isEmpty() == false; - // clone so we can safely mutate. - this.orderedTranslogs = new ArrayList<>(orderedTranslogs); - } - - /** - * Called by the parent class whenever the current translog changes - * - * @param oldCurrent a new read only reader for the old current (should replace the previous reference) - * @param newCurrent a reader into the new current. - */ - synchronized void onNewTranslog(FsChannelReader oldCurrent, FsChannelReader newCurrent) throws IOException { - // even though the close method removes this view from outstandingViews, there is no synchronisation in place - // between that operation and an ongoing addition of a new translog, which may already hold an iterator over the views. - // As such, this method can be called despite the fact that we are closed. We need to check and ignore.
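- // (illustrative note: FsTranslog#newTranslog iterates outstandingViews under the write - // lock, but close() may remove this view after that iteration obtained its iterator; - // the readers handed to an already closed view must be released here rather than kept)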
- if (closed) { - // we have to close the new references created for us as we will not hold them - IOUtils.close(oldCurrent, newCurrent); - return; - } - orderedTranslogs.remove(orderedTranslogs.size() - 1).close(); - orderedTranslogs.add(oldCurrent); - orderedTranslogs.add(newCurrent); - } - - @Override - public synchronized long minTranslogId() { - ensureOpen(); - return orderedTranslogs.get(0).translogId(); - } - - @Override - public synchronized int totalOperations() { - int ops = 0; - for (FsChannelReader translog : orderedTranslogs) { - int tops = translog.totalOperations(); - if (tops == FsChannelReader.UNKNOWN_OP_COUNT) { - return -1; - } - ops += tops; - } - return ops; - } - - @Override - public synchronized long sizeInBytes() { - long size = 0; - for (FsChannelReader translog : orderedTranslogs) { - size += translog.sizeInBytes(); - } - return size; - } - - public synchronized Snapshot snapshot() { - ensureOpen(); - return createdSnapshot(orderedTranslogs.toArray(new FsChannelReader[orderedTranslogs.size()])); - } - - - void ensureOpen() { - if (closed) { - throw new ElasticsearchException("View is already closed"); - } - } - - @Override - public void close() { - List<FsChannelReader> toClose = new ArrayList<>(); - try { - synchronized (this) { - if (closed == false) { - logger.trace("closing view starting at translog [{}]", minTranslogId()); - closed = true; - outstandingViews.remove(this); - toClose.addAll(orderedTranslogs); - orderedTranslogs.clear(); - } - } - } finally { - try { - // Close out of lock to prevent deadlocks between channel close which checks for - // references in InternalChannelReference.closeInternal (waiting on a read lock) - // and FsTranslog#newTranslog calling FsView.onNewTranslog (while holding the write lock) - IOUtils.close(toClose); - } catch (Exception e) { - throw new ElasticsearchException("failed to close view", e); - } - } - } - } - - class Sync implements Runnable { - @Override - public void run() { - // don't re-schedule if it's closed..., we are done - if (closed.get()) { - return; - } - if (syncNeeded()) { - threadPool.executor(ThreadPool.Names.FLUSH).execute(new Runnable() { - @Override - public void run() { - try { - sync(); - } catch (Exception e) { - logger.warn("failed to sync translog", e); - } - if (closed.get() == false) { - syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); - } - } - }); - } else { - syncScheduler = threadPool.schedule(syncInterval, ThreadPool.Names.SAME, Sync.this); - } - } - } -} diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java deleted file mode 100644 index 1bfe8dae61c..00000000000 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied.
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.translog.fs; - -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.util.concurrent.ReleasableLock; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogException; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -public abstract class FsTranslogFile extends FsChannelReader { - - protected final ShardId shardId; - protected final ReleasableLock readLock; - protected final ReleasableLock writeLock; - - public FsTranslogFile(ShardId shardId, long id, ChannelReference channelReference) { - super(id, channelReference); - this.shardId = shardId; - ReadWriteLock rwl = new ReentrantReadWriteLock(); - readLock = new ReleasableLock(rwl.readLock()); - writeLock = new ReleasableLock(rwl.writeLock()); - } - - - public enum Type { - - SIMPLE() { - @Override - public FsTranslogFile create(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { - return new SimpleFsTranslogFile(shardId, id, channelReference); - } - }, - BUFFERED() { - @Override - public FsTranslogFile create(ShardId shardId, long id, ChannelReference channelReference, int bufferSize) throws IOException { - return new BufferingFsTranslogFile(shardId, id, channelReference, bufferSize); - } - }; - - public abstract FsTranslogFile create(ShardId shardId, long id, ChannelReference raf, int bufferSize) throws IOException; - - public static Type fromString(String type) { - if (SIMPLE.name().equalsIgnoreCase(type)) { - return SIMPLE; - } else if (BUFFERED.name().equalsIgnoreCase(type)) { - return BUFFERED; - } - throw new IllegalArgumentException("No translog fs type [" + type + "]"); - } - } - - - /** add the given bytes to the translog and return the location they were written at */ - public abstract Translog.Location add(BytesReference data) throws IOException; - - /** reuse resources from another translog file, which is guaranteed not to be used anymore */ - public abstract void reuse(FsTranslogFile other) throws TranslogException; - - /** change the size of the internal buffer if relevant */ - public abstract void updateBufferSize(int bufferSize) throws TranslogException; - - /** write all buffered ops to disk and fsync file */ - public abstract void sync() throws IOException; - - /** returns true if there are buffered ops */ - public abstract boolean syncNeeded(); - - @Override - public FsChannelSnapshot newSnapshot() { - return new FsChannelSnapshot(immutableReader()); - } - - /** - * returns a new reader that follows the current writes (most importantly allows making - * repeated snapshots that include new content) - */ - public FsChannelReader reader() { - channelReference.incRef(); - boolean success = false; - try { - FsChannelReader reader = new InnerReader(this.id, channelReference); - success = true; - return reader; - } finally { - if (!success) { - channelReference.decRef(); - } - } - } - - - /** returns a new immutable reader which only exposes the currently written operations */ - public abstract FsChannelImmutableReader immutableReader(); - - boolean assertBytesAtLocation(Translog.Location location, BytesReference expectedBytes) throws
IOException { - ByteBuffer buffer = ByteBuffer.allocate(location.size); - readBytes(buffer, location.translogLocation); - return new BytesArray(buffer.array()).equals(expectedBytes); - } - - /** - * this class is used when one wants a reference to this file which exposes all recently written operations. - * as such it needs access to the internals of the current reader - */ - final class InnerReader extends FsChannelReader { - - public InnerReader(long id, ChannelReference channelReference) { - super(id, channelReference); - } - - @Override - public long sizeInBytes() { - return FsTranslogFile.this.sizeInBytes(); - } - - @Override - public int totalOperations() { - return FsTranslogFile.this.totalOperations(); - } - - @Override - protected void readBytes(ByteBuffer buffer, long position) throws IOException { - FsTranslogFile.this.readBytes(buffer, position); - } - - @Override - public FsChannelSnapshot newSnapshot() { - return FsTranslogFile.this.newSnapshot(); - } - } -} diff --git a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java deleted file mode 100644 index 1c4ea31a2b5..00000000000 --- a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.index.translog.fs; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.Channels; -import org.elasticsearch.common.util.concurrent.ReleasableLock; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogException; - -import java.io.IOException; -import java.nio.ByteBuffer; - -public final class SimpleFsTranslogFile extends FsTranslogFile { - - private volatile int operationCounter = 0; - private volatile long lastPosition = 0; - private volatile long lastWrittenPosition = 0; - private volatile long lastSyncPosition = 0; - - public SimpleFsTranslogFile(ShardId shardId, long id, ChannelReference channelReference) throws IOException { - super(shardId, id, channelReference); - int headerSize = this.channelReference.stream().writeHeader(channelReference.channel()); - this.lastPosition += headerSize; - this.lastWrittenPosition += headerSize; - this.lastSyncPosition += headerSize; - } - - @Override - public int totalOperations() { - return operationCounter; - } - - @Override - public long sizeInBytes() { - return lastWrittenPosition; - } - - @Override - public Translog.Location add(BytesReference data) throws IOException { - try (ReleasableLock lock = writeLock.acquire()) { - long position = lastPosition; - data.writeTo(channelReference.channel()); - lastPosition = lastPosition + data.length(); - lastWrittenPosition = lastWrittenPosition + data.length(); - operationCounter = operationCounter + 1; - return new Translog.Location(id, position, data.length()); - } - } - - @Override - protected void readBytes(ByteBuffer buffer, long position) throws IOException { - try (ReleasableLock lock = readLock.acquire()) { - Channels.readFromFileChannelWithEofException(channelReference.channel(), position, buffer); - } - } - - @Override - public void doClose() throws IOException { - try { - sync(); - } finally { - super.doClose(); - } - } - - public FsChannelImmutableReader immutableReader() throws TranslogException { - if (channelReference.tryIncRef()) { - try (ReleasableLock lock = writeLock.acquire()) { - FsChannelImmutableReader reader = new FsChannelImmutableReader(this.id, channelReference, lastWrittenPosition, operationCounter); - channelReference.incRef(); // for the new object - return reader; - } finally { - channelReference.decRef(); - } - } else { - throw new TranslogException(shardId, "can't increment channel [" + channelReference + "] channel ref count"); - } - - } - - @Override - public boolean syncNeeded() { - return lastWrittenPosition != lastSyncPosition; - } - - @Override - public void sync() throws IOException { - // check if we really need to sync here... - if (!syncNeeded()) { - return; - } - try (ReleasableLock lock = writeLock.acquire()) { - lastSyncPosition = lastWrittenPosition; - channelReference.channel().force(false); - } - } - - @Override - public void reuse(FsTranslogFile other) { - // nothing to do there - } - - @Override - public void updateBufferSize(int bufferSize) throws TranslogException { - // nothing to do here... 
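- // (illustrative note: the SIMPLE type writes straight through the file channel on every - // add(), so there is no internal buffer to resize; only the BUFFERED type, implemented - // by BufferingFsTranslogFile, makes use of index.translog.fs.buffer_size)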
- } -} diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index d932bbb3803..0d474824cf4 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -621,10 +621,18 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i if (settings == null) { throw new IllegalArgumentException("settings must not be null"); } - PendingDelete pendingDelete = new PendingDelete(shardId, settings, false); + PendingDelete pendingDelete = new PendingDelete(shardId, settings); addPendingDelete(shardId.index(), pendingDelete); } + /** + * Adds a pending delete for the given index. + */ + public void addPendingDelete(Index index, @IndexSettings Settings settings) { + PendingDelete pendingDelete = new PendingDelete(index, settings); + addPendingDelete(index, pendingDelete); + } + private void addPendingDelete(Index index, PendingDelete pendingDelete) { synchronized (pendingDeletes) { List<PendingDelete> list = pendingDeletes.get(index); @@ -636,36 +644,45 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i } } - /** - * Adds a pending delete for the given index shard. - */ - public void addPendingDelete(Index index, @IndexSettings Settings settings) { - PendingDelete pendingDelete = new PendingDelete(null, settings, true); - addPendingDelete(index, pendingDelete); - } - private static final class PendingDelete implements Comparable<PendingDelete> { - final ShardId shardId; + final String index; + final int shardId; final Settings settings; final boolean deleteIndex; - public PendingDelete(ShardId shardId, Settings settings, boolean deleteIndex) { - this.shardId = shardId; + /** + * Creates a new pending delete of a shard + */ + public PendingDelete(ShardId shardId, Settings settings) { + this.index = shardId.getIndex(); + this.shardId = shardId.getId(); this.settings = settings; - this.deleteIndex = deleteIndex; - assert deleteIndex || shardId != null; + this.deleteIndex = false; + } + + /** + * Creates a new pending delete of an index + */ + public PendingDelete(Index index, Settings settings) { + this.index = index.getName(); + this.shardId = -1; + this.settings = settings; + this.deleteIndex = true; } @Override public String toString() { - return shardId.toString(); + StringBuilder sb = new StringBuilder(); + sb.append("[").append(index).append("]"); + if (shardId != -1) { + sb.append("[").append(shardId).append("]"); + } + return sb.toString(); } @Override public int compareTo(PendingDelete o) { - int left = deleteIndex ? -1 : shardId.id(); - int right = o.deleteIndex ?
-1 : o.shardId.id(); - return Integer.compare(left, right); + return Integer.compare(shardId, o.shardId); } } @@ -704,6 +721,7 @@ public class IndicesService extends AbstractLifecycleComponent i PendingDelete delete = iterator.next(); if (delete.deleteIndex) { + assert delete.shardId == -1; logger.debug("{} deleting index store reason [{}]", index, "pending delete"); try { nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings); @@ -712,7 +730,8 @@ public class IndicesService extends AbstractLifecycleComponent i logger.debug("{} retry pending delete", ex, index); } } else { - ShardLock shardLock = locks.get(delete.shardId); + assert delete.shardId != -1; + ShardLock shardLock = locks.get(new ShardId(delete.index, delete.shardId)); if (shardLock != null) { try { deleteShardStore("pending delete", shardLock, delete.settings); diff --git a/src/main/java/org/elasticsearch/plugins/PluginManager.java b/src/main/java/org/elasticsearch/plugins/PluginManager.java index 5678c03fdb9..cee8fde738e 100644 --- a/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -33,10 +33,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.X509TrustManager; import java.io.IOException; import java.io.PrintStream; import java.net.MalformedURLException; @@ -87,34 +83,6 @@ public class PluginManager { this.url = url; this.outputMode = outputMode; this.timeout = timeout; - - TrustManager[] trustAllCerts = new TrustManager[]{ - new X509TrustManager() { - @Override - public java.security.cert.X509Certificate[] getAcceptedIssuers() { - return null; - } - - @Override - public void checkClientTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - - @Override - public void checkServerTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - } - }; - - // Install the all-trusting trust manager - try { - SSLContext sc = SSLContext.getInstance("SSL"); - sc.init(null, trustAllCerts, new java.security.SecureRandom()); - HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); - } catch (Exception e) { - throw new ElasticsearchException("Failed to install all-trusting trust manager", e); - } } public void downloadAndExtract(String name) throws IOException { diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index 424d6d0d954..59c28553622 100644 --- a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -23,8 +23,6 @@ import com.google.common.collect.Lists; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; @@ -32,6 +30,7 @@ import 
org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsActi import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; @@ -64,6 +63,7 @@ import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAct import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; import org.elasticsearch.rest.action.admin.indices.optimize.RestOptimizeAction; +import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; @@ -73,11 +73,11 @@ import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteInd import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; -import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; import org.elasticsearch.rest.action.bulk.RestBulkAction; import org.elasticsearch.rest.action.cat.*; import org.elasticsearch.rest.action.delete.RestDeleteAction; @@ -89,7 +89,6 @@ import org.elasticsearch.rest.action.get.RestHeadAction; import org.elasticsearch.rest.action.get.RestMultiGetAction; import org.elasticsearch.rest.action.index.RestIndexAction; import org.elasticsearch.rest.action.main.RestMainAction; -import org.elasticsearch.rest.action.mlt.RestMoreLikeThisAction; import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; import org.elasticsearch.rest.action.percolate.RestPercolateAction; import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; @@ -209,8 +208,6 @@ public class RestActionModule extends AbstractModule { bind(RestValidateQueryAction.class).asEagerSingleton(); - bind(RestMoreLikeThisAction.class).asEagerSingleton(); - bind(RestExplainAction.class).asEagerSingleton(); bind(RestRecoveryAction.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java b/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java deleted file mode 100644 index 41f28574bdf..00000000000 --- 
a/src/main/java/org/elasticsearch/rest/action/mlt/RestMoreLikeThisAction.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.mlt; - -import org.elasticsearch.action.mlt.MoreLikeThisRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestToXContentListener; -import org.elasticsearch.search.Scroll; - -import static org.elasticsearch.client.Requests.moreLikeThisRequest; -import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.POST; - -/** - * - */ -public class RestMoreLikeThisAction extends BaseRestHandler { - - @Inject - public RestMoreLikeThisAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - controller.registerHandler(GET, "/{index}/{type}/{id}/_mlt", this); - controller.registerHandler(POST, "/{index}/{type}/{id}/_mlt", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - MoreLikeThisRequest mltRequest = moreLikeThisRequest(request.param("index")).type(request.param("type")).id(request.param("id")); - mltRequest.routing(request.param("routing")); - //TODO the ParseField class that encapsulates the supported names used for an attribute - //needs some work if it is to be used in a REST context like this too - // See the MoreLikeThisQueryParser constants that hold the valid syntax - mltRequest.fields(request.paramAsStringArray("mlt_fields", null)); - mltRequest.minimumShouldMatch(request.param("minimum_should_match", "0")); - mltRequest.minTermFreq(request.paramAsInt("min_term_freq", -1)); - mltRequest.maxQueryTerms(request.paramAsInt("max_query_terms", -1)); - mltRequest.stopWords(request.paramAsStringArray("stop_words", null)); - mltRequest.minDocFreq(request.paramAsInt("min_doc_freq", -1)); - mltRequest.maxDocFreq(request.paramAsInt("max_doc_freq", -1)); - mltRequest.minWordLength(request.paramAsInt("min_word_len", request.paramAsInt("min_word_length", -1))); - mltRequest.maxWordLength(request.paramAsInt("max_word_len", request.paramAsInt("max_word_length", -1))); - mltRequest.boostTerms(request.paramAsFloat("boost_terms", -1)); - mltRequest.include(request.paramAsBoolean("include", false)); - - mltRequest.searchType(SearchType.fromString(request.param("search_type"))); - 
mltRequest.searchIndices(request.paramAsStringArray("search_indices", null)); - mltRequest.searchTypes(request.paramAsStringArray("search_types", null)); - mltRequest.searchSize(request.paramAsInt("search_size", mltRequest.searchSize())); - mltRequest.searchFrom(request.paramAsInt("search_from", mltRequest.searchFrom())); - String searchScroll = request.param("search_scroll"); - if (searchScroll != null) { - mltRequest.searchScroll(new Scroll(parseTimeValue(searchScroll, null))); - } - if (request.hasContent()) { - mltRequest.searchSource(request.content()); - } else { - String searchSource = request.param("search_source"); - if (searchSource != null) { - mltRequest.searchSource(searchSource); - } - } - - client.moreLikeThis(mltRequest, new RestToXContentListener(channel)); - } -} diff --git a/src/main/java/org/elasticsearch/script/ScriptParameterParser.java b/src/main/java/org/elasticsearch/script/ScriptParameterParser.java index 95d60a58a9b..8addcd6ebc7 100644 --- a/src/main/java/org/elasticsearch/script/ScriptParameterParser.java +++ b/src/main/java/org/elasticsearch/script/ScriptParameterParser.java @@ -26,8 +26,13 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ScriptService.ScriptType; import java.io.IOException; -import java.util.*; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; import java.util.Map.Entry; +import java.util.Set; public class ScriptParameterParser { @@ -102,12 +107,12 @@ public class ScriptParameterParser { String parameterName = entry.getKey(); Object parameterValue = entry.getValue(); if (ScriptService.SCRIPT_LANG.match(parameterName)) { - if (parameterValue instanceof String) { + if (parameterValue instanceof String || parameterValue == null) { lang = (String) parameterValue; if (removeMatchedEntries) { itr.remove(); } - } else { + } else { throw new ScriptParameterParseException("Value must be of type String: [" + parameterName + "]"); } } else { diff --git a/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java index 64eed0741bc..f71a3697664 100644 --- a/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java +++ b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java @@ -34,7 +34,7 @@ class DateMethodFunctionValues extends FieldDataFunctionValues { super(parent, data); this.calendarType = calendarType; - calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.ROOT); + calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index 2f6e929071f..9c41dc63379 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -57,8 +57,10 @@ import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg.AvgBucketParser; import 
org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketParser; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum.SumBucketParser; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelModule; @@ -109,6 +111,8 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ reducerParsers.add(DerivativeParser.class); reducerParsers.add(MaxBucketParser.class); reducerParsers.add(MinBucketParser.class); + reducerParsers.add(AvgBucketParser.class); + reducerParsers.add(SumBucketParser.class); reducerParsers.add(MovAvgParser.class); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index 27f5cbcf0aa..81051a7d9bb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -61,9 +61,12 @@ import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg.AvgBucketReducer; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketReducer; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum.SumBucketReducer; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; +import org.elasticsearch.search.aggregations.reducers.derivative.InternalDerivative; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer; import org.elasticsearch.search.aggregations.reducers.movavg.models.TransportMovAvgModelModule; @@ -116,10 +119,13 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM // Reducers DerivativeReducer.registerStreams(); + InternalDerivative.registerStreams(); InternalSimpleValue.registerStreams(); InternalBucketMetricValue.registerStreams(); MaxBucketReducer.registerStreams(); MinBucketReducer.registerStreams(); + AvgBucketReducer.registerStreams(); + SumBucketReducer.registerStreams(); MovAvgReducer.registerStreams(); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index e2ce1cc4b09..aaae94dfe1a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -49,10 +49,10 @@ public class DateHistogramParser implements Aggregator.Parser { static final ParseField OFFSET = new ParseField("offset"); static final ParseField INTERVAL = new ParseField("interval"); - private final ImmutableMap dateFieldUnits; + public static final ImmutableMap DATE_FIELD_UNITS; - public 
DateHistogramParser() { - dateFieldUnits = MapBuilder.newMapBuilder() + static { + DATE_FIELD_UNITS = MapBuilder.newMapBuilder() .put("year", DateTimeUnit.YEAR_OF_CENTURY) .put("1y", DateTimeUnit.YEAR_OF_CENTURY) .put("quarter", DateTimeUnit.QUARTER) @@ -184,7 +184,7 @@ public class DateHistogramParser implements Aggregator.Parser { } TimeZoneRounding.Builder tzRoundingBuilder; - DateTimeUnit dateTimeUnit = dateFieldUnits.get(interval); + DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(interval); if (dateTimeUnit != null) { tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit); } else { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 1934676ac40..3516e9d5ddb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -310,6 +310,10 @@ public class InternalHistogram extends Inter return factory; } + public Rounding getRounding() { + return emptyBucketInfo.rounding; + } + @Override public InternalHistogram create(List buckets) { return getFactory().create(buckets, this); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java index 2106f3247e1..6c0edd811d3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java @@ -53,7 +53,8 @@ public class InternalSimpleValue extends InternalNumericMetricsAggregation.Singl private double value; - InternalSimpleValue() {} // for serialization + protected InternalSimpleValue() { + } // for serialization public InternalSimpleValue(String name, double value, @Nullable ValueFormatter formatter, List reducers, Map metaData) { super(name, reducers, metaData); diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java index d2632721c64..71481de17ce 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java @@ -19,8 +19,10 @@ package org.elasticsearch.search.aggregations.reducers; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg.AvgBucketBuilder; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketBuilder; import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketBuilder; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum.SumBucketBuilder; import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder; import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgBuilder; @@ -41,6 +43,14 @@ public final class ReducerBuilders { return new MinBucketBuilder(name); } + public static final AvgBucketBuilder avgBucket(String name) { + return new AvgBucketBuilder(name); + } + + public static final SumBucketBuilder sumBucket(String name) { + return new SumBucketBuilder(name); + } + public static final MovAvgBuilder movingAvg(String name) { return new MovAvgBuilder(name); } diff --git 
a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsBuilder.java new file mode 100644 index 00000000000..d19363b835d --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsBuilder.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketParser; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; + +import java.io.IOException; + +/** + * A builder for building requests for a {@link BucketMetricsReducer} + */ +public abstract class BucketMetricsBuilder<B extends BucketMetricsBuilder<B>> extends ReducerBuilder<B> { + + private String format; + private GapPolicy gapPolicy; + + public BucketMetricsBuilder(String name, String type) { + super(name, type); + } + + public B format(String format) { + this.format = format; + return (B) this; + } + + public B gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return (B) this; + } + + @Override + protected final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MinBucketParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + doInternalXContent(builder, params); + return builder; + } + + protected void doInternalXContent(XContentBuilder builder, Params params) { + } + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsParser.java new file mode 100644 index 00000000000..0ff7dcf2fdd --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsParser.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * A parser for parsing requests for a {@link BucketMetricsReducer} + */ +public abstract class BucketMetricsParser implements Reducer.Parser { + + public static final ParseField FORMAT = new ParseField("format"); + + public BucketMetricsParser() { + super(); + } + + @Override + public final ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + GapPolicy gapPolicy = GapPolicy.SKIP; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (doParse(reducerName, currentFieldName, token, parser, context)) { + // Do nothing as subclass has stored the state for this token + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List<String> paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", + parser.getTokenLocation()); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for aggregation [" + reducerName + "]",
parser.getTokenLocation()); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + return buildFactory(reducerName, bucketsPaths, gapPolicy, formatter); + } + + protected abstract ReducerFactory buildFactory(String reducerName, String[] bucketsPaths, GapPolicy gapPolicy, + @Nullable ValueFormatter formatter); + + protected boolean doParse(String reducerName, String currentFieldName, Token token, XContentParser parser, SearchContext context) { + return false; + } + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsReducer.java new file mode 100644 index 00000000000..bf3d8b47e9e --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/BucketMetricsReducer.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * A class of sibling reducers which calculate metrics across the buckets of a + * sibling aggregation + */ +public abstract class BucketMetricsReducer extends SiblingReducer { + + protected ValueFormatter formatter; + protected GapPolicy gapPolicy; + + public BucketMetricsReducer() { + super(); + } + + protected BucketMetricsReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, metaData); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + public final InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { + preCollection(); + List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); + for (Aggregation aggregation : aggregations) { + if (aggregation.getName().equals(bucketsPath.get(0))) { + bucketsPath = bucketsPath.subList(1, bucketsPath.size()); + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + for (int i = 0; i < buckets.size(); i++) { + Bucket bucket = buckets.get(i); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); + if (bucketValue != null && !Double.isNaN(bucketValue)) { + collectBucketValue(bucket.getKeyAsString(), bucketValue); + } + } + } + } + return buildAggregation(Collections.EMPTY_LIST, metaData()); + } + + /** + * Called before initial collection and between successive collection runs. + * A chance to initialize or re-initialize state + */ + protected void preCollection() { + } + + /** + * Called after a collection run is finished to build the aggregation for + * the collected state. 
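+     * Implementations typically fold the per-bucket state gathered by
+     * {@link #collectBucketValue(String, Double)} into a single result here.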
+ * + * @param reducers + * the reducers to add to the resulting aggregation + * @param metadata + * the metadata to add to the resulting aggregation + * @return + */ + protected abstract InternalAggregation buildAggregation(List reducers, Map metadata); + + /** + * Called for each bucket with a value so the state can be modified based on + * the key and metric value for this bucket + * + * @param bucketKey + * the key for this bucket as a String + * @param bucketValue + * the value of the metric specified in bucketsPath + * for this bucket + */ + protected abstract void collectBucketValue(String bucketKey, Double bucketValue); + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + } + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/mlt/package-info.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketBuilder.java similarity index 69% rename from src/main/java/org/elasticsearch/action/mlt/package-info.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketBuilder.java index 8c02ddebdca..dff77e73525 100644 --- a/src/main/java/org/elasticsearch/action/mlt/package-info.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketBuilder.java @@ -17,7 +17,14 @@ * under the License. */ -/** - * More Like This action. - */ -package org.elasticsearch.action.mlt; \ No newline at end of file +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg; + +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsBuilder; + +public class AvgBucketBuilder extends BucketMetricsBuilder { + + public AvgBucketBuilder(String name) { + super(name, AvgBucketReducer.TYPE.name()); + } + +} diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketParser.java similarity index 51% rename from src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketParser.java index 83313dfc095..9c6e4ce1a87 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisAction.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketParser.java @@ -17,30 +17,22 @@ * under the License. 
*/ -package org.elasticsearch.action.mlt; +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg; -import org.elasticsearch.action.Action; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsParser; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; -/** - */ -public class MoreLikeThisAction extends Action { - - public static final MoreLikeThisAction INSTANCE = new MoreLikeThisAction(); - public static final String NAME = "indices:data/read/mlt"; - - private MoreLikeThisAction() { - super(NAME); +public class AvgBucketParser extends BucketMetricsParser { + @Override + public String type() { + return AvgBucketReducer.TYPE.name(); } @Override - public SearchResponse newResponse() { - return new SearchResponse(); - } - - @Override - public MoreLikeThisRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new MoreLikeThisRequestBuilder(client, this); + protected ReducerFactory buildFactory(String reducerName, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + return new AvgBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketReducer.java new file mode 100644 index 00000000000..7c418007758 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/avg/AvgBucketReducer.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.avg; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsReducer; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class AvgBucketReducer extends BucketMetricsReducer { + + public final static Type TYPE = new Type("avg_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public AvgBucketReducer readResult(StreamInput in) throws IOException { + AvgBucketReducer result = new AvgBucketReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private int count = 0; + private double sum = 0; + + private AvgBucketReducer() { + } + + protected AvgBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public Type type() { + return TYPE; + } + + @Override + protected void preCollection() { + count = 0; + sum = 0; + } + + @Override + protected void collectBucketValue(String bucketKey, Double bucketValue) { + count++; + sum += bucketValue; + } + + @Override + protected InternalAggregation buildAggregation(List reducers, Map metadata) { + double avgValue = count == 0 ? 
Double.NaN : (sum / count); + return new InternalSimpleValue(name(), avgValue, formatter, reducers, metadata); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private final GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new AvgBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java index 31d588a6497..88055660d1b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java @@ -19,41 +19,12 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; -import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsBuilder; -import java.io.IOException; - -public class MaxBucketBuilder extends ReducerBuilder { - - private String format; - private GapPolicy gapPolicy; +public class MaxBucketBuilder extends BucketMetricsBuilder { public MaxBucketBuilder(String name) { super(name, MaxBucketReducer.TYPE.name()); } - public MaxBucketBuilder format(String format) { - this.format = format; - return this; - } - - public MaxBucketBuilder gapPolicy(GapPolicy gapPolicy) { - this.gapPolicy = gapPolicy; - return this; - } - - @Override - protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { - if (format != null) { - builder.field(MaxBucketParser.FORMAT.getPreferredName(), format); - } - if (gapPolicy != null) { - builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); - } - return builder; - } - } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java index c8f3bad49f1..2d438fdf9a4 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java @@ -19,22 +19,12 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchParseException; import 
org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; -import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsParser; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; -import org.elasticsearch.search.internal.SearchContext; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -public class MaxBucketParser implements Reducer.Parser { - public static final ParseField FORMAT = new ParseField("format"); +public class MaxBucketParser extends BucketMetricsParser { @Override public String type() { @@ -42,55 +32,7 @@ public class MaxBucketParser implements Reducer.Parser { } @Override - public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { - XContentParser.Token token; - String currentFieldName = null; - String[] bucketsPaths = null; - String format = null; - GapPolicy gapPolicy = GapPolicy.SKIP; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { - format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { - bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY.match(currentFieldName)) { - gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" - + currentFieldName + "].", parser.getTokenLocation()); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { - List paths = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - String path = parser.text(); - paths.add(path); - } - bucketsPaths = paths.toArray(new String[paths.size()]); - } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" - + currentFieldName + "].", parser.getTokenLocation()); - } - } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", - parser.getTokenLocation()); - } - } - - if (bucketsPaths == null) { - throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() - + "] for derivative aggregation [" + reducerName + "]", parser.getTokenLocation()); - } - - ValueFormatter formatter = null; - if (format != null) { - formatter = ValueFormat.Patternable.Number.format(format).formatter(); - } - + protected ReducerFactory buildFactory(String reducerName, String[] bucketsPaths, GapPolicy gapPolicy, ValueFormatter formatter) { return new MaxBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java index 1d2d5c8d26c..079a3c5b40a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java +++ 
b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java @@ -21,25 +21,16 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.Type; -import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; -import org.elasticsearch.search.aggregations.reducers.BucketHelpers; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; -import org.elasticsearch.search.aggregations.reducers.SiblingReducer; -import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; -import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; import java.io.IOException; import java.util.ArrayList; @@ -47,7 +38,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; -public class MaxBucketReducer extends SiblingReducer { +public class MaxBucketReducer extends BucketMetricsReducer { public final static Type TYPE = new Type("max_bucket"); @@ -60,21 +51,19 @@ public class MaxBucketReducer extends SiblingReducer { } }; - private ValueFormatter formatter; - private GapPolicy gapPolicy; - public static void registerStreams() { ReducerStreams.registerStream(STREAM, TYPE.stream()); } + private List maxBucketKeys; + private double maxValue; + private MaxBucketReducer() { } protected MaxBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, Map metaData) { - super(name, bucketsPaths, metaData); - this.gapPolicy = gapPolicy; - this.formatter = formatter; + super(name, bucketsPaths, gapPolicy, formatter, metaData); } @Override @@ -82,46 +71,29 @@ public class MaxBucketReducer extends SiblingReducer { return TYPE; } - public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { - List maxBucketKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); - for (Aggregation aggregation : aggregations) { - if (aggregation.getName().equals(bucketsPath.get(0))) { - bucketsPath = bucketsPath.subList(1, bucketsPath.size()); - InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; - List buckets = multiBucketsAgg.getBuckets(); - for (int i = 0; i < buckets.size(); i++) { - Bucket bucket = 
buckets.get(i); - Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); - if (bucketValue != null) { - if (bucketValue > maxValue) { - maxBucketKeys.clear(); - maxBucketKeys.add(bucket.getKeyAsString()); - maxValue = bucketValue; - } else if (bucketValue.equals(maxValue)) { - maxBucketKeys.add(bucket.getKeyAsString()); - } - } - } - } + @Override + protected void preCollection() { + maxBucketKeys = new ArrayList<>(); + maxValue = Double.NEGATIVE_INFINITY; + } + + @Override + protected void collectBucketValue(String bucketKey, Double bucketValue) { + if (bucketValue > maxValue) { + maxBucketKeys.clear(); + maxBucketKeys.add(bucketKey); + maxValue = bucketValue; + } else if (bucketValue.equals(maxValue)) { + maxBucketKeys.add(bucketKey); } + } + + @Override + protected InternalAggregation buildAggregation(List reducers, Map metadata) { String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]); return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.EMPTY_LIST, metaData()); } - @Override - public void doReadFrom(StreamInput in) throws IOException { - formatter = ValueFormatterStreams.readOptional(in); - gapPolicy = GapPolicy.readFrom(in); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - ValueFormatterStreams.writeOptional(formatter, out); - gapPolicy.writeTo(out); - } - public static class Factory extends ReducerFactory { private final ValueFormatter formatter; diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java index b792b7bbac9..6160df80732 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java @@ -19,41 +19,13 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; -import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsBuilder; -import java.io.IOException; -public class MinBucketBuilder extends ReducerBuilder { - - private String format; - private GapPolicy gapPolicy; +public class MinBucketBuilder extends BucketMetricsBuilder { public MinBucketBuilder(String name) { super(name, MinBucketReducer.TYPE.name()); } - public MinBucketBuilder format(String format) { - this.format = format; - return this; - } - - public MinBucketBuilder gapPolicy(GapPolicy gapPolicy) { - this.gapPolicy = gapPolicy; - return this; - } - - @Override - protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { - if (format != null) { - builder.field(MinBucketParser.FORMAT.getPreferredName(), format); - } - if (gapPolicy != null) { - builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); - } - return builder; - } - } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java index b956bdb6d79..aa3fc48a5fa 
100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java @@ -19,79 +19,20 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; -import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsParser; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; -import org.elasticsearch.search.internal.SearchContext; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -public class MinBucketParser implements Reducer.Parser { - public static final ParseField FORMAT = new ParseField("format"); +public class MinBucketParser extends BucketMetricsParser { @Override public String type() { return MinBucketReducer.TYPE.name(); } - @Override - public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { - XContentParser.Token token; - String currentFieldName = null; - String[] bucketsPaths = null; - String format = null; - GapPolicy gapPolicy = GapPolicy.SKIP; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_STRING) { - if (FORMAT.match(currentFieldName)) { - format = parser.text(); - } else if (BUCKETS_PATH.match(currentFieldName)) { - bucketsPaths = new String[] { parser.text() }; - } else if (GAP_POLICY.match(currentFieldName)) { - gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); - } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" - + currentFieldName + "].", parser.getTokenLocation()); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if (BUCKETS_PATH.match(currentFieldName)) { - List paths = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - String path = parser.text(); - paths.add(path); - } - bucketsPaths = paths.toArray(new String[paths.size()]); - } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" - + currentFieldName + "].", parser.getTokenLocation()); - } - } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", - parser.getTokenLocation()); - } - } - - if (bucketsPaths == null) { - throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() - + "] for derivative aggregation [" + reducerName + "]", parser.getTokenLocation()); - } - - ValueFormatter formatter = null; - if (format != null) { - formatter = ValueFormat.Patternable.Number.format(format).formatter(); - } - + protected ReducerFactory buildFactory(String reducerName, String[] bucketsPaths, GapPolicy gapPolicy, ValueFormatter formatter) { return new MinBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); 
- }
+ }
 }
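One behaviour shared with `max_bucket` above is worth making explicit before the reducer itself: every bucket key that ties on the extreme value is reported, which is why the result carries an array of keys rather than a single key. A self-contained sketch of that bookkeeping, with invented keys and values:

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class MinBucketTieSketch {
    public static void main(String[] args) {
        // Illustrative bucket keys and metric values only.
        Map<String, Double> buckets = new LinkedHashMap<>();
        buckets.put("2015/01/01", 3.0);
        buckets.put("2015/02/01", 1.0);
        buckets.put("2015/03/01", 1.0);

        List<String> minKeys = new ArrayList<>();
        double min = Double.POSITIVE_INFINITY;
        for (Map.Entry<String, Double> e : buckets.entrySet()) {
            if (e.getValue() < min) {
                minKeys.clear();         // a new minimum invalidates earlier keys
                minKeys.add(e.getKey());
                min = e.getValue();
            } else if (e.getValue() == min) {
                minKeys.add(e.getKey()); // a tie adds the key alongside the others
            }
        }
        System.out.println(minKeys + " -> " + min); // [2015/02/01, 2015/03/01] -> 1.0
    }
}
--------------------------------------------------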
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java
index 7ab257c9fb0..5e0749e245c 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java
@@ -21,25 +21,16 @@ package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min;
 
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.search.aggregations.Aggregation;
-import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.InternalAggregation;
-import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
 import org.elasticsearch.search.aggregations.InternalAggregation.Type;
-import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
-import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
-import org.elasticsearch.search.aggregations.reducers.BucketHelpers;
 import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
 import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.reducers.ReducerFactory;
 import org.elasticsearch.search.aggregations.reducers.ReducerStreams;
-import org.elasticsearch.search.aggregations.reducers.SiblingReducer;
+import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsReducer;
 import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue;
-import org.elasticsearch.search.aggregations.support.AggregationPath;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
-import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -47,7 +38,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-public class MinBucketReducer extends SiblingReducer {
+public class MinBucketReducer extends BucketMetricsReducer {
 
     public final static Type TYPE = new Type("min_bucket");
 
@@ -60,21 +51,19 @@ public class MinBucketReducer extends SiblingReducer {
         }
     };
 
-    private ValueFormatter formatter;
-    private GapPolicy gapPolicy;
-
     public static void registerStreams() {
         ReducerStreams.registerStream(STREAM, TYPE.stream());
     }
 
+    private List<String> minBucketKeys;
+    private double minValue;
+
     private MinBucketReducer() {
     }
 
     protected MinBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter,
             Map<String, Object> metaData) {
-        super(name, bucketsPaths, metaData);
-        this.gapPolicy = gapPolicy;
-        this.formatter = formatter;
+        super(name, bucketsPaths, gapPolicy, formatter, metaData);
    }
 
     @Override
@@ -82,45 +71,27 @@ public class MinBucketReducer extends SiblingReducer {
         return TYPE;
     }
 
-    public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) {
-        List<String> minBucketKeys = new ArrayList<>();
-        double minValue = Double.POSITIVE_INFINITY;
-        List<String> bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList();
-        for (Aggregation aggregation : aggregations) {
-            if (aggregation.getName().equals(bucketsPath.get(0))) {
-                bucketsPath = bucketsPath.subList(1, bucketsPath.size());
-                InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation;
-                List<? extends Bucket> buckets = multiBucketsAgg.getBuckets();
-                for (int i = 0; i < buckets.size(); i++) {
-                    Bucket bucket = buckets.get(i);
-                    Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy);
-                    if (bucketValue != null) {
-                        if (bucketValue < minValue) {
-                            minBucketKeys.clear();
-                            minBucketKeys.add(bucket.getKeyAsString());
-                            minValue = bucketValue;
-                        } else if (bucketValue.equals(minValue)) {
-                            minBucketKeys.add(bucket.getKeyAsString());
-                        }
-                    }
-                }
-            }
+    @Override
+    protected void preCollection() {
+        minBucketKeys = new ArrayList<>();
+        minValue = Double.POSITIVE_INFINITY;
+    }
+
+    @Override
+    protected void collectBucketValue(String bucketKey, Double bucketValue) {
+        if (bucketValue < minValue) {
+            minBucketKeys.clear();
+            minBucketKeys.add(bucketKey);
+            minValue = bucketValue;
+        } else if (bucketValue.equals(minValue)) {
+            minBucketKeys.add(bucketKey);
         }
+    }
+
+    @Override
+    protected InternalAggregation buildAggregation(List<Reducer> reducers, Map<String, Object> metadata) {
         String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]);
         return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.EMPTY_LIST, metaData());
-    }
-
-    @Override
-    public void doReadFrom(StreamInput in) throws IOException {
-        formatter = ValueFormatterStreams.readOptional(in);
-        gapPolicy = GapPolicy.readFrom(in);
-    }
-
-    @Override
-    public void doWriteTo(StreamOutput out) throws IOException {
-        ValueFormatterStreams.writeOptional(formatter, out);
-        gapPolicy.writeTo(out);
-    }
+    }
 
     public static class Factory extends ReducerFactory {
 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketBuilder.java
new file mode 100644
index 00000000000..6c3f18f6b99
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketBuilder.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum; + +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsBuilder; + +public class SumBucketBuilder extends BucketMetricsBuilder { + + public SumBucketBuilder(String name) { + super(name, SumBucketReducer.TYPE.name()); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketParser.java new file mode 100644 index 00000000000..30e8093afd1 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketParser.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsParser; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; + +public class SumBucketParser extends BucketMetricsParser { + @Override + public String type() { + return SumBucketReducer.TYPE.name(); + } + + @Override + protected ReducerFactory buildFactory(String reducerName, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + return new SumBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter); + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketReducer.java new file mode 100644 index 00000000000..018a87ffa2d --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/sum/SumBucketReducer.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.sum; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.BucketMetricsReducer; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class SumBucketReducer extends BucketMetricsReducer { + + public final static Type TYPE = new Type("sum_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public SumBucketReducer readResult(StreamInput in) throws IOException { + SumBucketReducer result = new SumBucketReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private double sum = 0; + + private SumBucketReducer() { + } + + protected SumBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public Type type() { + return TYPE; + } + + @Override + protected void preCollection() { + sum = 0; + } + + @Override + protected void collectBucketValue(String bucketKey, Double bucketValue) { + sum += bucketValue; + } + + @Override + protected InternalAggregation buildAggregation(List reducers, Map metadata) { + return new InternalSimpleValue(name(), sum, formatter, reducers, metadata); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private final GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new SumBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/Derivative.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/Derivative.java new file mode 100644 index 00000000000..432f5e1ca8b --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/Derivative.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or 
more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.derivative;
+
+import org.elasticsearch.search.aggregations.reducers.SimpleValue;
+
+public interface Derivative extends SimpleValue {
+
+    /**
+     * Returns the normalized value. If no normalization factor has been
+     * specified, this method will return {@link #value()}.
+     *
+     * @return the normalized value
+     */
+    double normalizedValue();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java
index 210d56d4a6f..ac08264164b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java
@@ -20,16 +20,17 @@
 package org.elasticsearch.search.aggregations.reducers.derivative;
 
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
 import org.elasticsearch.search.aggregations.reducers.ReducerBuilder;
 
 import java.io.IOException;
 
-import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
-
 public class DerivativeBuilder extends ReducerBuilder<DerivativeBuilder> {
 
     private String format;
     private GapPolicy gapPolicy;
+    private String unit;
 
     public DerivativeBuilder(String name) {
         super(name, DerivativeReducer.TYPE.name());
@@ -45,6 +46,21 @@
         return this;
     }
 
+    public DerivativeBuilder unit(String unit) {
+        this.unit = unit;
+        return this;
+    }
+
+    /**
+     * Sets the unit using the provided {@link DateHistogramInterval}. This
+     * method is only useful when calculating the derivative using a
+     * `date_histogram`.
+     */
+    public DerivativeBuilder unit(DateHistogramInterval unit) {
+        this.unit = unit.toString();
+        return this;
+    }
+
     @Override
     protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException {
         if (format != null) {
@@ -53,6 +69,9 @@
         if (gapPolicy != null) {
             builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName());
         }
+        if (unit != null) {
+            builder.field(DerivativeParser.UNIT.getPreferredName(), unit);
+        }
         return builder;
     }
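The `unit` arithmetic is easiest to follow with concrete numbers before reading the parser and reducer that implement it below. A self-contained sketch of the normalization; the bucket keys and values are illustrative only, not taken from this change:

[source,java]
--------------------------------------------------
public class DerivativeUnitsSketch {
    public static void main(String[] args) {
        // Two adjacent monthly date_histogram buckets (illustrative numbers).
        long lastBucketKey = 1420070400000L;   // 2015-01-01T00:00Z in epoch millis
        long thisBucketKey = 1422748800000L;   // 2015-02-01T00:00Z in epoch millis
        double lastBucketValue = 400.0;
        double thisBucketValue = 710.0;

        double xAxisUnits = 86_400_000.0;      // the "day" unit expressed in millis

        double gradient = thisBucketValue - lastBucketValue;          // plain derivative: 310.0
        double xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits;  // 31.0 days between keys
        System.out.println(gradient / xDiff);  // normalized_value: 10.0 per day
    }
}
--------------------------------------------------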
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java
index a049a285b0e..0ccd399427c 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java
@@ -19,8 +19,12 @@
 package org.elasticsearch.search.aggregations.reducers.derivative;
 
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.rounding.DateTimeUnit;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramParser;
 import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
 import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.reducers.ReducerFactory;
@@ -34,6 +38,10 @@ import java.util.List;
 
 public class DerivativeParser implements Reducer.Parser {
 
+    public static final ParseField FORMAT = new ParseField("format");
+    public static final ParseField GAP_POLICY = new ParseField("gap_policy");
+    public static final ParseField UNIT = new ParseField("unit");
+
     @Override
     public String type() {
         return DerivativeReducer.TYPE.name();
@@ -45,6 +53,7 @@
         String currentFieldName = null;
         String[] bucketsPaths = null;
         String format = null;
+        String units = null;
         GapPolicy gapPolicy = GapPolicy.SKIP;
 
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -57,6 +66,8 @@
                 bucketsPaths = new String[] { parser.text() };
             } else if (GAP_POLICY.match(currentFieldName)) {
                 gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
+            } else if (UNIT.match(currentFieldName)) {
+                units = parser.text();
             } else {
                 throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: ["
                         + currentFieldName + "].", parser.getTokenLocation());
@@ -89,7 +100,20 @@
             formatter = ValueFormat.Patternable.Number.format(format).formatter();
         }
 
-        return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy);
+        Long xAxisUnits = null;
+        if (units != null) {
+            DateTimeUnit dateTimeUnit = DateHistogramParser.DATE_FIELD_UNITS.get(units);
+            if (dateTimeUnit != null) {
+                xAxisUnits = dateTimeUnit.field().getDurationField().getUnitMillis();
+            } else {
+                TimeValue timeValue = TimeValue.parseTimeValue(units, null);
+                if (timeValue != null) {
+                    xAxisUnits = timeValue.getMillis();
+                }
+            }
+        }
+
+        return new
DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, xAxisUnits); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java index 9d3397e8746..cb99200660e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -24,6 +24,7 @@ import com.google.common.collect.Lists; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; @@ -32,12 +33,12 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; +import org.joda.time.DateTime; import java.io.IOException; import java.util.ArrayList; @@ -65,15 +66,17 @@ public class DerivativeReducer extends Reducer { private ValueFormatter formatter; private GapPolicy gapPolicy; + private Double xAxisUnits; public DerivativeReducer() { } - public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, Long xAxisUnits, Map metadata) { super(name, bucketsPaths, metadata); this.formatter = formatter; this.gapPolicy = gapPolicy; + this.xAxisUnits = xAxisUnits == null ? 
null : (double) xAxisUnits; } @Override @@ -88,51 +91,83 @@ public class DerivativeReducer extends Reducer { InternalHistogram.Factory factory = histo.getFactory(); List newBuckets = new ArrayList<>(); + Long lastBucketKey = null; Double lastBucketValue = null; for (InternalHistogram.Bucket bucket : buckets) { + Long thisBucketKey = resolveBucketKeyAsLong(bucket); Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); if (lastBucketValue != null) { - double diff = thisBucketValue - lastBucketValue; - - List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); - aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); + double gradient = thisBucketValue - lastBucketValue; + double xDiff = -1; + if (xAxisUnits != null) { + xDiff = (thisBucketKey - lastBucketKey) / xAxisUnits; + } + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), + AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(new InternalDerivative(name(), gradient, xDiff, formatter, new ArrayList(), metaData())); InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( aggs), bucket.getKeyed(), bucket.getFormatter()); newBuckets.add(newBucket); } else { newBuckets.add(bucket); } + lastBucketKey = thisBucketKey; lastBucketValue = thisBucketValue; } return factory.create(newBuckets, histo); } + private Long resolveBucketKeyAsLong(InternalHistogram.Bucket bucket) { + Object key = bucket.getKey(); + if (key instanceof DateTime) { + return ((DateTime) key).getMillis(); + } else if (key instanceof Number) { + return ((Number) key).longValue(); + } else { + throw new AggregationExecutionException("Bucket keys must be either a Number or a DateTime for aggregation " + name() + + ". 
Found bucket with key " + key); + } + } + @Override public void doReadFrom(StreamInput in) throws IOException { formatter = ValueFormatterStreams.readOptional(in); gapPolicy = GapPolicy.readFrom(in); + if (in.readBoolean()) { + xAxisUnits = in.readDouble(); + } else { + xAxisUnits = null; + + } } @Override public void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(formatter, out); gapPolicy.writeTo(out); + boolean hasXAxisUnitsValue = xAxisUnits != null; + out.writeBoolean(hasXAxisUnitsValue); + if (hasXAxisUnitsValue) { + out.writeDouble(xAxisUnits); + } } public static class Factory extends ReducerFactory { private final ValueFormatter formatter; private GapPolicy gapPolicy; + private Long xAxisUnits; - public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy) { + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, Long xAxisUnits) { super(name, TYPE.name(), bucketsPaths); this.formatter = formatter; this.gapPolicy = gapPolicy; + this.xAxisUnits = xAxisUnits; } @Override protected Reducer createInternal(Map metaData) throws IOException { - return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); + return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, xAxisUnits, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/InternalDerivative.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/InternalDerivative.java new file mode 100644 index 00000000000..5542064f5d3 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/InternalDerivative.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationStreams; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class InternalDerivative extends InternalSimpleValue implements Derivative { + + public final static Type TYPE = new Type("derivative"); + + public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() { + @Override + public InternalDerivative readResult(StreamInput in) throws IOException { + InternalDerivative result = new InternalDerivative(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + AggregationStreams.registerStream(STREAM, TYPE.stream()); + } + + private double normalizationFactor; + + InternalDerivative() { + } + + public InternalDerivative(String name, double value, double normalizationFactor, ValueFormatter formatter, List reducers, + Map metaData) { + super(name, value, formatter, reducers, metaData); + this.normalizationFactor = normalizationFactor; + } + + @Override + public double normalizedValue() { + return normalizationFactor > 0 ? (value() / normalizationFactor) : value(); + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public Object getProperty(List path) { + if (path.isEmpty()) { + return this; + } else if (path.size() == 1 && "value".equals(path.get(0))) { + return value(); + } else if (path.size() == 1 && "normalized_value".equals(path.get(0))) { + return normalizedValue(); + } else { + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); + } + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + super.doWriteTo(out); + out.writeDouble(normalizationFactor); + } + + @Override + protected void doReadFrom(StreamInput in) throws IOException { + super.doReadFrom(in); + normalizationFactor = in.readDouble(); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + super.doXContentBody(builder, params); + + if (normalizationFactor > 0) { + boolean hasValue = !(Double.isInfinite(normalizedValue()) || Double.isNaN(normalizedValue())); + builder.field("normalized_value", hasValue ? 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/EwmaModel.java
similarity index 92%
rename from src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java
rename to src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/EwmaModel.java
index f17ba68f498..8d563062813 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/EwmaModel.java
@@ -33,9 +33,9 @@ import java.util.Map;
 /**
  * Calculate a exponentially weighted moving average
  */
-public class SingleExpModel extends MovAvgModel {
+public class EwmaModel extends MovAvgModel {

-    protected static final ParseField NAME_FIELD = new ParseField("single_exp");
+    protected static final ParseField NAME_FIELD = new ParseField("ewma");

     /**
      * Controls smoothing of data. Alpha = 1 retains no memory of past values
@@ -44,7 +44,7 @@ public class SingleExpModel extends MovAvgModel {
      */
     private double alpha;

-    public SingleExpModel(double alpha) {
+    public EwmaModel(double alpha) {
         this.alpha = alpha;
     }

@@ -68,7 +68,7 @@ public class SingleExpModel extends MovAvgModel {
     public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() {
         @Override
         public MovAvgModel readResult(StreamInput in) throws IOException {
-            return new SingleExpModel(in.readDouble());
+            return new EwmaModel(in.readDouble());
         }

         @Override
@@ -98,11 +98,11 @@ public class SingleExpModel extends MovAvgModel {
                 alpha = 0.5;
             }

-            return new SingleExpModel(alpha);
+            return new EwmaModel(alpha);
         }
     }

-    public static class SingleExpModelBuilder implements MovAvgModelBuilder {
+    public static class EWMAModelBuilder implements MovAvgModelBuilder {

         private double alpha = 0.5;

@@ -115,7 +115,7 @@ public class SingleExpModel extends MovAvgModel {
          *
          * @return The builder to continue chaining
          */
-        public SingleExpModelBuilder alpha(double alpha) {
+        public EWMAModelBuilder alpha(double alpha) {
             this.alpha = alpha;
             return this;
         }
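For reference, the model renamed to `ewma` here keeps a single smoothed level, s_t = alpha * x_t + (1 - alpha) * s_{t-1}; as the javadoc notes, alpha = 1 retains no memory of past values. A standalone sketch of that recurrence (the windowing done by `MovAvgModel` itself is not reproduced, and the initialization shown is an assumption):

```java
import java.util.Arrays;
import java.util.List;

public class EwmaSketch {
    // Exponentially weighted moving average over a window of values.
    // alpha = 1 keeps only the newest value; smaller alpha smooths more.
    static double ewma(List<Double> values, double alpha) {
        double avg = 0;
        boolean first = true;
        for (double v : values) {
            avg = first ? v : alpha * v + (1 - alpha) * avg;
            first = false;
        }
        return avg;
    }

    public static void main(String[] args) {
        // 10 -> 15 -> 22.5 with alpha = 0.5
        System.out.println(ewma(Arrays.asList(10.0, 20.0, 30.0), 0.5));
    }
}
```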
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/HoltLinearModel.java
similarity index 90%
rename from src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java
rename to src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/HoltLinearModel.java
index 7d32989cda1..e386de73052 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/HoltLinearModel.java
@@ -32,9 +32,9 @@ import java.util.*;
 /**
  * Calculate a doubly exponential weighted moving average
  */
-public class DoubleExpModel extends MovAvgModel {
+public class HoltLinearModel extends MovAvgModel {

-    protected static final ParseField NAME_FIELD = new ParseField("double_exp");
+    protected static final ParseField NAME_FIELD = new ParseField("holt");

     /**
      * Controls smoothing of data. Alpha = 1 retains no memory of past values
@@ -48,15 +48,15 @@ public class DoubleExpModel extends MovAvgModel {
      */
     private double beta;

-    public DoubleExpModel(double alpha, double beta) {
+    public HoltLinearModel(double alpha, double beta) {
         this.alpha = alpha;
         this.beta = beta;
     }

     /**
      * Predicts the next `n` values in the series, using the smoothing model to generate new values.
-     * Unlike the other moving averages, double-exp has forecasting/prediction built into the algorithm.
-     * Prediction is more than simply adding the next prediction to the window and repeating. Double-exp
+     * Unlike the other moving averages, Holt-Linear has forecasting/prediction built into the algorithm.
+     * Prediction is more than simply adding the next prediction to the window and repeating. Holt-Linear
      * will extrapolate into the future by applying the trend information to the smoothed data.
      *
      * @param values Collection of numerics to movingAvg, usually windowed
@@ -75,7 +75,7 @@ public class DoubleExpModel extends MovAvgModel {
     }

     /**
-     * Calculate a doubly exponential weighted moving average
+     * Calculate a Holt-Linear (doubly exponential weighted) moving average
     *
      * @param values Collection of values to calculate avg for
      * @param numForecasts number of forecasts into the future to return
@@ -99,8 +99,6 @@ public class DoubleExpModel extends MovAvgModel {

         int counter = 0;

-        //TODO bail if too few values
-
         T last;
         for (T v : values) {
             last = v;
@@ -128,7 +126,7 @@ public class DoubleExpModel extends MovAvgModel {
     public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() {
         @Override
         public MovAvgModel readResult(StreamInput in) throws IOException {
-            return new DoubleExpModel(in.readDouble(), in.readDouble());
+            return new HoltLinearModel(in.readDouble(), in.readDouble());
         }

         @Override
@@ -165,11 +163,11 @@ public class DoubleExpModel extends MovAvgModel {
                 beta = 0.5;
             }

-            return new DoubleExpModel(alpha, beta);
+            return new HoltLinearModel(alpha, beta);
         }
     }

-    public static class DoubleExpModelBuilder implements MovAvgModelBuilder {
+    public static class HoltLinearModelBuilder implements MovAvgModelBuilder {

         private double alpha = 0.5;
         private double beta = 0.5;

@@ -183,7 +181,7 @@ public class DoubleExpModel extends MovAvgModel {
          *
          * @return The builder to continue chaining
          */
-        public DoubleExpModelBuilder alpha(double alpha) {
+        public HoltLinearModelBuilder alpha(double alpha) {
             this.alpha = alpha;
             return this;
         }
@@ -195,7 +193,7 @@ public class DoubleExpModel extends MovAvgModel {
          *
          * @return The builder to continue chaining
          */
-        public DoubleExpModelBuilder beta(double beta) {
+        public HoltLinearModelBuilder beta(double beta) {
             this.beta = beta;
             return this;
         }
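The `holt` rename reflects that this is Holt's linear method: it tracks both a level, s_t = alpha * x_t + (1 - alpha)(s_{t-1} + b_{t-1}), and a trend, b_t = beta * (s_t - s_{t-1}) + (1 - beta) * b_{t-1}, which is what lets the model extrapolate forecasts instead of repeating the last smoothed value. A compact sketch under those textbook equations; the exact seeding inside `HoltLinearModel` may differ:

```java
import java.util.Arrays;
import java.util.List;

public class HoltLinearSketch {
    // Returns numForecasts predicted values beyond the end of the series.
    static double[] forecast(List<Double> values, double alpha, double beta, int numForecasts) {
        double level = values.get(0);
        double trend = values.get(1) - values.get(0); // simple trend seed (assumption)
        for (int i = 1; i < values.size(); i++) {
            double lastLevel = level;
            level = alpha * values.get(i) + (1 - alpha) * (level + trend);
            trend = beta * (level - lastLevel) + (1 - beta) * trend;
        }
        double[] forecasts = new double[numForecasts];
        for (int i = 0; i < numForecasts; i++) {
            forecasts[i] = level + (i + 1) * trend; // project the trend forward
        }
        return forecasts;
    }

    public static void main(String[] args) {
        List<Double> values = Arrays.asList(10.0, 20.0, 30.0, 40.0);
        System.out.println(Arrays.toString(forecast(values, 0.5, 0.5, 2)));
    }
}
```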
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java
index 71ccbcb31b0..a144459ab5d 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java
@@ -36,8 +36,8 @@ public class MovAvgModelModule extends AbstractModule {
     public MovAvgModelModule() {
         registerParser(SimpleModel.SimpleModelParser.class);
         registerParser(LinearModel.LinearModelParser.class);
-        registerParser(SingleExpModel.SingleExpModelParser.class);
-        registerParser(DoubleExpModel.DoubleExpModelParser.class);
+        registerParser(EwmaModel.SingleExpModelParser.class);
+        registerParser(HoltLinearModel.DoubleExpModelParser.class);
     }

     public void registerParser(Class parser) {
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java
index bc085f6241a..a09c8265654 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java
@@ -34,8 +34,8 @@ public class TransportMovAvgModelModule extends AbstractModule {
     public TransportMovAvgModelModule() {
         registerStream(SimpleModel.STREAM);
         registerStream(LinearModel.STREAM);
-        registerStream(SingleExpModel.STREAM);
-        registerStream(DoubleExpModel.STREAM);
+        registerStream(EwmaModel.STREAM);
+        registerStream(HoltLinearModel.STREAM);
     }

     public void registerStream(MovAvgModelStreams.Stream stream) {
diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
index 7ae314e4756..ce330e80d9e 100644
--- a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
+++ b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.search.lookup;

 import com.google.common.collect.Maps;
+
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
@@ -26,6 +27,8 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.apache.lucene.index.LeafReaderContext;

+import java.security.AccessController;
+import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Map;
@@ -73,11 +76,18 @@ public class LeafDocLookup implements Map {
         String fieldName = key.toString();
         ScriptDocValues scriptValues = localCacheFieldData.get(fieldName);
         if (scriptValues == null) {
-            FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types);
+            final FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types);
             if (mapper == null) {
                 throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + "");
             }
-            scriptValues = fieldDataService.getForField(mapper).load(reader).getScriptValues();
+            // load fielddata on behalf of the script: otherwise it would need additional permissions
+            // to deal with pagedbytes/ramusagestimator/etc
+            scriptValues = AccessController.doPrivileged(new PrivilegedAction<ScriptDocValues>() {
+                @Override
+                public ScriptDocValues run() {
+                    return fieldDataService.getForField(mapper).load(reader).getScriptValues();
+                }
+            });
             localCacheFieldData.put(fieldName, scriptValues);
         }
         scriptValues.setNextDocId(docId);
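The LeafDocLookup change is the standard `doPrivileged` idiom: the privileged block runs with the permissions of the trusted calling class rather than being limited by the untrusted script frames further up the stack, so only this narrow fielddata-loading step is exempted from the script's sandbox. The shape of the idiom in isolation, with a hypothetical `loadValues()` standing in for the fielddata call:

```java
import java.security.AccessController;
import java.security.PrivilegedAction;

public class PrivilegedLoad {
    // Hypothetical stand-in for an operation needing permissions the caller lacks.
    static String loadValues() {
        return System.getProperty("java.version"); // e.g. a guarded resource read
    }

    public static void main(String[] args) {
        // Runs loadValues() with this class's own permissions, not the caller's.
        String values = AccessController.doPrivileged(new PrivilegedAction<String>() {
            @Override
            public String run() {
                return loadValues();
            }
        });
        System.out.println(values);
    }
}
```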
diff --git a/src/main/resources/org/elasticsearch/bootstrap/security.policy b/src/main/resources/org/elasticsearch/bootstrap/security.policy
index e6500109dc7..3f11557284a 100644
--- a/src/main/resources/org/elasticsearch/bootstrap/security.policy
+++ b/src/main/resources/org/elasticsearch/bootstrap/security.policy
@@ -68,9 +68,6 @@ grant {
   // needed by ImmutableSettings
   permission java.lang.RuntimePermission "getenv.*";

-  // needed by PluginManager
-  permission java.lang.RuntimePermission "setFactory";
-
   // needed by LuceneTestCase/TestRuleLimitSysouts
   permission java.lang.RuntimePermission "setIO";

diff --git a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java
index 2fa6da48ce6..37f61da4679 100644
--- a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java
+++ b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action;

+import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction;
 import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
@@ -52,7 +53,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
-import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
 import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.elasticsearch.action.bulk.BulkAction;
@@ -71,7 +71,6 @@ import org.elasticsearch.action.get.MultiGetAction;
 import org.elasticsearch.action.get.MultiGetRequest;
 import org.elasticsearch.action.index.IndexAction;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.mlt.MoreLikeThisRequest;
 import org.elasticsearch.action.percolate.MultiPercolateAction;
 import org.elasticsearch.action.percolate.MultiPercolateRequest;
 import org.elasticsearch.action.percolate.PercolateAction;
@@ -89,20 +88,15 @@ import org.elasticsearch.action.termvectors.TermVectorsRequest;
 import org.elasticsearch.action.update.UpdateAction;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
-import org.elasticsearch.cluster.settings.DynamicSettings;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.search.action.SearchServiceTransportAction;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.*;
-import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -730,34 +724,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest {
         assertSameIndices(searchRequest, SearchServiceTransportAction.SCAN_ACTION_NAME);
     }

-    @Test
-    public void testMoreLikeThis() {
-        interceptTransportActions(GetAction.NAME + "[s]", SearchServiceTransportAction.QUERY_ACTION_NAME,
-                SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
-
-        String[] randomIndicesOrAliases = randomIndicesOrAliases();
-        for (int i = 0; i < randomIndicesOrAliases.length; i++) {
-            client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
-        }
-        refresh();
-
-        assertAcked(prepareCreate("test-get").addAlias(new Alias("alias-get")));
-        client().prepareIndex("test-get", "type", "1").setSource("field","value").get();
-        String indexGet = randomBoolean() ? "test-get" : "alias-get";
-
-        MoreLikeThisRequest moreLikeThisRequest = new MoreLikeThisRequest(indexGet).type("type").id("1")
-                .searchIndices(randomIndicesOrAliases());
-        internalCluster().clientNodeClient().moreLikeThis(moreLikeThisRequest).actionGet();
-
-        clearInterceptedActions();
-        //get might end up being executed locally, only optionally over the transport
-        assertSameIndicesOptionalRequests(new String[]{indexGet}, GetAction.NAME + "[s]");
-        //query might end up being executed locally as well, only optionally over the transport
-        assertSameIndicesOptionalRequests(moreLikeThisRequest.searchIndices(), SearchServiceTransportAction.QUERY_ACTION_NAME);
-        //free context messages are not necessarily sent through the transport, but if they are, check their indices
-        assertSameIndicesOptionalRequests(moreLikeThisRequest.searchIndices(), SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
-    }
-
     private static void assertSameIndices(IndicesRequest originalRequest, String... actions) {
         assertSameIndices(originalRequest, false, actions);
     }
diff --git a/src/test/java/org/elasticsearch/action/mlt/MoreLikeThisRequestTests.java b/src/test/java/org/elasticsearch/action/mlt/MoreLikeThisRequestTests.java
deleted file mode 100644
index 77400ff0817..00000000000
--- a/src/test/java/org/elasticsearch/action/mlt/MoreLikeThisRequestTests.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.mlt;
-
-import org.elasticsearch.action.search.SearchType;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.BytesStreamInput;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.search.Scroll;
-import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.test.ElasticsearchTestCase;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.elasticsearch.test.VersionUtils.randomVersion;
-import static org.hamcrest.CoreMatchers.*;
-
-public class MoreLikeThisRequestTests extends ElasticsearchTestCase {
-
-    @Test
-    public void testSerialization() throws IOException {
-
-        MoreLikeThisRequest mltRequest = new MoreLikeThisRequest(randomAsciiOfLength(randomIntBetween(1, 20)))
-                .id(randomAsciiOfLength(randomIntBetween(1, 20))).type(randomAsciiOfLength(randomIntBetween(1, 20)));
-
-        if (randomBoolean()) {
-            mltRequest.boostTerms(randomFloat());
-        }
-        if (randomBoolean()) {
-            mltRequest.maxDocFreq(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.minDocFreq(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.maxQueryTerms(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.minWordLength(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.maxWordLength(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.percentTermsToMatch(randomFloat());
-        }
-        if (randomBoolean()) {
-            mltRequest.searchTypes(randomStrings(5));
-        }
-        if (randomBoolean()) {
-            mltRequest.searchType(randomFrom(SearchType.values()));
-        }
-        if (randomBoolean()) {
-            mltRequest.searchIndices(randomStrings(5));
-        }
-        if (randomBoolean()) {
-            mltRequest.routing(randomAsciiOfLength(randomIntBetween(1, 20)));
-        }
-        if (randomBoolean()) {
-            mltRequest.searchFrom(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.searchSize(randomInt());
-        }
-        if (randomBoolean()) {
-            mltRequest.searchScroll(new Scroll(TimeValue.timeValueNanos(randomLong())));
-        }
-        if (randomBoolean()) {
-            mltRequest.searchSource(SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("term", "value")));
-        }
-        if(randomBoolean()) {
-            mltRequest.include(randomBoolean());
-        }
-        if (randomBoolean()) {
-            mltRequest.stopWords(randomStrings(10));
-        }
-        if (randomBoolean()) {
-            mltRequest.fields(randomStrings(5));
-        }
-
-        BytesStreamOutput out = new BytesStreamOutput();
-        out.setVersion(randomVersion(random()));
-        mltRequest.writeTo(out);
-
-        BytesStreamInput in = new BytesStreamInput(out.bytes());
-        in.setVersion(out.getVersion());
-        MoreLikeThisRequest mltRequest2 = new MoreLikeThisRequest();
-        mltRequest2.readFrom(in);
-
-        assertThat(mltRequest2.index(), equalTo(mltRequest.index()));
-        assertThat(mltRequest2.type(), equalTo(mltRequest.type()));
-        assertThat(mltRequest2.id(), equalTo(mltRequest.id()));
-        assertThat(mltRequest2.boostTerms(), equalTo(mltRequest.boostTerms()));
-        assertThat(mltRequest2.maxDocFreq(), equalTo(mltRequest.maxDocFreq()));
-        assertThat(mltRequest2.minDocFreq(), equalTo(mltRequest.minDocFreq()));
-        assertThat(mltRequest2.maxQueryTerms(), equalTo(mltRequest.maxQueryTerms()));
-        assertThat(mltRequest2.minWordLength(), equalTo(mltRequest.minWordLength()));
-        assertThat(mltRequest2.maxWordLength(), equalTo(mltRequest.maxWordLength()));
-        assertThat(mltRequest2.percentTermsToMatch(), equalTo(mltRequest.percentTermsToMatch()));
-        assertThat(mltRequest2.searchTypes(), equalTo(mltRequest.searchTypes()));
-        assertThat(mltRequest2.searchType(), equalTo(mltRequest.searchType()));
-        assertThat(mltRequest2.searchIndices(), equalTo(mltRequest.searchIndices()));
-        assertThat(mltRequest2.routing(), equalTo(mltRequest.routing()));
-        assertThat(mltRequest2.searchFrom(), equalTo(mltRequest.searchFrom()));
-        assertThat(mltRequest2.searchSize(), equalTo(mltRequest.searchSize()));
-        if (mltRequest.searchScroll() == null) {
-            assertThat(mltRequest2.searchScroll(), nullValue());
-        } else {
-            assertThat(mltRequest2.searchFrom(), notNullValue());
-            assertThat(mltRequest2.searchScroll().keepAlive(), equalTo(mltRequest.searchScroll().keepAlive()));
-        }
-
-        if (mltRequest.searchSource() == null) {
-            assertThat(mltRequest2.searchSource().length(), equalTo(0));
-        } else {
-            assertThat(mltRequest2.searchSource().length(), equalTo(mltRequest.searchSource().length()));
-        }
-
-        if (mltRequest.stopWords() != null && mltRequest.stopWords().length > 0) {
-            assertThat(mltRequest2.stopWords(), equalTo(mltRequest.stopWords()));
-        } else {
-            assertThat(mltRequest2.stopWords(), nullValue());
-        }
-        if (mltRequest.fields() == null) {
-            assertThat(mltRequest2.fields(), equalTo(Strings.EMPTY_ARRAY));
-        } else {
-            assertThat(mltRequest2.fields(), equalTo(mltRequest.fields()));
-        }
-        assertThat(mltRequest2.include(), equalTo(mltRequest.include()));
-    }
-
-    private static String[] randomStrings(int max) {
-        int count = randomIntBetween(0, max);
-        String[] strings = new String[count];
-        for (int i = 0; i < strings.length; i++) {
-            strings[i] = randomAsciiOfLength(randomIntBetween(1, 20));
-        }
-        return strings;
-    }
-}
diff --git a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java
index 7bd0cf373a5..63780a5f5ed 100644
--- a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java
+++ b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.action.support.replication;

 import com.google.common.base.Predicate;
+
 import org.apache.lucene.index.CorruptIndexException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -41,7 +42,13 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
-import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.ImmutableShardRouting;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -77,8 +84,15 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

-import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
-import static org.hamcrest.Matchers.*;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;

 public class ShardReplicationOperationTests extends ElasticsearchTestCase {

@@ -687,7 +701,7 @@ public class ShardReplicationOperationTests extends ElasticsearchTestCase {
                 ClusterService clusterService, ThreadPool threadPool) {
             super(settings, actionName, transportService, clusterService, null, threadPool,
-                    new ShardStateAction(settings, clusterService, transportService, null, null),
+                    new ShardStateAction(settings, clusterService, transportService, null, null), null,
                     new ActionFilters(new HashSet()), Request.class, Request.class, ThreadPool.Names.SAME);
         }
diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java
index 33433f1494c..c54f763f34f 100644
--- a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java
+++ b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java
@@ -70,6 +70,7 @@ public class SecurityTests extends ElasticsearchTestCase {
         settingsBuilder.put("path.plugins", path.resolve("plugins").toString());
         settingsBuilder.putArray("path.data", path.resolve("data1").toString(), path.resolve("data2").toString());
         settingsBuilder.put("path.logs", path.resolve("logs").toString());
+        settingsBuilder.put("pidfile", path.resolve("test.pid").toString());
         Settings settings = settingsBuilder.build();

         Environment environment = new Environment(settings);
@@ -105,5 +106,7 @@ public class SecurityTests extends ElasticsearchTestCase {
         assertTrue(permissions.implies(new FilePermission(fakeTmpDir.toString(), "read,readlink,write,delete")));
         // double check we overwrote java.io.tmpdir correctly for the test
         assertFalse(permissions.implies(new FilePermission(realTmpDir.toString(), "read")));
+        // PID file: r/w
+        assertTrue(permissions.implies(new FilePermission(environment.pidFile().toString(), "read,readlink,write,delete")));
     }
 }
diff --git a/src/test/java/org/elasticsearch/bwcompat/ScriptTransformBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/ScriptTransformBackwardsCompatibilityTests.java
new file mode 100644
index 00000000000..7d8a26812aa
--- /dev/null
+++ b/src/test/java/org/elasticsearch/bwcompat/ScriptTransformBackwardsCompatibilityTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
+import static org.hamcrest.Matchers.both;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.not;
+
+public class ScriptTransformBackwardsCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+    @Test
+    public void testTransformWithNoLangSpecified() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+        builder.field("transform");
+        if (getRandom().nextBoolean()) {
+            // Single transform
+            builder.startObject();
+            buildTransformScript(builder);
+            builder.endObject();
+        } else {
+            // Multiple transforms
+            int total = between(1, 10);
+            int actual = between(0, total - 1);
+            builder.startArray();
+            for (int s = 0; s < total; s++) {
+                builder.startObject();
+                if (s == actual) {
+                    buildTransformScript(builder);
+                } else {
+                    builder.field("script", "true");
+                }
+                builder.endObject();
+            }
+            builder.endArray();
+        }
+        assertAcked(client().admin().indices().prepareCreate("test").addMapping("test", builder));
+
+        indexRandom(getRandom().nextBoolean(), client().prepareIndex("test", "test", "notitle").setSource("content", "findme"), client()
+                .prepareIndex("test", "test", "badtitle").setSource("content", "findme", "title", "cat"),
+                client().prepareIndex("test", "test", "righttitle").setSource("content", "findme", "title", "table"));
+        GetResponse response = client().prepareGet("test", "test", "righttitle").get();
+        assertExists(response);
+        assertThat(response.getSource(), both(hasEntry("content", (Object) "findme")).and(not(hasKey("destination"))));
+
+        response = client().prepareGet("test", "test", "righttitle").setTransformSource(true).get();
+        assertExists(response);
+        assertThat(response.getSource(), both(hasEntry("destination", (Object) "findme")).and(not(hasKey("content"))));
+    }
+
+    private void buildTransformScript(XContentBuilder builder) throws IOException {
+        String script = "if (ctx._source['title']?.startsWith('t')) { ctx._source['destination'] = ctx._source[sourceField] }; ctx._source.remove(sourceField);";
+        if (getRandom().nextBoolean()) {
+            script = script.replace("sourceField", "'content'");
+        } else {
+            builder.field("params", ImmutableMap.of("sourceField", "content"));
+        }
+        builder.field("script", script);
+    }
+}
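The backwards-compatibility test above deliberately omits the `lang` field so the node's default script language is used. For clarity, a sketch of the single-transform mapping shape the test builds, using the same XContentBuilder calls that appear in the test (with `Collections.singletonMap` standing in for the Guava `ImmutableMap`):

```java
import java.util.Collections;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class TransformMappingSketch {
    public static void main(String[] args) throws Exception {
        // { "transform": { "script": "...", "params": { "sourceField": "content" } } }
        XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
        builder.startObject("transform");
        builder.field("script",
                "if (ctx._source['title']?.startsWith('t')) { ctx._source['destination'] = ctx._source[sourceField] }; ctx._source.remove(sourceField);");
        builder.field("params", Collections.singletonMap("sourceField", "content"));
        // no "lang" field: the default script language applies
        builder.endObject();
        builder.endObject();
        System.out.println(builder.string());
    }
}
```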
diff --git a/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java
index 5192e6f9e28..5d418d185d2 100644
--- a/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java
+++ b/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java
@@ -105,6 +105,7 @@ public class IndexCacheableQueryTests extends ElasticsearchTestCase {

         IndexReader reader = writer.getReader();
         IndexSearcher searcher = newSearcher(reader);
+        reader = searcher.getIndexReader(); // reader might be wrapped
         searcher.setQueryCache(cache);
         searcher.setQueryCachingPolicy(policy);

@@ -118,8 +119,9 @@ public class IndexCacheableQueryTests extends ElasticsearchTestCase {

         writer.addDocument(new Document());

-        DirectoryReader reader2 = writer.getReader();
+        IndexReader reader2 = writer.getReader();
         searcher = newSearcher(reader2);
+        reader2 = searcher.getIndexReader(); // reader might be wrapped
         searcher.setQueryCache(cache);
         searcher.setQueryCachingPolicy(policy);
diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
index 816409675af..18d77305a40 100644
--- a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
+++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
@@ -203,7 +203,7 @@ public class LuceneTest extends ElasticsearchTestCase {
         assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0);

         for (String file : dir.listAll()) {
-            assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2") || file.startsWith("extra"));
+            assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2"));
         }
         open.close();
         dir.close();
diff --git a/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java
similarity index 95%
rename from src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
rename to src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java
index 743fcf84aab..d18d7ff3498 100644
--- a/src/test/java/org/elasticsearch/common/lucene/search/MoreLikeThisQueryTests.java
+++ b/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.common.lucene.search;
+package org.elasticsearch.common.lucene.search.morelikethis;

 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -30,10 +30,10 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.junit.Test;

-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.equalTo;

 /**
diff --git a/src/test/java/org/elasticsearch/mlt/XMoreLikeThisTests.java b/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java
similarity index 98%
rename from src/test/java/org/elasticsearch/mlt/XMoreLikeThisTests.java
rename to src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java
index aaf2105b494..cafa2ef9eee 100644
--- a/src/test/java/org/elasticsearch/mlt/XMoreLikeThisTests.java
+++ b/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.mlt;
+package org.elasticsearch.common.lucene.search.morelikethis;

 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
diff --git a/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
index dd4ed24af5a..074d226f4f1 100644
--- a/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
+++ b/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.count.simple;

+import org.apache.lucene.util.Constants;
 import org.elasticsearch.action.count.CountResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
@@ -30,6 +31,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ExecutionException;

+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -142,6 +144,7 @@ public class SimpleCountTests extends ElasticsearchIntegrationTest {

     @Test
     public void localDependentDateTests() throws Exception {
+        assumeFalse("Locals are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false));
         assertAcked(prepareCreate("test")
                 .addMapping("type1", jsonBuilder().startObject()
diff --git a/src/test/java/org/elasticsearch/document/BulkTests.java b/src/test/java/org/elasticsearch/document/BulkTests.java
index 380828372bb..2a8e3609302 100644
--- a/src/test/java/org/elasticsearch/document/BulkTests.java
+++ b/src/test/java/org/elasticsearch/document/BulkTests.java
@@ -344,7 +344,7 @@ public class BulkTests extends ElasticsearchIntegrationTest {
             );
         }
         response = builder.execute().actionGet();
-        assertThat(response.hasFailures(), equalTo(false));
+        assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false));
         assertThat(response.getItems().length, equalTo(numDocs));
         for (int i = 0; i < numDocs; i++) {
             assertThat(response.getItems()[i].getItemId(), equalTo(i));
diff --git a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
index 9d3c1943887..0382693921f 100644
--- a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
+++ b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
@@ -314,7 +314,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {

         logger.info("--> add some metadata, additional type and template");
         client.admin().indices().preparePutMapping("test").setType("type2")
-                .setSource(jsonBuilder().startObject().startObject("type2").startObject("_source").field("enabled", false).endObject().endObject().endObject())
+                .setSource(jsonBuilder().startObject().startObject("type2").endObject().endObject())
                 .execute().actionGet();
         client.admin().indices().preparePutTemplate("template_1")
                 .setTemplate("te*")
diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java
index 94b83d94424..578c043b10b 100644
--- a/src/test/java/org/elasticsearch/get/GetActionTests.java
+++ b/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -228,8 +228,9 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     }

     @Test
-    public void realtimeGetWithCompress() throws Exception {
-        assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
+    public void realtimeGetWithCompressBackcompat() throws Exception {
+        assertAcked(prepareCreate("test")
+                .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1).put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id))
                 .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("_source").field("compress", true).endObject().endObject().endObject()));
         ensureGreen();

@@ -249,9 +250,8 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     @Test
     public void getFieldsWithDifferentTypes() throws Exception {
         assertAcked(prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1))
-                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", true).endObject().endObject().endObject())
+                .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
                 .addMapping("type2", jsonBuilder().startObject().startObject("type2")
-                        .startObject("_source").field("enabled", false).endObject()
                         .startObject("properties")
                         .startObject("str").field("type", "string").field("store", "yes").endObject()
                         .startObject("strs").field("type", "string").field("store", "yes").endObject()
@@ -339,7 +339,6 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
                 .startObject("properties")
                 .startObject("field").field("type", "string").field("store", "yes").endObject()
                 .endObject()
-                .startObject("_source").field("enabled", false).endObject()
                 .endObject().endObject().string();
         assertAcked(prepareCreate("test")
                 .addMapping("type1", mapping1)
@@ -397,7 +396,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     }

     @Test
-    public void testThatGetFromTranslogShouldWorkWithExclude() throws Exception {
+    public void testThatGetFromTranslogShouldWorkWithExcludeBackcompat() throws Exception {
         String index = "test";
         String type = "type1";

@@ -431,7 +430,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     }

     @Test
-    public void testThatGetFromTranslogShouldWorkWithInclude() throws Exception {
+    public void testThatGetFromTranslogShouldWorkWithIncludeBackcompat() throws Exception {
         String index = "test";
         String type = "type1";

@@ -466,7 +465,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {

     @SuppressWarnings("unchecked")
     @Test
-    public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFields() throws Exception {
+    public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFieldsBackcompat() throws Exception {
         String index = "test";
         String type = "type1";

@@ -925,9 +924,6 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
                 "  },\n" +
                 "  \"mappings\": {\n" +
                 "    \"doc\": {\n" +
-                "      \"_source\": {\n" +
-                "        \"enabled\": \"" + randomBoolean() + "\"\n" +
-                "      },\n" +
                 "      \"properties\": {\n" +
                 "        \"suggest\": {\n" +
                 "          \"type\": \"completion\"\n" +
@@ -970,9 +966,6 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
                 "  \"mappings\": {\n" +
                 "    \"parentdoc\": {},\n" +
                 "    \"doc\": {\n" +
-                "      \"_source\": {\n" +
-                "        \"enabled\": " + randomBoolean() + "\n" +
-                "      },\n" +
                 "      \"_parent\": {\n" +
                 "        \"type\": \"parentdoc\"\n" +
                 "      },\n" +
@@ -1002,7 +995,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     }

     @Test
-    public void testUngeneratedFieldsPartOfSourceUnstoredSourceDisabled() throws IOException {
+    public void testUngeneratedFieldsPartOfSourceUnstoredSourceDisabledBackcompat() throws IOException {
         indexSingleDocumentWithUngeneratedFieldsThatArePartOf_source(false, false);
         String[] fieldsList = {};
         // before refresh - document is only in translog
@@ -1016,7 +1009,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
     }

     @Test
-    public void testUngeneratedFieldsPartOfSourceEitherStoredOrSourceEnabled() throws IOException {
+    public void testUngeneratedFieldsPartOfSourceEitherStoredOrSourceEnabledBackcompat() throws IOException {
         boolean stored = randomBoolean();
         boolean sourceEnabled = true;
         if (stored) {
@@ -1039,7 +1032,8 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
         String createIndexSource = "{\n" +
                 "  \"settings\": {\n" +
                 "    \"index.translog.disable_flush\": true,\n" +
-                "    \"refresh_interval\": \"-1\"\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
                 "  },\n" +
                 "  \"mappings\": {\n" +
                 "    \"doc\": {\n" +
@@ -1161,7 +1155,8 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
         String createIndexSource = "{\n" +
                 "  \"settings\": {\n" +
                 "    \"index.translog.disable_flush\": true,\n" +
-                "    \"refresh_interval\": \"-1\"\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
                 "  },\n" +
                 "  \"mappings\": {\n" +
                 "    \"doc\": {\n" +
@@ -1215,7 +1210,8 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
         String createIndexSource = "{\n" +
                 "  \"settings\": {\n" +
                 "    \"index.translog.disable_flush\": true,\n" +
-                "    \"refresh_interval\": \"-1\"\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
                 "  },\n" +
                 "  \"mappings\": {\n" +
                 "    \"doc\": {\n" +
diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index c63e4b256ae..5c0b3e6ab30 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -79,7 +79,6 @@ import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.DirectoryUtils;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.index.translog.fs.FsTranslog;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -216,15 +215,15 @@ public class InternalEngineTests extends ElasticsearchTestCase {
         return new Store(shardId, EMPTY_SETTINGS, directoryService, new DummyShardLock(shardId));
     }

-    protected FsTranslog createTranslog() throws IOException {
+    protected Translog createTranslog() throws IOException {
         return createTranslog(primaryTranslogDir);
     }

-    protected FsTranslog createTranslog(Path translogPath) throws IOException {
-        return new FsTranslog(shardId, EMPTY_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, translogPath);
+    protected Translog createTranslog(Path translogPath) throws IOException {
+        return new Translog(shardId, EMPTY_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, translogPath);
     }

-    protected FsTranslog createTranslogReplica() throws IOException {
+    protected Translog createTranslogReplica() throws IOException {
         return createTranslog(replicaTranslogDir);
     }

@@ -1566,7 +1565,7 @@ public class InternalEngineTests extends ElasticsearchTestCase {
         // test that we can force start the engine , even if the translog is missing.
         engine.close();
         // fake a new translog, causing the engine to point to a missing one.
-        FsTranslog translog = createTranslog();
+        Translog translog = createTranslog();
         translog.markCommitted(translog.currentId());
         // we have to re-open the translog because o.w. it will complain about commit information going backwards, which is OK as we did a fake markComitted
         translog.close();
diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
index 21a9d851eed..558b095d29a 100644
--- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
@@ -63,7 +63,6 @@ import org.elasticsearch.index.store.DirectoryService;
 import org.elasticsearch.index.store.DirectoryUtils;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.index.translog.fs.FsTranslog;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
diff --git a/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java b/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
index 6b2180c840e..443ce1b39fb 100644
--- a/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
+++ b/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
@@ -130,7 +130,7 @@ public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrat
             // Single transform
             builder.startObject();
             buildTransformScript(builder);
-            builder.field("lang", GroovyScriptEngineService.NAME);
+            builder.field("lang", randomFrom(null, GroovyScriptEngineService.NAME));
             builder.endObject();
         } else {
             // Multiple transforms
@@ -144,7 +144,7 @@ public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrat
                 } else {
                     builder.field("script", "true");
                 }
-                builder.field("lang", GroovyScriptEngineService.NAME);
+                builder.field("lang", randomFrom(null, GroovyScriptEngineService.NAME));
                 builder.endObject();
             }
             builder.endArray();
diff --git a/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
index 01d7846740f..0b9a370b958 100644
--- a/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
@@ -423,7 +423,6 @@ public class SimpleAllMapperTests extends ElasticsearchSingleNodeTest {
         //just pick some example from DocumentMapperParser.rootTypeParsers
         rootTypes.put(SizeFieldMapper.NAME, "{\"enabled\" : true}");
         rootTypes.put(IndexFieldMapper.NAME, "{\"enabled\" : true}");
-        rootTypes.put(SourceFieldMapper.NAME, "{\"enabled\" : true}");
         rootTypes.put("include_in_all", "true");
         rootTypes.put("dynamic_date_formats", "[\"yyyy-MM-dd\", \"dd-MM-yyyy\"]");
         rootTypes.put("numeric_detection", "true");
diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
index b4ecfeeb220..7bb6868241e 100644
--- a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
@@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.util.Constants;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.LocaleUtils;
@@ -58,6 +59,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;

+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
 import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.index.mapper.string.SimpleStringMappingTests.docValuesType;
 import static org.hamcrest.Matchers.equalTo;
@@ -117,6 +119,7 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
     }

     public void testLocale() throws IOException {
+        assumeFalse("Locals are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false));
         String mapping = XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("type")
@@ -431,5 +434,13 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
                 .endObject()
                 .bytes());
         assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(43000L));
+
+        // but formatted dates still parse as milliseconds
+        doc = defaultMapper.parse("type", "2", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "1970-01-01T00:00:44.000Z")
+                .endObject()
+                .bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(44000L));
     }
 }
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
index 2d84e490462..bef5fa044bc 100644
--- a/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
@@ -20,7 +20,11 @@
 package org.elasticsearch.index.mapper.source;

 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.ParsedDocument;
@@ -33,6 +37,7 @@ import static org.hamcrest.Matchers.equalTo;
 *
 */
 public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
+    Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();

     @Test
     public void testCompressDisabled() throws Exception {
@@ -40,7 +45,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
                 .startObject("_source").field("compress", false).endObject()
                 .endObject().endObject().string();

-        DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .field("field1", "value1")
@@ -56,7 +61,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
                 .startObject("_source").field("compress", true).endObject()
                 .endObject().endObject().string();

-        DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .field("field1", "value1")
@@ -73,7 +78,7 @@ public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
                 .startObject("_source").field("compress_threshold", "200b").endObject()
                 .endObject().endObject().string();

-        DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .field("field1", "value1")
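These mapper tests pin the test index's `index.version.created` to 1.4.2 so that the deprecated `_source` compress options still parse; on indices created with the current version they are rejected as unsupported parameters. A minimal sketch of that kind of version gate, with hypothetical version ids standing in for `org.elasticsearch.Version` (the real check lives in the `_source` mapper's parser):

```java
public class VersionGateSketch {
    // Hypothetical stand-ins for Version ids (ordered: V_1_4_2 < V_2_0_0).
    static final int V_1_4_2 = 1040299;
    static final int V_2_0_0 = 2000099;

    // Old indices keep the deprecated option; new indices reject it.
    static boolean acceptCompressOption(int indexCreatedVersion) {
        return indexCreatedVersion < V_2_0_0;
    }

    public static void main(String[] args) {
        System.out.println(acceptCompressOption(V_1_4_2)); // true: parsed for backcompat
        System.out.println(acceptCompressOption(V_2_0_0)); // false: "unsupported parameters"
    }
}
```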
diff --git a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
index fb50de2205d..7b84424633c 100644
--- a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
@@ -42,6 +42,7 @@ import java.util.Map;
 import static org.hamcrest.Matchers.*;

 public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
+    Settings backcompatSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();

     public void testNoFormat() throws Exception {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
@@ -85,12 +86,12 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
         assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
     }

-    public void testJsonFormatCompressed() throws Exception {
+    public void testJsonFormatCompressedBackcompat() throws Exception {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("_source").field("format", "json").field("compress", true).endObject()
                 .endObject().endObject().string();

-        DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+        DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser();
         DocumentMapper documentMapper = parser.parse(mapping);
         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .field("field", "value")
@@ -122,8 +123,7 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
             assertTrue(e.getMessage().contains("unsupported parameters"));
         }

-        Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
-        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+        DocumentMapper documentMapper = createIndex("test", backcompatSettings).mapperService().documentMapperParser().parse(mapping);
         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .startObject("path1").field("field1", "value1").endObject()
@@ -148,8 +148,7 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
             assertTrue(e.getMessage().contains("unsupported parameters"));
         }

-        Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
-        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+        DocumentMapper documentMapper = createIndex("test", backcompatSettings).mapperService().documentMapperParser().parse(mapping);
         ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
                 .startObject("path1").field("field1", "value1").endObject()
@@ -162,12 +161,12 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
         assertThat(sourceAsMap.containsKey("path2"), equalTo(true));
     }

-    public void testDefaultMappingAndNoMapping() throws Exception {
+    public void testDefaultMappingAndNoMappingBackcompat() throws Exception {
         String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
                 .startObject("_source").field("enabled", false).endObject()
                 .endObject().endObject().string();

-        DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+        DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser();
         DocumentMapper mapper = parser.parse("my_type", null, defaultMapping);
         assertThat(mapper.type(), equalTo("my_type"));
         assertThat(mapper.sourceMapper().enabled(), equalTo(false));
@@ -190,7 +189,7 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
         }
     }

-    public void testDefaultMappingAndWithMappingOverride() throws Exception {
+    public void testDefaultMappingAndWithMappingOverrideBackcompat() throws Exception {
         String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
                 .startObject("_source").field("enabled", false).endObject()
                 .endObject().endObject().string();
@@ -199,17 +198,17 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
                 .startObject("_source").field("enabled", true).endObject()
                 .endObject().endObject().string();

-        DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse("my_type", mapping, defaultMapping);
+        DocumentMapper mapper = createIndex("test", backcompatSettings).mapperService().documentMapperParser().parse("my_type", mapping, defaultMapping);
         assertThat(mapper.type(), equalTo("my_type"));
         assertThat(mapper.sourceMapper().enabled(), equalTo(true));
     }

-    public void testDefaultMappingAndNoMappingWithMapperService() throws Exception {
+    public void testDefaultMappingAndNoMappingWithMapperServiceBackcompat() throws Exception {
         String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
                 .startObject("_source").field("enabled", false).endObject()
                 .endObject().endObject().string();

-        MapperService mapperService = createIndex("test").mapperService();
+        MapperService mapperService = createIndex("test", backcompatSettings).mapperService();
         mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);

         DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type").v1();
@@ -217,12 +216,12 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
         assertThat(mapper.sourceMapper().enabled(), equalTo(false));
     }

-    public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception {
+    public void testDefaultMappingAndWithMappingOverrideWithMapperServiceBackcompat() throws Exception {
         String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
                 .startObject("_source").field("enabled", false).endObject()
                 .endObject().endObject().string();

-        MapperService mapperService = createIndex("test").mapperService();
+        MapperService mapperService = createIndex("test", backcompatSettings).mapperService();
         mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true);

         String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
diff --git a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
index 97c279cb66f..072cc80271a 100644
--- a/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
@@ -27,24 +27,22 @@ import org.junit.Test;

 import static org.hamcrest.Matchers.equalTo;

-/**
- *
- */
+// TODO: move this test...it doesn't need to be by itself
 public class ParseMappingTypeLevelTests extends ElasticsearchSingleNodeTest {

     @Test
     public void testTypeLevel() throws Exception {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("_source").field("enabled", false).endObject()
+                .startObject("_index").field("enabled", true).endObject()
                 .endObject().endObject().string();

         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
         DocumentMapper mapper = parser.parse("type", mapping);
         assertThat(mapper.type(), equalTo("type"));
-        assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+        assertThat(mapper.indexMapper().enabled(), equalTo(true));

         mapper = parser.parse(mapping);
         assertThat(mapper.type(), equalTo("type"));
-        assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+        assertThat(mapper.indexMapper().enabled(), equalTo(true));
     }
 }
diff --git a/src/test/java/org/elasticsearch/index/query/mlt.json b/src/test/java/org/elasticsearch/index/query/mlt.json
index 3f45bb4bdda..d3d98bee5aa 100644
--- a/src/test/java/org/elasticsearch/index/query/mlt.json
+++ b/src/test/java/org/elasticsearch/index/query/mlt.json
@@ -1,8 +1,8 @@
 {
-    more_like_this:{
-        fields:["name.first", "name.last"],
-        like_text:"something",
-        min_term_freq:1,
-        max_query_terms:12
+    "more_like_this" : {
+        "fields" : ["name.first", "name.last"],
+        "like_text" : "something",
+        "min_term_freq" : 1,
+        "max_query_terms" : 12
     }
 }
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/query/mltField.json b/src/test/java/org/elasticsearch/index/query/mltField.json
deleted file mode 100644
index 9f9eb591206..00000000000
--- a/src/test/java/org/elasticsearch/index/query/mltField.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    more_like_this_field:{
-        "name.first":{
-            like_text:"something",
-            min_term_freq:1,
-            max_query_terms:12
-        }
-    }
-}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 544a3472f96..b783f731bec 100644
--- a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -28,11 +28,11 @@ import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 import org.junit.Test;

-import java.io.Closeable;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.util.HashSet;
@@ -43,6 +43,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 import static org.hamcrest.Matchers.equalTo;

 /**
@@ -251,4 +252,42 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {
                                               Path... shardPaths) throws IOException {
         ShardStateMetaData.FORMAT.write(shardStateMetaData, shardStateMetaData.version, shardPaths);
     }
+
+    public void testDurableFlagHasEffect() {
+        createIndex("test");
+        ensureGreen();
+        client().prepareIndex("test", "bar", "1").setSource("{}").get();
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService test = indicesService.indexService("test");
+        IndexShard shard = test.shard(0);
+        setDurability(shard, Translog.Durabilty.REQUEST);
+        assertFalse(shard.engine().getTranslog().syncNeeded());
+        setDurability(shard, Translog.Durabilty.ASYNC);
+        client().prepareIndex("test", "bar", "2").setSource("{}").get();
+        assertTrue(shard.engine().getTranslog().syncNeeded());
+        setDurability(shard, Translog.Durabilty.REQUEST);
+        client().prepareDelete("test", "bar", "1").get();
+        assertFalse(shard.engine().getTranslog().syncNeeded());
+
+        setDurability(shard, Translog.Durabilty.ASYNC);
+        client().prepareDelete("test", "bar", "2").get();
+        assertTrue(shard.engine().getTranslog().syncNeeded());
+        setDurability(shard, Translog.Durabilty.REQUEST);
+        assertNoFailures(client().prepareBulk()
+                .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
+                .add(client().prepareDelete("test", "bar", "1")).get());
+        assertFalse(shard.engine().getTranslog().syncNeeded());
+
+        setDurability(shard, Translog.Durabilty.ASYNC);
+        assertNoFailures(client().prepareBulk()
+                .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
+                .add(client().prepareDelete("test", "bar", "3")).get());
+        setDurability(shard, Translog.Durabilty.REQUEST);
+        assertTrue(shard.engine().getTranslog().syncNeeded());
+    }
+
+    private void setDurability(IndexShard shard, Translog.Durabilty durabilty) {
+        client().admin().indices().prepareUpdateSettings(shard.shardId.getIndex()).setSettings(settingsBuilder().put(Translog.INDEX_TRANSLOG_DURABILITY, durabilty.name()).build()).get();
+        assertEquals(durabilty, shard.getTranslogDurability());
+    }
 }
b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -28,11 +28,11 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; -import java.io.Closeable; import java.io.IOException; import java.nio.file.Path; import java.util.HashSet; @@ -43,6 +43,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; /** @@ -251,4 +252,42 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { Path... shardPaths) throws IOException { ShardStateMetaData.FORMAT.write(shardStateMetaData, shardStateMetaData.version, shardPaths); } + + public void testDurableFlagHasEffect() { + createIndex("test"); + ensureGreen(); + client().prepareIndex("test", "bar", "1").setSource("{}").get(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService("test"); + IndexShard shard = test.shard(0); + setDurability(shard, Translog.Durabilty.REQUEST); + assertFalse(shard.engine().getTranslog().syncNeeded()); + setDurability(shard, Translog.Durabilty.ASYNC); + client().prepareIndex("test", "bar", "2").setSource("{}").get(); + assertTrue(shard.engine().getTranslog().syncNeeded()); + setDurability(shard, Translog.Durabilty.REQUEST); + client().prepareDelete("test", "bar", "1").get(); + assertFalse(shard.engine().getTranslog().syncNeeded()); + + setDurability(shard, Translog.Durabilty.ASYNC); + client().prepareDelete("test", "bar", "2").get(); + assertTrue(shard.engine().getTranslog().syncNeeded()); + setDurability(shard, Translog.Durabilty.REQUEST); + assertNoFailures(client().prepareBulk() + .add(client().prepareIndex("test", "bar", "3").setSource("{}")) + .add(client().prepareDelete("test", "bar", "1")).get()); + assertFalse(shard.engine().getTranslog().syncNeeded()); + + setDurability(shard, Translog.Durabilty.ASYNC); + assertNoFailures(client().prepareBulk() + .add(client().prepareIndex("test", "bar", "4").setSource("{}")) + .add(client().prepareDelete("test", "bar", "3")).get()); + setDurability(shard, Translog.Durabilty.REQUEST); + assertTrue(shard.engine().getTranslog().syncNeeded()); + } + + private void setDurability(IndexShard shard, Translog.Durabilty durabilty) { + client().admin().indices().prepareUpdateSettings(shard.shardId.getIndex()).setSettings(settingsBuilder().put(Translog.INDEX_TRANSLOG_DURABILITY, durabilty.name()).build()).get(); + assertEquals(durabilty, shard.getTranslogDurability()); + } } diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java similarity index 78% rename from src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java rename to src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java index d87c2d53e8d..1f317bad7a3 
100644 --- a/src/test/java/org/elasticsearch/index/translog/fs/FsBufferedTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.util.BigArrays; @@ -28,14 +28,13 @@ import java.io.IOException; /** * */ -@TestLogging("index.translog.fs:TRACE") -public class FsBufferedTranslogTests extends AbstractTranslogTests { +public class BufferedTranslogTests extends TranslogTests { @Override - protected FsTranslog create() throws IOException { - return new FsTranslog(shardId, + protected Translog create() throws IOException { + return new Translog(shardId, ImmutableSettings.settingsBuilder() - .put("index.translog.fs.type", FsTranslogFile.Type.BUFFERED.name()) + .put("index.translog.fs.type", TranslogFile.Type.BUFFERED.name()) .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024)) .build(), BigArrays.NON_RECYCLING_INSTANCE, translogDir diff --git a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/FsSimpleTranslogTests.java similarity index 74% rename from src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java rename to src/test/java/org/elasticsearch/index/translog/FsSimpleTranslogTests.java index 8ee569fe27c..cdbeeaaab60 100644 --- a/src/test/java/org/elasticsearch/index/translog/fs/FsSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/FsSimpleTranslogTests.java @@ -17,24 +17,22 @@ * under the License. */ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; /** * */ -@TestLogging("index.translog.fs:TRACE") -public class FsSimpleTranslogTests extends AbstractTranslogTests { +public class FsSimpleTranslogTests extends TranslogTests { @Override - protected FsTranslog create() throws IOException { - return new FsTranslog(shardId, - ImmutableSettings.settingsBuilder().put("index.translog.fs.type", FsTranslogFile.Type.SIMPLE.name()).build(), + protected Translog create() throws IOException { + return new Translog(shardId, + ImmutableSettings.settingsBuilder().put("index.translog.fs.type", TranslogFile.Type.SIMPLE.name()).build(), BigArrays.NON_RECYCLING_INSTANCE, translogDir); } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/TranslogTests.java similarity index 88% rename from src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java rename to src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d7e3c25edce..583058d019b 100644 --- a/src/test/java/org/elasticsearch/index/translog/fs/AbstractTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.translog.fs; +package org.elasticsearch.index.translog; import org.apache.lucene.index.Term; import org.apache.lucene.util.IOUtils; @@ -27,12 +27,13 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.Index; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.*; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.After; @@ -43,6 +44,7 @@ import java.io.EOFException; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; @@ -60,11 +62,11 @@ import static org.hamcrest.Matchers.*; * */ @LuceneTestCase.SuppressFileSystems("ExtrasFS") -public abstract class AbstractTranslogTests extends ElasticsearchTestCase { +public class TranslogTests extends ElasticsearchTestCase { protected final ShardId shardId = new ShardId(new Index("index"), 1); - protected FsTranslog translog; + protected Translog translog; protected Path translogDir; @Override @@ -102,9 +104,11 @@ public abstract class AbstractTranslogTests extends ElasticsearchTestCase { } } - - protected abstract FsTranslog create() throws IOException; - + protected Translog create() throws IOException { + return new Translog(shardId, + ImmutableSettings.settingsBuilder().put("index.translog.fs.type", TranslogFile.Type.SIMPLE.name()).build(), + BigArrays.NON_RECYCLING_INSTANCE, translogDir); + } protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) { list.add(op); @@ -114,24 +118,24 @@ public abstract class AbstractTranslogTests extends ElasticsearchTestCase { public void testIdParsingFromFile() { long id = randomIntBetween(0, Integer.MAX_VALUE); - Path file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + id); - assertThat(FsTranslog.parseIdFromFileName(file), equalTo(id)); + Path file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id); + assertThat(Translog.parseIdFromFileName(file), equalTo(id)); - file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + id + ".recovering"); - assertThat(FsTranslog.parseIdFromFileName(file), equalTo(id)); + file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id + ".recovering"); + assertThat(Translog.parseIdFromFileName(file), equalTo(id)); - file = translogDir.resolve(FsTranslog.TRANSLOG_FILE_PREFIX + randomNonTranslogPatternString(1, 10) + id); - assertThat(FsTranslog.parseIdFromFileName(file), equalTo(-1l)); + file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + randomNonTranslogPatternString(1, 10) + id); + assertThat(Translog.parseIdFromFileName(file), equalTo(-1l)); - file = translogDir.resolve(randomNonTranslogPatternString(1, FsTranslog.TRANSLOG_FILE_PREFIX.length() - 1)); - assertThat(FsTranslog.parseIdFromFileName(file), equalTo(-1l)); + file = translogDir.resolve(randomNonTranslogPatternString(1, Translog.TRANSLOG_FILE_PREFIX.length() - 1)); + 
assertThat(Translog.parseIdFromFileName(file), equalTo(-1l)); } private static String randomNonTranslogPatternString(int min, int max) { String string; do { string = randomRealisticUnicodeOfCodepointLength(randomIntBetween(min, max)); - } while (FsTranslog.PARSE_ID_PATTERN.matcher(string).matches()); + } while (Translog.PARSE_ID_PATTERN.matcher(string).matches()); return string; } @@ -361,14 +365,14 @@ public abstract class AbstractTranslogTests extends ElasticsearchTestCase { } - public void assertFileIsPresent(FsTranslog translog, long id) { + public void assertFileIsPresent(Translog translog, long id) { if (Files.exists(translogDir.resolve(translog.getFilename(id)))) { return; } fail(translog.getFilename(id) + " is not present in any location: " + translog.location()); } - public void assertFileDeleted(FsTranslog translog, long id) { + public void assertFileDeleted(Translog translog, long id) { assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(translog.getFilename(id)))); } @@ -771,4 +775,59 @@ public abstract class AbstractTranslogTests extends ElasticsearchTestCase { } } + + public void testSyncUpTo() throws IOException { + int translogOperations = randomIntBetween(10, 100); + int count = 0; + for (int op = 0; op < translogOperations; op++) { + final Translog.Location location = translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + if (randomBoolean()) { + assertTrue("at least one operation pending", translog.syncNeeded()); + assertTrue("this operation has not been synced", translog.ensureSynced(location)); + assertFalse("the last call to ensureSynced synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced + translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + assertTrue("one pending operation", translog.syncNeeded()); + assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now + assertTrue("we have only synced a previous operation so far", translog.syncNeeded()); + } + if (rarely()) { + translog.newTranslog(); + assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now + assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); + } + + if (randomBoolean()) { + translog.sync(); + assertFalse("translog has been synced already", translog.ensureSynced(location)); + } + } + } + + public void testLocationComparison() throws IOException { + List locations = newArrayList(); + int translogOperations = randomIntBetween(10, 100); + int count = 0; + for (int op = 0; op < translogOperations; op++) { + locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); + if (rarely()) { + translog.newTranslog(); + } + } + Collections.shuffle(locations, random()); + Translog.Location max = locations.get(0); + for (Translog.Location location : locations) { + max = max(max, location); + } + + assertEquals(max.translogId, translog.currentId()); + final Translog.Operation read = translog.read(max); + assertEquals(read.getSource().source.toUtf8(), Integer.toString(count)); + } + + public static Translog.Location max(Translog.Location a, Translog.Location b) { + if (a.compareTo(b) > 0) { + return a; + } + return b; + } } diff --git 
a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java index 11638c74660..03c8bbe56e1 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java @@ -126,14 +126,14 @@ public class UpdateMappingIntegrationTests extends ElasticsearchIntegrationTest client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc") - .setSource("{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}") + .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}") .execute().actionGet(); assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet(); assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(), - equalTo("{\"doc\":{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}}")); + equalTo("{\"doc\":{\"properties\":{\"date\":{\"type\":\"integer\"}}}}")); } @Test(expected = MergeMappingException.class) diff --git a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java index f09562f690e..ab6061856b2 100644 --- a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java +++ b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.memory.breaker; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; diff --git a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java index a46a273b2ba..81ef1030f76 100644 --- a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java +++ b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; @@ -35,7 +36,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; 
import org.elasticsearch.discovery.DiscoverySettings; @@ -49,10 +52,13 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -130,7 +136,125 @@ public class RareClusterStateTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareSearch("test").get(), 0); } + public void testDelayedMappingPropagationOnPrimary() throws Exception { + // Here we want to test that things go well if there is a first request + // that adds mappings but before mappings are propagated to all nodes + // another index request introduces the same mapping. The master node + // will reply immediately since it did not change the cluster state + // but the change might not be on the node that performed the indexing + // operation yet + + Settings settings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build(); + final List nodeNames = internalCluster().startNodesAsync(2, settings).get(); + assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String otherNode = null; + for (String node : nodeNames) { + if (node.equals(master) == false) { + otherNode = node; + break; + } + } + assertNotNull(otherNode); + + // Don't allocate the shard on the master node + assertAcked(prepareCreate("index").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.exclude._name", master)).get()); + ensureGreen(); + + // Check routing tables + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertEquals(master, state.nodes().masterNode().name()); + List shards = state.routingTable().allShards("index"); + assertThat(shards, hasSize(1)); + for (ShardRouting shard : shards) { + if (shard.primary()) { + // primary must not be on the master node + assertFalse(state.nodes().masterNodeId().equals(shard.currentNodeId())); + } else { + fail(); // only primaries + } + } + + // Block cluster state processing where our shard is + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, getRandom()); + internalCluster().setDisruptionScheme(disruption); + disruption.startDisrupting(); + + // Add a new mapping... 
+ final AtomicReference putMappingResponse = new AtomicReference<>(); + client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener() { + @Override + public void onResponse(PutMappingResponse response) { + putMappingResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + putMappingResponse.set(e); + } + }); + // ...and wait for mappings to be available on master + assertBusy(new Runnable() { + @Override + public void run() { + ImmutableOpenMap indexMappings = client().admin().indices().prepareGetMappings("index").get().getMappings().get("index"); + assertNotNull(indexMappings); + MappingMetaData typeMappings = indexMappings.get("type"); + assertNotNull(typeMappings); + Object properties; + try { + properties = typeMappings.getSourceAsMap().get("properties"); + } catch (IOException e) { + throw new AssertionError(e); + } + assertNotNull(properties); + Object fieldMapping = ((Map) properties).get("field"); + assertNotNull(fieldMapping); + } + }); + + final AtomicReference docIndexResponse = new AtomicReference<>(); + client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener() { + @Override + public void onResponse(IndexResponse response) { + docIndexResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + docIndexResponse.set(e); + } + }); + + // Wait a bit to make sure that the reason why we did not get a response + // is that cluster state processing is blocked and not just that it takes + // time to process the indexing request + Thread.sleep(100); + assertThat(putMappingResponse.get(), equalTo(null)); + assertThat(docIndexResponse.get(), equalTo(null)); + + // Now make sure the indexing request finishes successfully + disruption.stopDisrupting(); + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); + PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); + assertTrue(resp.isAcknowledged()); + assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); + IndexResponse docResp = (IndexResponse) docIndexResponse.get(); + assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), + 1, docResp.getShardInfo().getTotal()); + } + }); + } + public void testDelayedMappingPropagationOnReplica() throws Exception { + // This is essentially the same thing as testDelayedMappingPropagationOnPrimary + // but for replicas // Here we want to test that everything goes well if the mappings that // are needed for a document are not available on the replica at the // time of indexing it diff --git a/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java index 2cfd711787f..af800643e8f 100644 --- a/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java +++ b/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java @@ -51,6 +51,10 @@ public class TTLPercolatorTests extends ElasticsearchIntegrationTest { private static final long PURGE_INTERVAL = 200; + @Override + protected void beforeIndexDeletion() { + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder() diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java index 6587ca84c72..1a3e0e70168 100644 --- a/src/test/java/org/elasticsearch/river/RiverTests.java +++ 
b/src/test/java/org/elasticsearch/river/RiverTests.java @@ -43,6 +43,10 @@ import static org.hamcrest.Matchers.equalTo; @AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates") public class RiverTests extends ElasticsearchIntegrationTest { + @Override + protected void beforeIndexDeletion() { + } + @Test public void testRiverStart() throws Exception { startAndCheckRiverIsStarted("dummy-river-test"); diff --git a/src/test/java/org/elasticsearch/script/GroovySecurityTests.java b/src/test/java/org/elasticsearch/script/GroovySecurityTests.java new file mode 100644 index 00000000000..1fc65e9e9c9 --- /dev/null +++ b/src/test/java/org/elasticsearch/script/GroovySecurityTests.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.nio.file.Path; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.CoreMatchers.equalTo; + +/** + * Tests for the Groovy security permissions + */ +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) +public class GroovySecurityTests extends ElasticsearchIntegrationTest { + + @Override + public void setUp() throws Exception { + super.setUp(); + assumeTrue("test requires security manager to be enabled", System.getSecurityManager() != null); + } + + @Test + public void testEvilGroovyScripts() throws Exception { + int nodes = randomIntBetween(1, 3); + Settings nodeSettings = ImmutableSettings.builder() + .put("script.inline", true) + .put("script.indexed", true) + .build(); + internalCluster().startNodesAsync(nodes, nodeSettings).get(); + client().admin().cluster().prepareHealth().setWaitForNodes(nodes + "").get(); + + client().prepareIndex("test", "doc", "1").setSource("foo", 5, "bar", "baz").setRefresh(true).get(); + + // Plain test + assertSuccess(""); + // numeric field access + assertSuccess("def foo = doc['foo'].value; if (foo == null) { return 5; }"); + // string field access + assertSuccess("def bar = doc['bar'].value; if (bar == null) { return 5; }"); + // List + assertSuccess("def list = [doc['foo'].value, 3, 4]; def v = list.get(1); list.add(10)"); + // Ranges + assertSuccess("def range = 1..doc['foo'].value; def v = range.get(0)"); + // Maps + assertSuccess("def v = doc['foo'].value; def m = [:]; m.put(\\\"value\\\", v)"); + // Times + assertSuccess("def 
t = Instant.now().getMillis()"); + // GroovyCollections + assertSuccess("def n = [1,2,3]; GroovyCollections.max(n)"); + + // Fail cases: + // AccessControlException[access denied ("java.io.FilePermission" "<>" "execute")] + assertFailure("pr = Runtime.getRuntime().exec(\\\"touch /tmp/gotcha\\\"); pr.waitFor()"); + + // AccessControlException[access denied ("java.lang.RuntimePermission" "accessClassInPackage.sun.reflect")] + assertFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\\\"year\\\").setAccessible(true)"); + assertFailure("d = new DateTime(); d.\\\"${'get' + 'Class'}\\\"()." + + "\\\"${'getDeclared' + 'Method'}\\\"(\\\"year\\\").\\\"${'set' + 'Accessible'}\\\"(false)"); + assertFailure("Class.forName(\\\"org.joda.time.DateTime\\\").getDeclaredMethod(\\\"year\\\").setAccessible(true)"); + + // AccessControlException[access denied ("groovy.security.GroovyCodeSourcePermission" "/groovy/shell")] + assertFailure("Eval.me('2 + 2')"); + assertFailure("Eval.x(5, 'x + 2')"); + + // AccessControlException[access denied ("java.lang.RuntimePermission" "accessDeclaredMembers")] + assertFailure("d = new Date(); java.lang.reflect.Field f = Date.class.getDeclaredField(\\\"fastTime\\\");" + + " f.setAccessible(true); f.get(\\\"fastTime\\\")"); + + // AccessControlException[access denied ("java.io.FilePermission" "<>" "execute")] + assertFailure("def methodName = 'ex'; Runtime.\\\"${'get' + 'Runtime'}\\\"().\\\"${methodName}ec\\\"(\\\"touch /tmp/gotcha2\\\")"); + + // test a directory we normally have access to, but the groovy script does not. + Path dir = createTempDir(); + // TODO: figure out the necessary escaping for windows paths here :) + if (!Constants.WINDOWS) { + // access denied ("java.io.FilePermission" ".../tempDir-00N" "read") + assertFailure("new File(\\\"" + dir + "\\\").exists()"); + } + } + + private void assertSuccess(String script) { + logger.info("--> script: " + script); + SearchResponse resp = client().prepareSearch("test") + .setSource("{\"query\": {\"match_all\": {}}," + + "\"sort\":{\"_script\": {\"script\": \""+ script + + "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); + assertNoFailures(resp); + assertEquals(1, resp.getHits().getTotalHits()); + assertThat(resp.getHits().getAt(0).getSortValues(), equalTo(new Object[]{7.0})); + } + + private void assertFailure(String script) { + logger.info("--> script: " + script); + SearchResponse resp = client().prepareSearch("test") + .setSource("{\"query\": {\"match_all\": {}}," + + "\"sort\":{\"_script\": {\"script\": \""+ script + + "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); + assertEquals(0, resp.getHits().getTotalHits()); + ShardSearchFailure fails[] = resp.getShardFailures(); + // TODO: GroovyScriptExecutionException needs work + for (ShardSearchFailure fail : fails) { + assertTrue(fail.getCause().toString().contains("AccessControlException[access denied")); + } + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/AvgBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/AvgBucketTests.java new file mode 100644 index 00000000000..2e5e8258561 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/AvgBucketTests.java @@ -0,0 +1,399 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.avgBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class AvgBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void testDocCount_topLevel() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double sum = 0; + int count = 0; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + count++; + sum += bucket.getDocCount(); + } + + double avgValue = count == 0 ? Double.NaN : (sum / count); + InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgValue)); + } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double sum = 0; + int count = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + count++; + sum += bucket.getDocCount(); + } + + double avgValue = count == 0 ? 
Double.NaN : (sum / count); + InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgValue)); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(avgBucket("avg_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + double bucketSum = 0; + int count = 0; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + count++; + bucketSum += sum.value(); + } + + double avgValue = count == 0 ? Double.NaN : (bucketSum / count); + InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgValue)); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double bucketSum = 0; + int count = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + count++; + bucketSum += sum.value(); + } + } + + double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); + InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgValue)); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double bucketSum = 0; + int count = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + + count++; + bucketSum += sum.value(); + } + + double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); + InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgValue)); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(avgBucket("avg_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); + assertThat(avgBucketValue.value(), equalTo(Double.NaN)); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(avgBucket("avg_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(avgBucket("avg_terms_bucket").setBucketsPaths("terms>avg_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + double aggTermsSum = 0; + int aggTermsCount = 0; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double aggHistoSum = 0; + int aggHistoCount = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + + aggHistoCount++; + aggHistoSum += bucket.getDocCount(); + } + + double avgHistoValue = aggHistoCount == 0 ? Double.NaN : (aggHistoSum / aggHistoCount); + InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_histo_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_histo_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgHistoValue)); + + aggTermsCount++; + aggTermsSum += avgHistoValue; + } + + double avgTermsValue = aggTermsCount == 0 ? 
Double.NaN : (aggTermsSum / aggTermsCount); + InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_terms_bucket"); + assertThat(avgBucketValue, notNullValue()); + assertThat(avgBucketValue.getName(), equalTo("avg_terms_bucket")); + assertThat(avgBucketValue.value(), equalTo(avgTermsValue)); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java index b1ac6756f1e..1125918171b 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.derivative.Derivative; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.joda.time.DateTime; @@ -45,6 +46,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHist import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; @@ -146,6 +148,52 @@ public class DateDerivativeTests extends ElasticsearchIntegrationTest { assertThat(docCountDeriv.value(), equalTo(1d)); } + @Test + public void singleValuedField_normalised() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count").unit(DateHistogramInterval.DAY))).execute() + .actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + Derivative docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, nullValue()); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo(1d, 0.00001)); + assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 31d, 0.00001)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, 
notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo(1d, 0.00001)); + assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 29d, 0.00001)); + } + @Test public void singleValuedField_WithSubAggregation() throws Exception { SearchResponse response = client() diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java index 0135f72b4be..8ad0605d210 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram. import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.derivative.Derivative; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; @@ -199,6 +200,49 @@ public class DerivativeTests extends ElasticsearchIntegrationTest { } } + /** + * test first and second derivative on the single valued field, with units supplied to normalise the derivative values + */ + @Test + public void singleValuedField_normalised() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count").unit("1")) + .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv").unit("10"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + Derivative docCountDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), closeTo((double) (firstDerivValueCounts[i - 1]), 0.00001)); + assertThat(docCountDeriv.normalizedValue(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001)); + } else { + assertThat(docCountDeriv, nullValue()); + } + Derivative docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv"); + if (i > 1) { + assertThat(docCount2ndDeriv, notNullValue()); + assertThat(docCount2ndDeriv.value(), closeTo((double) (secondDerivValueCounts[i - 2]), 0.00001)); + assertThat(docCount2ndDeriv.normalizedValue(), closeTo((double) (secondDerivValueCounts[i - 2]) * 2, 0.00001)); + } else { + assertThat(docCount2ndDeriv, nullValue()); + } + } + } + @Test public void singleValueAggDerivative() throws Exception { SearchResponse response = client() diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java index 0b0f720344f..5e1acdef317 
100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java @@ -101,13 +101,13 @@ public class ReducerHelperTests extends ElasticsearchTestCase { public static double calculateMetric(double[] values, ValuesSourceMetricsAggregationBuilder metric) { if (metric instanceof MinBuilder) { - double accumulator = Double.MAX_VALUE; + double accumulator = Double.POSITIVE_INFINITY; for (double value : values) { accumulator = Math.min(accumulator, value); } return accumulator; } else if (metric instanceof MaxBuilder) { - double accumulator = Double.MIN_VALUE; + double accumulator = Double.NEGATIVE_INFINITY; for (double value : values) { accumulator = Math.max(accumulator, value); } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/SumBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/SumBucketTests.java new file mode 100644 index 00000000000..a4cc26bed79 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/SumBucketTests.java @@ -0,0 +1,378 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.sumBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class SumBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void testDocCount_topLevel() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + double sum = 0; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + sum += bucket.getDocCount(); + } + + InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(sum)); + } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double sum = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + sum += bucket.getDocCount(); + } + + InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(sum)); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + 
.addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(sumBucket("sum_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + double bucketSum = 0; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + bucketSum += sum.value(); + } + + InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(bucketSum)); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double bucketSum = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + bucketSum += sum.value(); + } + } + + InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(bucketSum)); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + 
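+ // Under GapPolicy.INSERT_ZEROS the reducer resolves empty buckets to 0 instead of
+ // skipping them, so the expected sum below accumulates every histogram bucket,
+ // without the doc_count guard used in the plain testMetric_asSubAgg variant above.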
assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double bucketSum = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + + bucketSum += sum.value(); + } + + InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(bucketSum)); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(sumBucket("sum_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); + assertThat(sumBucketValue.value(), equalTo(0.0)); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(sumBucket("sum_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(sumBucket("sum_terms_bucket").setBucketsPaths("terms>sum_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + double aggTermsSum = 0; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + double aggHistoSum = 0; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + 
assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + + aggHistoSum += bucket.getDocCount(); + } + + InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_histo_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_histo_bucket")); + assertThat(sumBucketValue.value(), equalTo(aggHistoSum)); + + aggTermsSum += aggHistoSum; + } + + InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_terms_bucket"); + assertThat(sumBucketValue, notNullValue()); + assertThat(sumBucketValue.getName(), equalTo("sum_terms_bucket")); + assertThat(sumBucketValue.value(), equalTo(aggTermsSum)); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java index b87de1cbb8a..9973ee0fc69 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java @@ -35,11 +35,11 @@ import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregat import org.elasticsearch.search.aggregations.reducers.BucketHelpers; import org.elasticsearch.search.aggregations.reducers.ReducerHelperTests; import org.elasticsearch.search.aggregations.reducers.SimpleValue; -import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.HoltLinearModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel; import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelBuilder; import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel; -import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.EwmaModel; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; @@ -65,7 +65,6 @@ import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; @ElasticsearchIntegrationTest.SuiteScopeTest -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10972") public class MovAvgTests extends ElasticsearchIntegrationTest { private static final String INTERVAL_FIELD = "l_value"; @@ -85,7 +84,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { enum MovAvgType { - SIMPLE ("simple"), LINEAR("linear"), SINGLE("single"), DOUBLE("double"); + SIMPLE ("simple"), LINEAR("linear"), EWMA("ewma"), HOLT("holt"); private final String name; @@ -200,11 +199,11 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { case LINEAR: values.add(linear(window)); break; - case SINGLE: - values.add(singleExp(window)); + case EWMA: + values.add(ewma(window)); break; - case DOUBLE: - values.add(doubleExp(window)); + case HOLT: + values.add(holt(window)); break; } @@ -247,12 +246,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } /** - * Single exponential moving avg + * Exponentially weighted (EWMA, Single exponential) moving avg * * @param window Window of values to compute movavg for * @return */ - private double singleExp(Collection window) { + private double ewma(Collection window) { double avg = 0; boolean first = true; @@ -268,11 +267,11 @@ public class MovAvgTests
extends ElasticsearchIntegrationTest { } /** - * Double exponential moving avg + * Holt-Linear (Double exponential) moving avg * @param window Window of values to compute movavg for * @return */ - private double doubleExp(Collection window) { + private double holt(Collection window) { double s = 0; double last_s = 0; @@ -412,7 +411,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test - public void singleSingleValuedField() { + public void ewmaSingleValuedField() { SearchResponse response = client() .prepareSearch("idx").setTypes("type") @@ -422,12 +421,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(alpha)) + .modelBuilder(new EwmaModel.EWMAModelBuilder().alpha(alpha)) .gapPolicy(gapPolicy) .setBucketsPaths("_count")) .subAggregation(movingAvg("movavg_values") .window(windowSize) - .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(alpha)) + .modelBuilder(new EwmaModel.EWMAModelBuilder().alpha(alpha)) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) ).execute().actionGet(); @@ -440,8 +439,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List buckets = histo.getBuckets(); assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - List expectedCounts = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.COUNT.toString()); - List expectedValues = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.VALUE.toString()); + List expectedCounts = testValues.get(MovAvgType.EWMA.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.EWMA.toString() + "_" + MetricTarget.VALUE.toString()); Iterator actualIter = buckets.iterator(); Iterator expectedBucketIter = mockHisto.iterator(); @@ -464,7 +463,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { } @Test - public void doubleSingleValuedField() { + public void holtSingleValuedField() { SearchResponse response = client() .prepareSearch("idx").setTypes("type") @@ -474,12 +473,12 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { .subAggregation(metric) .subAggregation(movingAvg("movavg_counts") .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) + .modelBuilder(new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha).beta(beta)) .gapPolicy(gapPolicy) .setBucketsPaths("_count")) .subAggregation(movingAvg("movavg_values") .window(windowSize) - .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) + .modelBuilder(new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha).beta(beta)) .gapPolicy(gapPolicy) .setBucketsPaths("the_metric")) ).execute().actionGet(); @@ -492,8 +491,8 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { List buckets = histo.getBuckets(); assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); - List expectedCounts = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.COUNT.toString()); - List expectedValues = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.VALUE.toString()); + List expectedCounts = testValues.get(MovAvgType.HOLT.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.HOLT.toString() + "_" + MetricTarget.VALUE.toString()); Iterator 
actualIter = buckets.iterator(); Iterator expectedBucketIter = mockHisto.iterator(); @@ -732,7 +731,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { currentValue = current.value(); if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { - // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + // if we are ignoring, movavg could go up (holt) or stay the same (simple, linear, ewma) assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { // If we insert zeros, this should always increase the moving avg since the last bucket has a real value @@ -791,7 +790,7 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { currentValue = current.value(); if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { - // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + // if we are ignoring, movavg could go up (holt) or stay the same (simple, linear, ewma) assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { // If we insert zeros, this should always increase the moving avg since the last bucket has a real value @@ -1057,9 +1056,9 @@ public class MovAvgTests extends ElasticsearchIntegrationTest { case 1: return new LinearModel.LinearModelBuilder(); case 2: - return new SingleExpModel.SingleExpModelBuilder().alpha(alpha); + return new EwmaModel.EWMAModelBuilder().alpha(alpha); case 3: - return new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta); + return new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha).beta(beta); default: return new SimpleModel.SimpleModelBuilder(); } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java index 156f4f873a7..2a3f862c321 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java @@ -140,9 +140,9 @@ public class MovAvgUnitTests extends ElasticsearchTestCase { } @Test - public void testSingleExpMovAvgModel() { + public void testEWMAMovAvgModel() { double alpha = randomDouble(); - MovAvgModel model = new SingleExpModel(alpha); + MovAvgModel model = new EwmaModel(alpha); int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); @@ -170,9 +170,9 @@ public class MovAvgUnitTests extends ElasticsearchTestCase { } @Test - public void testSinglePredictionModel() { + public void testEWMAPredictionModel() { double alpha = randomDouble(); - MovAvgModel model = new SingleExpModel(alpha); + MovAvgModel model = new EwmaModel(alpha); int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1,50); @@ -206,10 +206,10 @@ public class MovAvgUnitTests extends ElasticsearchTestCase { } @Test - public void testDoubleExpMovAvgModel() { + public void testHoltLinearMovAvgModel() { double alpha = randomDouble(); double beta = randomDouble(); - MovAvgModel model = new DoubleExpModel(alpha, beta); + MovAvgModel model = new HoltLinearModel(alpha, beta); int numValues = randomIntBetween(1, 100); int windowSize = randomIntBetween(1, 50); @@ -250,10 +250,10 @@ public class MovAvgUnitTests extends ElasticsearchTestCase { } @Test - public void 
testDoublePredictionModel() { + public void testHoltLinearPredictionModel() { double alpha = randomDouble(); double beta = randomDouble(); - MovAvgModel model = new DoubleExpModel(alpha, beta); + MovAvgModel model = new HoltLinearModel(alpha, beta); int windowSize = randomIntBetween(1, 50); int numPredictions = randomIntBetween(1,50); diff --git a/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java index a16acc1e8e8..34c826379a0 100644 --- a/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java +++ b/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java @@ -19,25 +19,26 @@ package org.elasticsearch.search.compress; +import org.elasticsearch.Version; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.compress.lzf.LZFCompressor; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -/** - * - */ -public class SearchSourceCompressTests extends ElasticsearchIntegrationTest { +public class SearchSourceCompressTests extends ElasticsearchSingleNodeTest { @Test public void testSourceCompressionLZF() throws IOException { @@ -53,7 +54,8 @@ public class SearchSourceCompressTests extends ElasticsearchIntegrationTest { } catch (Exception e) { // ignore } - createIndex("test"); + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + createIndex("test", settings); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") diff --git a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java index b67a5c07494..9729d8e0439 100644 --- a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java @@ -23,10 +23,11 @@ import com.google.common.base.Joiner; import com.google.common.collect.Iterables; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings.Builder; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -90,9 +91,6 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static 
org.hamcrest.Matchers.startsWith; -/** - * - */ @Slow public class HighlighterSearchTests extends ElasticsearchIntegrationTest { @@ -564,9 +562,10 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testForceSourceWithSourceDisabled() throws Exception { + public void testForceSourceWithSourceDisabledBackcompat() throws Exception { assertAcked(prepareCreate("test") + .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("type1", jsonBuilder().startObject().startObject("type1") .startObject("_source").field("enabled", false).endObject() .startObject("properties") @@ -2180,8 +2179,6 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { public void testPostingsHighlighterMultiMapperWithStore() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", jsonBuilder().startObject().startObject("type1") - //just to make sure that we hit the stored fields rather than the _source - .startObject("_source").field("enabled", false).endObject() .startObject("properties") .startObject("title").field("type", "multi_field").startObject("fields") .startObject("title").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "classic").endObject() @@ -2199,7 +2196,6 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest { assertHitCount(searchResponse, 1l); SearchHit hit = searchResponse.getHits().getAt(0); - assertThat(hit.source(), nullValue()); //stopwords are not highlighted since not indexed assertHighlight(hit, "title", 0, 1, equalTo("this is a test .")); diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java index 8792b2cb6a8..2de5348149d 100644 --- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java +++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java @@ -697,8 +697,9 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { } @Test - public void testNestedInnerHitsWithStoredFieldsAndNoSource() throws Exception { + public void testNestedInnerHitsWithStoredFieldsAndNoSourceBackcompat() throws Exception { assertAcked(prepareCreate("articles") + .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("enabled", false).endObject() .startObject("properties") @@ -735,8 +736,9 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { } @Test - public void testNestedInnerHitsWithHighlightOnStoredField() throws Exception { + public void testNestedInnerHitsWithHighlightOnStoredFieldBackcompat() throws Exception { assertAcked(prepareCreate("articles") + .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("enabled", false).endObject() .startObject("properties") @@ -773,7 +775,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { } @Test - public void testNestedInnerHitsWithExcludeSource() throws Exception { + public void testNestedInnerHitsWithExcludeSourceBackcompat() throws Exception { assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() @@ -811,7 +813,7 @@ public class 
InnerHitsTests extends ElasticsearchIntegrationTest { } @Test - public void testNestedInnerHitsHiglightWithExcludeSource() throws Exception { + public void testNestedInnerHitsHiglightWithExcludeSourceBackcompat() throws Exception { assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() diff --git a/src/test/java/org/elasticsearch/index/query/ItemSerializationTests.java b/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java similarity index 96% rename from src/test/java/org/elasticsearch/index/query/ItemSerializationTests.java rename to src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java index 5bc1b284c78..d1fbb9cf546 100644 --- a/src/test/java/org/elasticsearch/index/query/ItemSerializationTests.java +++ b/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.query; +package org.elasticsearch.search.morelikethis; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.action.get.MultiGetRequest; @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.test.ElasticsearchTestCase; @@ -134,7 +135,7 @@ public class ItemSerializationTests extends ElasticsearchTestCase { public void testSimpleItemSerializationFromFile() throws Exception { // test items from JSON List itemsFromJSON = testItemsFromJSON( - copyToStringFromClasspath("/org/elasticsearch/index/query/items.json")); + copyToStringFromClasspath("/org/elasticsearch/search/morelikethis/items.json")); // create builder from items XContentBuilder builder = XContentFactory.jsonBuilder(); diff --git a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java b/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java similarity index 71% rename from src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java rename to src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java index 1530a35f713..db8a88d601d 100644 --- a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java +++ b/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java @@ -17,16 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.mlt; +package org.elasticsearch.search.morelikethis; -import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.mlt.MoreLikeThisRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,14 +31,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.io.IOException; import java.util.ArrayList; -import java.util.Comparator; import java.util.List; import java.util.concurrent.ExecutionException; @@ -58,7 +52,7 @@ import static org.hamcrest.Matchers.notNullValue; /** * */ -public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { +public class MoreLikeThisTests extends ElasticsearchIntegrationTest { @Test public void testSimpleMoreLikeThis() throws Exception { @@ -77,8 +71,9 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis"); - SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet(); - assertHitCount(mltResponse, 1l); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 1l); } @@ -98,8 +93,9 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis"); - SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet(); - assertHitCount(mltResponse, 0l); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 0l); } @@ -125,23 +121,27 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running moreLikeThis on index"); - SearchResponse mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet(); - assertHitCount(mltResponse, 2l); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 2l); logger.info("Running moreLikeThis on beta shard"); - mltResponse = 
client().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet(); - assertHitCount(mltResponse, 1l); - assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3")); + response = client().prepareSearch("beta").setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 1l); + assertThat(response.getHits().getAt(0).id(), equalTo("3")); logger.info("Running moreLikeThis on release shard"); - mltResponse = client().moreLikeThis(moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).searchIndices("release")).actionGet(); - assertHitCount(mltResponse, 1l); - assertThat(mltResponse.getHits().getAt(0).id(), equalTo("2")); + response = client().prepareSearch("release").setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 1l); + assertThat(response.getHits().getAt(0).id(), equalTo("2")); logger.info("Running moreLikeThis on alias with node client"); - mltResponse = internalCluster().clientNodeClient().moreLikeThis(moreLikeThisRequest("beta").type("type1").id("1").minTermFreq(1).minDocFreq(1)).actionGet(); - assertHitCount(mltResponse, 1l); - assertThat(mltResponse.getHits().getAt(0).id(), equalTo("3")); + response = internalCluster().clientNodeClient().prepareSearch("beta").setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get(); + assertHitCount(response, 1l); + assertThat(response.getHits().getAt(0).id(), equalTo("3")); } @@ -159,12 +159,14 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { client().admin().indices().prepareRefresh("foo").execute().actionGet(); assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); - SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse, notNullValue()); - searchResponse = client.prepareMoreLikeThis("foo", "bar", "1").execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse, notNullValue()); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("foo", "bar", "1"))).get(); + assertNoFailures(response); + assertThat(response, notNullValue()); + response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("foo", "bar", "1"))).get(); + assertNoFailures(response); + assertThat(response, notNullValue()); } @Test @@ -183,9 +185,10 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { .execute().actionGet(); client().admin().indices().prepareRefresh("foo").execute().actionGet(); - SearchResponse searchResponse = client().prepareMoreLikeThis("foo", "bar", "1").setRouting("2").execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse, notNullValue()); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem((Item) new Item("foo", "bar", "1").routing("2"))).get(); + assertNoFailures(response); + assertThat(response, notNullValue()); } @Test @@ -205,9 +208,10 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { .setRouting("4000") .execute().actionGet(); client().admin().indices().prepareRefresh("foo").execute().actionGet(); - SearchResponse searchResponse = 
client().prepareMoreLikeThis("foo", "bar", "1").setRouting("4000").execute().actionGet(); - assertNoFailures(searchResponse); - assertThat(searchResponse, notNullValue()); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem((Item) new Item("foo", "bar", "1").routing("4000"))).get(); + assertNoFailures(response); + assertThat(response, notNullValue()); } @Test @@ -232,11 +236,13 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { refresh(); // Implicit list of fields -> ignore numeric fields - SearchResponse searchResponse = client().prepareMoreLikeThis("test", "type", "1").setMinDocFreq(1).setMinTermFreq(1).execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type", "1")).minTermFreq(1).minDocFreq(1)).get(); assertHitCount(searchResponse, 1l); // Explicit list of fields including numeric fields -> fail - assertThrows(client().prepareMoreLikeThis("test", "type", "1").setField("string_value", "int_value"), SearchPhaseExecutionException.class); + assertThrows(client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder("string_value", "int_value").addItem(new Item("test", "type", "1")).minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class); // mlt query with no field -> OK searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery().likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet(); @@ -292,65 +298,18 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { client().admin().indices().refresh(refreshRequest()).actionGet(); logger.info("Running More Like This with include true"); - SearchResponse mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).include(true).percentTermsToMatch(0)) - .actionGet(); - assertOrderedSearchHits(mltResponse, "1", "2"); + SearchResponse response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1).include(true).percentTermsToMatch(0)).get(); + assertOrderedSearchHits(response, "1", "2"); - mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("2").minTermFreq(1).minDocFreq(1).include(true).percentTermsToMatch(0)) - .actionGet(); - assertOrderedSearchHits(mltResponse, "2", "1"); + response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "2")).minTermFreq(1).minDocFreq(1).include(true).percentTermsToMatch(0)).get(); + assertOrderedSearchHits(response, "2", "1"); logger.info("Running More Like This with include false"); - mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).percentTermsToMatch(0)) - .actionGet(); - assertSearchHits(mltResponse, "2"); - } - - @Test - public void testMoreLikeThisBodyFromSize() throws Exception { - logger.info("Creating index test"); - assertAcked(prepareCreate("test").addMapping("type1", - jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("text").field("type", "string").endObject() - .endObject().endObject().endObject())); - - logger.info("Running Cluster Health"); - assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); - - logger.info("Indexing..."); - List builders = new ArrayList<>(10); - for (int i = 1; i <= 10; i++) { - 
builders.add(client().prepareIndex("test", "type1").setSource("text", "lucene").setId(String.valueOf(i))); - } - indexRandom(true, builders); - - logger.info("'size' set but 'search_from' and 'search_size' kept to defaults"); - SearchResponse mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).include(true) - .searchSource(SearchSourceBuilder.searchSource().size(5))) - .actionGet(); - assertSearchResponse(mltResponse); - assertEquals(mltResponse.getHits().hits().length, 5); - - logger.info("'from' set but 'search_from' and 'search_size' kept to defaults"); - mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).include(true) - .searchSource(SearchSourceBuilder.searchSource().from(5))) - .actionGet(); - assertSearchResponse(mltResponse); - assertEquals(mltResponse.getHits().hits().length, 5); - - logger.info("When set, 'search_from' and 'search_size' should override 'from' and 'size'"); - mltResponse = client().moreLikeThis( - moreLikeThisRequest("test").type("type1").id("1").minTermFreq(1).minDocFreq(1).include(true) - .searchSize(10).searchFrom(2) - .searchSource(SearchSourceBuilder.searchSource().size(1).from(1))) - .actionGet(); - assertSearchResponse(mltResponse); - assertEquals(mltResponse.getHits().hits().length, 8); + response = client().prepareSearch().setQuery( + new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1).percentTermsToMatch(0)).get(); + assertSearchHits(response, "2"); } public void testSimpleMoreLikeThisIds() throws Exception { @@ -376,85 +335,6 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { assertHitCount(mltResponse, 3l); } - @Test - public void testCompareMoreLikeThisDSLWithAPI() throws Exception { - logger.info("Creating index test"); - assertAcked(prepareCreate("test").addMapping("type1", - jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("text").field("type", "string").endObject() - .endObject().endObject().endObject())); - - logger.info("Running Cluster Health"); - assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN)); - - logger.info("Indexing..."); - String[] texts = new String[] { - "Apache Lucene", - "free and open source", - "information retrieval", - "software library", - "programmed in Java", - "Doug Cutting", - "Apache Software Foundation", - "Apache Software License", - "Lucene programming languages", - "Delphi, Perl, C#, C++, Python, Ruby, and PHP" - }; - List builders = new ArrayList<>(10); - for (int i = 0; i < texts.length; i++) { - builders.add(client().prepareIndex("test", "type1").setSource("text", texts[i]).setId(String.valueOf(i))); - } - indexRandom(true, false, builders); - - int iters = between(10, 20); - for (int j = 0; j < iters; j++) { - logger.info("Running MoreLikeThis DSL with IDs"); - String id = String.valueOf(getRandom().nextInt(texts.length)); - Client client = client(); - MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery("text").ids(id).minTermFreq(1).minDocFreq(1) - .minimumShouldMatch("0%"); - SearchResponse mltResponseDSL = client.prepareSearch() - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setTypes("type1") - .setQuery(queryBuilder) - .setSize(texts.length) - .execute().actionGet(); - assertSearchResponse(mltResponseDSL); - - logger.info("Running MoreLikeThis API"); - MoreLikeThisRequest mltRequest = 
moreLikeThisRequest("test").type("type1").searchSize(texts.length).id(id).minTermFreq(1).minDocFreq(1) - .minimumShouldMatch("0%"); - SearchResponse mltResponseAPI = client.moreLikeThis(mltRequest).actionGet(); - assertSearchResponse(mltResponseAPI); - - logger.info("Ensure the documents and scores returned are the same."); - SearchHit[] hitsDSL = mltResponseDSL.getHits().hits(); - SearchHit[] hitsAPI = mltResponseAPI.getHits().hits(); - - // we have to resort since the results might come from - // different shards and docIDs that are used for tie-breaking might not be the same on the shards - Comparator cmp = new Comparator() { - - @Override - public int compare(SearchHit o1, SearchHit o2) { - if (Float.compare(o1.getScore(), o2.getScore()) == 0) { - return o1.getId().compareTo(o2.getId()); - } - return Float.compare(o1.getScore(), o2.getScore()); - } - }; - ArrayUtil.timSort(hitsDSL, cmp); - ArrayUtil.timSort(hitsAPI, cmp); - assertThat("Not the same number of results.", hitsAPI.length, equalTo(hitsDSL.length)); - for (int i = 0; i < hitsDSL.length; i++) { - assertThat("Expected id: " + hitsDSL[i].getId() + " at position " + i + " but wasn't.", - hitsAPI[i].getId(), equalTo(hitsDSL[i].getId())); - assertThat("Expected score: " + hitsDSL[i].getScore() + " at position " + i + " but wasn't.", - hitsAPI[i].getScore(), equalTo(hitsDSL[i].getScore())); - } - } - } - @Test public void testSimpleMoreLikeThisIdsMultipleTypes() throws Exception { logger.info("Creating index test"); @@ -519,14 +399,6 @@ public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { .setQuery(mltQuery).execute().actionGet(); assertSearchResponse(response); assertHitCount(response, max_query_terms); - - logger.info("Running More Like This API with with max_query_terms = %s returns all docs!", max_query_terms); - response = client().moreLikeThis(moreLikeThisRequest("test").type("type1") - .id("0").fields("text").minTermFreq(1).minDocFreq(1) - .maxQueryTerms(max_query_terms).percentTermsToMatch(0)) - .actionGet(); - assertSearchResponse(response); - assertHitCount(response, values.length); } } diff --git a/src/test/java/org/elasticsearch/index/query/items.json b/src/test/java/org/elasticsearch/search/morelikethis/items.json similarity index 100% rename from src/test/java/org/elasticsearch/index/query/items.json rename to src/test/java/org/elasticsearch/search/morelikethis/items.json diff --git a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java index f95a1422786..57964e950b1 100644 --- a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java +++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.simple; +import org.apache.lucene.util.Constants; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -31,6 +32,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; +import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -151,6 +153,7 @@ public class SimpleSearchTests 
extends ElasticsearchIntegrationTest { @Test public void localeDependentDateTests() throws Exception { + assumeFalse("Locales are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false)); assertAcked(prepareCreate("test") .addMapping("type1", jsonBuilder().startObject() diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 10ad832eef2..67991a896ea 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -99,7 +99,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMapper.Loading; import org.elasticsearch.index.mapper.internal.SizeFieldMapper; -import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import org.elasticsearch.index.merge.policy.AbstractMergePolicyProvider; import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider; @@ -109,10 +108,9 @@ import org.elasticsearch.index.merge.policy.MergePolicyProvider; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule; -import org.elasticsearch.index.store.StoreModule; import org.elasticsearch.index.translog.TranslogService; -import org.elasticsearch.index.translog.fs.FsTranslog; -import org.elasticsearch.index.translog.fs.FsTranslogFile; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogFile; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -394,11 +392,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase .field("enabled", randomBoolean()) .endObject(); } - if (randomBoolean()) { - mappings.startObject(SourceFieldMapper.NAME) - .field("compress", randomBoolean()) - .endObject(); - } mappings.startArray("dynamic_templates") .startObject() .startObject("template-strings") @@ -505,7 +498,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } if (random.nextBoolean()) { - builder.put(FsTranslog.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, FsTranslogFile.Type.values()).name()); + builder.put(Translog.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogFile.Type.values()).name()); } if (random.nextBoolean()) { @@ -592,6 +585,9 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase if (random.nextBoolean()) { builder.put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, random.nextBoolean()); } + if (random.nextBoolean()) { + builder.put(Translog.INDEX_TRANSLOG_DURABILITY, RandomPicks.randomFrom(random, Translog.Durabilty.values())); + } return builder; } @@ -648,7 +644,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } ensureClusterSizeConsistency(); ensureClusterStateConsistency(); - cluster().beforeIndexDeletion(); + beforeIndexDeletion(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) {
cluster().close(); @@ -671,6 +667,10 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } } + protected void beforeIndexDeletion() { + cluster().beforeIndexDeletion(); + } + public static TestCluster cluster() { return currentCluster; } @@ -1732,15 +1732,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } final String nodePrefix; - final LifecycleScope nodeDirScope; switch (scope) { case TEST: nodePrefix = TEST_CLUSTER_NODE_PREFIX; - nodeDirScope = LifecycleScope.TEST; break; case SUITE: nodePrefix = SUITE_CLUSTER_NODE_PREFIX; - nodeDirScope = LifecycleScope.SUITE; break; default: throw new ElasticsearchException("Scope not supported: " + scope); diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 92797894e38..1ce5b7e3066 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -79,6 +79,8 @@ import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreModule; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogFile; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -437,6 +439,15 @@ public final class InternalTestCluster extends TestCluster { builder.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, random.nextBoolean()); } + if (random.nextBoolean()) { + builder.put(Translog.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogFile.Type.values())); + if (random.nextBoolean()) { + builder.put(Translog.INDEX_TRANSLOG_SYNC_INTERVAL, 0); // 0 has special meaning to sync each op + } else { + builder.put(Translog.INDEX_TRANSLOG_SYNC_INTERVAL, RandomInts.randomIntBetween(random, 100, 5000)); + } + } + return builder.build(); } @@ -960,6 +971,11 @@ public final class InternalTestCluster extends TestCluster { @Override public void beforeIndexDeletion() { + // Check that the operations counter on index shard has reached 1. + // The assumption here is that after a test there are no ongoing write operations. + // Tests that have ongoing write operations after the test (for example because ttl is used + // and not all docs have been purged after the test) and inherit from + // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures.
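+ // For illustration, a test that knowingly leaves writes in flight could opt out
+ // like this (a minimal sketch of a hypothetical subclass, not part of this change):
+ //
+ //     @Override
+ //     protected void beforeIndexDeletion() {
+ //         // skip the shard operation counter check, e.g. because ttl purging
+ //         // may still be writing after the test body has finished
+ //     }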
assertShardIndexCounter(); } diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java b/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java index 0adb6380e77..160bf26ce19 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.test.engine; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.translog.fs.FsTranslog; /** * diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java index 7c7bd96a31f..ed4dc95795e 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.translog.fs.FsTranslog; import java.io.IOException;
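For context on the model renames exercised above (SingleExpModel to EwmaModel, DoubleExpModel to HoltLinearModel), here is a minimal, self-contained sketch of the standard recurrences behind the two models. The class name, smoothing values, and return convention are illustrative assumptions only; the tests' own ewma(window) and holt(window) helpers remain the source of truth, and initialization conventions for the Holt trend term vary.

import java.util.Arrays;
import java.util.Collection;

public class MovAvgSketch {

    // EWMA (single exponential): blend each value into a running average;
    // a larger alpha weights recent observations more heavily.
    static double ewma(Collection<Double> window, double alpha) {
        double avg = 0;
        boolean first = true;
        for (double value : window) {
            if (first) {
                avg = value; // seed with the first observation
                first = false;
            } else {
                avg = (value * alpha) + (avg * (1 - alpha));
            }
        }
        return avg;
    }

    // Holt-Linear (double exponential): additionally smooths a trend term b,
    // so the estimate can follow series that drift upward or downward.
    static double holt(Collection<Double> window, double alpha, double beta) {
        double s = 0, lastS = 0, b = 0;
        int counter = 0;
        for (double value : window) {
            if (counter == 0) {
                s = value; // the level starts at the first observation
            } else {
                s = alpha * value + (1 - alpha) * (lastS + b);
                b = beta * (s - lastS) + (1 - beta) * b;
            }
            lastS = s;
            counter++;
        }
        return s; // smoothed level; a one-step-ahead forecast would be s + b
    }

    public static void main(String[] args) {
        Collection<Double> window = Arrays.asList(10d, 12d, 11d, 15d, 14d);
        System.out.println("ewma = " + ewma(window, 0.5));
        System.out.println("holt = " + holt(window, 0.5, 0.5));
    }
}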