From 6fc1a229770af82a1424eecca03faf57ccacd607 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 27 Jun 2016 09:55:16 -0400 Subject: [PATCH 01/43] cutover some docs to painless --- docs/java-api/indexed-scripts.asciidoc | 8 +- docs/java-api/query-dsl/script-query.asciidoc | 10 +- .../diversified-sampler-aggregation.asciidoc | 9 +- .../bucket/range-aggregation.asciidoc | 16 +- .../metrics/avg-aggregation.asciidoc | 14 +- .../metrics/cardinality-aggregation.asciidoc | 7 +- .../extendedstats-aggregation.asciidoc | 14 +- .../metrics/max-aggregation.asciidoc | 14 +- .../metrics/min-aggregation.asciidoc | 14 +- .../metrics/percentile-aggregation.asciidoc | 5 +- .../percentile-rank-aggregation.asciidoc | 5 +- .../metrics/stats-aggregation.asciidoc | 14 +- .../metrics/sum-aggregation.asciidoc | 16 +- .../metrics/tophits-aggregation.asciidoc | 5 +- .../metrics/valuecount-aggregation.asciidoc | 11 +- docs/reference/docs/bulk.asciidoc | 2 +- docs/reference/docs/reindex.asciidoc | 3 +- docs/reference/docs/update-by-query.asciidoc | 3 +- docs/reference/docs/update.asciidoc | 12 +- .../mapping/fields/field-names-field.asciidoc | 7 +- .../mapping/fields/id-field.asciidoc | 8 +- .../mapping/fields/index-field.asciidoc | 7 +- .../mapping/fields/parent-field.asciidoc | 7 +- .../mapping/fields/routing-field.asciidoc | 7 +- .../mapping/fields/type-field.asciidoc | 7 +- .../mapping/fields/uid-field.asciidoc | 8 +- docs/reference/mapping/types/boolean.asciidoc | 6 +- .../scripting/painless-syntax.asciidoc | 9 + .../query-dsl/function-score-query.asciidoc | 9 +- .../reference/query-dsl/script-query.asciidoc | 8 +- .../reference/search/request/rescore.asciidoc | 5 +- .../search/request/script-fields.asciidoc | 6 +- docs/reference/search/request/sort.asciidoc | 3 +- .../src/main/antlr/PainlessLexer.g4 | 1 + .../src/main/antlr/PainlessLexer.tokens | 258 ++-- .../src/main/antlr/PainlessParser.g4 | 1 + .../src/main/antlr/PainlessParser.tokens | 258 ++-- .../org/elasticsearch/painless/Locals.java | 6 +- .../painless/SimpleChecksAdapter.java | 60 + .../painless/WriterConstants.java | 2 + .../painless/antlr/PainlessLexer.java | 452 +++--- .../painless/antlr/PainlessParser.java | 1217 +++++++++-------- .../antlr/PainlessParserBaseVisitor.java | 7 + .../painless/antlr/PainlessParserVisitor.java | 7 + .../elasticsearch/painless/antlr/Walker.java | 12 + .../painless/node/SFunction.java | 1 + .../elasticsearch/painless/node/SSource.java | 10 +- .../painless/BasicStatementTests.java | 26 + .../elasticsearch/painless/LambdaTests.java | 3 +- 49 files changed, 1451 insertions(+), 1149 deletions(-) create mode 100644 modules/lang-painless/src/main/java/org/elasticsearch/painless/SimpleChecksAdapter.java diff --git a/docs/java-api/indexed-scripts.asciidoc b/docs/java-api/indexed-scripts.asciidoc index 45d19ae662d..a1259649a77 100644 --- a/docs/java-api/indexed-scripts.asciidoc +++ b/docs/java-api/indexed-scripts.asciidoc @@ -8,20 +8,20 @@ and delete indexed scripts and templates. 
[source,java] -------------------------------------------------- PutIndexedScriptResponse = client.preparePutIndexedScript() - .setScriptLang("groovy") + .setScriptLang("painless") .setId("script1") .setSource("script", "_score * doc['my_numeric_field'].value") .execute() .actionGet(); GetIndexedScriptResponse = client.prepareGetIndexedScript() - .setScriptLang("groovy") + .setScriptLang("painless") .setId("script1") .execute() .actionGet(); DeleteIndexedScriptResponse = client.prepareDeleteIndexedScript() - .setScriptLang("groovy") + .setScriptLang("painless") .setId("script1") .execute() .actionGet(); @@ -33,4 +33,4 @@ To store templates simply use "mustache" for the scriptLang. The API allows one to set the language of the indexed script being interacted with. If one is not provided the default scripting language -will be used. \ No newline at end of file +will be used. diff --git a/docs/java-api/query-dsl/script-query.asciidoc b/docs/java-api/query-dsl/script-query.asciidoc index 534c803ab08..33786b693d2 100644 --- a/docs/java-api/query-dsl/script-query.asciidoc +++ b/docs/java-api/query-dsl/script-query.asciidoc @@ -12,11 +12,11 @@ QueryBuilder qb = scriptQuery( <1> inlined script -If you have stored on each data node a script named `mygroovyscript.groovy` with: +If you have stored on each data node a script named `myscript.painless` with: -[source,groovy] +[source,js] -------------------------------------------------- -doc['num1'].value > param1 +doc['num1'].value > params.param1 -------------------------------------------------- You can use it then with: @@ -25,9 +25,9 @@ You can use it then with: -------------------------------------------------- QueryBuilder qb = scriptQuery( new Script( - "mygroovyscript", <1> + "myscript", <1> ScriptType.FILE, <2> - "groovy", <3> + "painless", <3> ImmutableMap.of("param1", 5)) <4> ); -------------------------------------------------- diff --git a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc index 1a3180962b0..0307840d685 100644 --- a/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/diversified-sampler-aggregation.asciidoc @@ -115,8 +115,11 @@ Controlling diversity using a script: { "aggs" : { "sample" : { - "diverisfied_sampler" : { - "script" : "doc['author'].value + '/' + doc['genre'].value" + "diversified_sampler" : { + "script" : { + "lang" : "painless", + "inline" : "doc['author'].value + '/' + doc['genre'].value" + } } } } @@ -151,4 +154,4 @@ The de-duplication logic in the diversify settings applies only at a shard level ===== No specialized syntax for geo/date fields Currently the syntax for defining the diversifying values is defined by a choice of `field` or `script` - there is no added syntactical sugar for expressing geo or date units such as "1w" (1 week). -This support may be added in a later release and users will currently have to create these sorts of values using a script. \ No newline at end of file +This support may be added in a later release and users will currently have to create these sorts of values using a script. 
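A minimal Java sketch of the inline form of the script query converted in docs/java-api/query-dsl/script-query.asciidoc above: the four-argument `Script` constructor and the `scriptQuery` helper are taken from that file-script snippet, while `ScriptType.INLINE` and `Collections.singletonMap` are assumptions used here purely for illustration. As with the stored script, the Painless source reads its parameter through the `params.` prefix:

[source,java]
--------------------------------------------------
// Sketch only: inline variant of the script query, assuming ScriptType exposes
// an INLINE constant alongside FILE. A plain JDK singleton map stands in for
// Guava's ImmutableMap used in the original snippet.
QueryBuilder qb = scriptQuery(
    new Script(
        "doc['num1'].value > params.param1",                       // inline Painless source
        ScriptType.INLINE,                                         // inline rather than a stored file
        "painless",                                                // script language
        Collections.<String, Object>singletonMap("param1", 5)));   // script parameters
--------------------------------------------------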
diff --git a/docs/reference/aggregations/bucket/range-aggregation.asciidoc b/docs/reference/aggregations/bucket/range-aggregation.asciidoc index 44760fc728f..efcbd9715e1 100644 --- a/docs/reference/aggregations/bucket/range-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/range-aggregation.asciidoc @@ -134,7 +134,10 @@ It is also possible to customize the key for each range: "aggs" : { "price_ranges" : { "range" : { - "script" : "doc['price'].value", + "script" : { + "lang": "painless", + "inline": "doc['price'].value" + }, "ranges" : [ { "to" : 50 }, { "from" : 50, "to" : 100 }, @@ -146,7 +149,7 @@ It is also possible to customize the key for each range: } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -184,9 +187,12 @@ Lets say the product prices are in USD but we would like to get the price ranges "price_ranges" : { "range" : { "field" : "price", - "script" : "_value * conversion_rate", - "params" : { - "conversion_rate" : 0.8 + "script" : { + "lang": "painless", + "inline": "_value * params.conversion_rate", + "params" : { + "conversion_rate" : 0.8 + } }, "ranges" : [ { "to" : 35 }, diff --git a/docs/reference/aggregations/metrics/avg-aggregation.asciidoc b/docs/reference/aggregations/metrics/avg-aggregation.asciidoc index f81cd3eee33..9967f4c9d9e 100644 --- a/docs/reference/aggregations/metrics/avg-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/avg-aggregation.asciidoc @@ -42,12 +42,19 @@ Computing the average grade based on a script: ..., "aggs" : { - "avg_grade" : { "avg" : { "script" : "doc['grade'].value" } } + "avg_grade" : { + "avg" : { + "script" : { + "inline" : "doc['grade'].value", + "lang" : "painless" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -86,7 +93,8 @@ It turned out that the exam was way above the level of the students and a grade "avg" : { "field" : "grade", "script" : { - "inline": "_value * correction", + "lang": "painless", + "inline": "_value * params.correction", "params" : { "correction" : 1.2 } diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc index 1eb0c08772f..9c4ee59cccf 100644 --- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc @@ -104,14 +104,17 @@ however since hashes need to be computed on the fly. 
"aggs" : { "author_count" : { "cardinality" : { - "script": "doc['author.first_name'].value + ' ' + doc['author.last_name'].value" + "script": { + "lang": "painless", + "inline": "doc['author.first_name'].value + ' ' + doc['author.last_name'].value" + } } } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- diff --git a/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc index 30a5acf6809..0e324089dc7 100644 --- a/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/extendedstats-aggregation.asciidoc @@ -86,12 +86,19 @@ Computing the grades stats based on a script: ..., "aggs" : { - "grades_stats" : { "extended_stats" : { "script" : "doc['grade'].value" } } + "grades_stats" : { + "extended_stats" : { + "script" : { + "inline" : "doc['grade'].value", + "lang" : "painless" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -130,7 +137,8 @@ It turned out that the exam was way above the level of the students and a grade "extended_stats" : { "field" : "grade", "script" : { - "inline": "_value * correction", + "lang" : "painless", + "inline": "_value * params.correction", "params" : { "correction" : 1.2 } diff --git a/docs/reference/aggregations/metrics/max-aggregation.asciidoc b/docs/reference/aggregations/metrics/max-aggregation.asciidoc index 2a641fda5dc..8cfc0bd998e 100644 --- a/docs/reference/aggregations/metrics/max-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/max-aggregation.asciidoc @@ -39,12 +39,19 @@ Computing the max price value across all document, this time using a script: -------------------------------------------------- { "aggs" : { - "max_price" : { "max" : { "script" : "doc['price'].value" } } + "max_price" : { + "max" : { + "script" : { + "inline" : "doc['price'].value", + "lang" : "painless" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. 
To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -78,7 +85,8 @@ Let's say that the prices of the documents in our index are in USD, but we would "max" : { "field" : "price", "script" : { - "inline": "_value * conversion_rate", + "lang": "painless", + "inline": "_value * params.conversion_rate", "params" : { "conversion_rate" : 1.2 } diff --git a/docs/reference/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/aggregations/metrics/min-aggregation.asciidoc index 7698a41202c..819d70343fb 100644 --- a/docs/reference/aggregations/metrics/min-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/min-aggregation.asciidoc @@ -39,12 +39,19 @@ Computing the min price value across all document, this time using a script: -------------------------------------------------- { "aggs" : { - "min_price" : { "min" : { "script" : "doc['price'].value" } } + "min_price" : { + "min" : { + "script" : { + "inline" : "doc['price'].value", + "lang" : "painless" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -78,7 +85,8 @@ Let's say that the prices of the documents in our index are in USD, but we would "min" : { "field" : "price", "script" : - "inline": "_value * conversion_rate", + "lang" : "painless", + "inline": "_value * params.conversion_rate", "params" : { "conversion_rate" : 1.2 } diff --git a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc index 5357d00461e..dc8c8837344 100644 --- a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc @@ -101,7 +101,8 @@ a script to convert them on-the-fly: "load_time_outlier" : { "percentiles" : { "script" : { - "inline": "doc['load_time'].value / timeUnit", <1> + "lang": "painless", + "inline": "doc['load_time'].value / params.timeUnit", <1> "params" : { "timeUnit" : 1000 <2> } @@ -115,7 +116,7 @@ a script to convert them on-the-fly: script to generate values which percentiles are calculated on <2> Scripting supports parameterized input just like any other script -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. 
To use a file script use the following syntax: [source,js] -------------------------------------------------- diff --git a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc index 759abb22be5..dcb953ae252 100644 --- a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc @@ -73,7 +73,8 @@ a script to convert them on-the-fly: "percentile_ranks" : { "values" : [3, 5], "script" : { - "inline": "doc['load_time'].value / timeUnit", <1> + "lang": "painless", + "inline": "doc['load_time'].value / params.timeUnit", <1> "params" : { "timeUnit" : 1000 <2> } @@ -87,7 +88,7 @@ a script to convert them on-the-fly: script to generate values which percentile ranks are calculated on <2> Scripting supports parameterized input just like any other script -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- diff --git a/docs/reference/aggregations/metrics/stats-aggregation.asciidoc b/docs/reference/aggregations/metrics/stats-aggregation.asciidoc index 852c1c3f7a9..a442fb12150 100644 --- a/docs/reference/aggregations/metrics/stats-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/stats-aggregation.asciidoc @@ -48,12 +48,19 @@ Computing the grades stats based on a script: ..., "aggs" : { - "grades_stats" : { "stats" : { "script" : "doc['grade'].value" } } + "grades_stats" : { + "stats" : { + "script" : { + "lang": "painless", + "inline": "doc['grade'].value" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -92,7 +99,8 @@ It turned out that the exam was way above the level of the students and a grade "stats" : { "field" : "grade", "script" : - "inline": "_value * correction", + "lang": "painless", + "inline": "_value * params.correction", "params" : { "correction" : 1.2 } diff --git a/docs/reference/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/aggregations/metrics/sum-aggregation.asciidoc index d55fcd01018..b9aee74b320 100644 --- a/docs/reference/aggregations/metrics/sum-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/sum-aggregation.asciidoc @@ -49,12 +49,19 @@ Computing the intraday return based on a script: ..., "aggs" : { - "intraday_return" : { "sum" : { "script" : "doc['change'].value" } } + "intraday_return" : { + "sum" : { + "script" : { + "lang": "painless", + "inline": "doc['change'].value" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. 
To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- @@ -92,7 +99,10 @@ Computing the sum of squares over all stock tick changes: "daytime_return" : { "sum" : { "field" : "change", - "script" : "_value * _value" + "script" : { + "lang": "painless", + "inline": "_value * _value" + } } } } diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 2bad60ba0ec..1b955d2a898 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -182,7 +182,10 @@ relevancy order of the most relevant document in a bucket. }, "top_hit" : { "max": { - "script": "_score" + "script": { + "lang": "painless", + "inline": "_score" + } } } } diff --git a/docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc index fa2bfdbbb9d..925f5426187 100644 --- a/docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/valuecount-aggregation.asciidoc @@ -43,12 +43,19 @@ Counting the values generated by a script: ..., "aggs" : { - "grades_count" : { "value_count" : { "script" : "doc['grade'].value" } } + "grades_count" : { + "value_count" : { + "script" : { + "inline" : "doc['grade'].value", + "lang" : "painless" + } + } + } } } -------------------------------------------------- -This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a file script use the following syntax: +This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a file script use the following syntax: [source,js] -------------------------------------------------- diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 1fbd0419e2e..c9189b57c01 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -168,7 +168,7 @@ the options. 
Curl example with update actions: { "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} } { "doc" : {"field" : "value"} } { "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} } -{ "script" : { "inline": "ctx._source.counter += param1", "lang" : "javascript", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} +{ "script" : { "inline": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} { "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} } { "doc" : {"field" : "value"}, "doc_as_upsert" : true } { "update" : {"_id" : "3", "_type" : "type1", "_index" : "index1", "fields" : ["_source"]} } diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 525552ae85a..aac722e96a3 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -238,7 +238,8 @@ POST _reindex "version_type": "external" }, "script": { - "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}" + "inline": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", + "lang": "painless" } } -------------------------------------------------- diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 6d6bfe64ecf..56ad1c7cd9a 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -104,7 +104,8 @@ will increment the `likes` field on all of kimchy's tweets: POST twitter/_update_by_query { "script": { - "inline": "ctx._source.likes++" + "inline": "ctx._source.likes++", + "lang": "painless" }, "query": { "term": { diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 85e5bf2f67f..2edb0a71a3f 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -32,7 +32,8 @@ Now, we can execute a script that would increment the counter: -------------------------------------------------- curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{ "script" : { - "inline": "ctx._source.counter += count", + "inline": "ctx._source.counter += params.count", + "lang": "painless", "params" : { "count" : 4 } @@ -47,7 +48,8 @@ will still add it, since its a list): -------------------------------------------------- curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{ "script" : { - "inline": "ctx._source.tags += tag", + "inline": "ctx._source.tags += params.tag", + "lang": "painless", "params" : { "tag" : "blue" } @@ -85,7 +87,8 @@ the doc if the `tags` field contain `blue`, otherwise it does nothing -------------------------------------------------- curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{ "script" : { - "inline": "ctx._source.tags.contains(tag) ? ctx.op = \"delete\" : ctx.op = \"none\"", + "inline": "ctx._source.tags.contains(params.tag) ? ctx.op = \"delete\" : ctx.op = \"none\"", + "lang": "painless", "params" : { "tag" : "blue" } @@ -144,7 +147,8 @@ will be inserted as a new document. 
If the document does exist, then the -------------------------------------------------- curl -XPOST 'localhost:9200/test/type1/1/_update' -d '{ "script" : { - "inline": "ctx._source.counter += count", + "inline": "ctx._source.counter += params.count", + "lang": "painless", "params" : { "count" : 4 } diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index a56a9081e88..cf8c6398d2e 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -31,7 +31,10 @@ GET my_index/_search }, "script_fields": { "Field names": { - "script": "doc['_field_names']" <2> + "script": { + "lang": "painless", + "inline": "doc['_field_names']" <2> + } } } } @@ -40,4 +43,4 @@ GET my_index/_search // CONSOLE <1> Querying on the `_field_names` field (also see the <> query) -<2> Accessing the `_field_names` field in scripts (inline scripts must be <> for this example to work) +<2> Accessing the `_field_names` field in scripts diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index a852f4f4770..f99f1ec9723 100644 --- a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -33,7 +33,10 @@ GET my_index/_search }, "script_fields": { "UID": { - "script": "doc['_id']" <2> + "script": { + "lang": "painless", + "inline": "doc['_id']" <2> + } } } } @@ -41,5 +44,4 @@ GET my_index/_search // CONSOLE <1> Querying on the `_id` field (also see the <>) -<2> Accessing the `_id` field in scripts (inline scripts must be <> for this example to work) - +<2> Accessing the `_id` field in scripts diff --git a/docs/reference/mapping/fields/index-field.asciidoc b/docs/reference/mapping/fields/index-field.asciidoc index c31b6f1d0f7..599fedba62c 100644 --- a/docs/reference/mapping/fields/index-field.asciidoc +++ b/docs/reference/mapping/fields/index-field.asciidoc @@ -50,7 +50,10 @@ GET index_1,index_2/_search ], "script_fields": { "index_name": { - "script": "doc['_index']" <4> + "script": { + "lang": "painless", + "inline": "doc['_index']" <4> + } } } } @@ -60,4 +63,4 @@ GET index_1,index_2/_search <1> Querying on the `_index` field <2> Aggregating on the `_index` field <3> Sorting on the `_index` field -<4> Accessing the `_index` field in scripts (inline scripts must be <> for this example to work) +<4> Accessing the `_index` field in scripts diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc index 9dd8646172d..30b2e1a2086 100644 --- a/docs/reference/mapping/fields/parent-field.asciidoc +++ b/docs/reference/mapping/fields/parent-field.asciidoc @@ -81,7 +81,10 @@ GET my_index/_search }, "script_fields": { "parent": { - "script": "doc['_parent']" <3> + "script": { + "lang": "painless", + "inline": "doc['_parent']" <3> + } } } } @@ -91,7 +94,7 @@ GET my_index/_search <1> Querying on the `_parent` field (also see the <> and the <>) <2> Aggregating on the `_parent` field (also see the <> aggregation) -<3> Accessing the `_parent` field in scripts (inline scripts must be <> for this example to work) +<3> Accessing the `_parent` field in scripts ==== Parent-child restrictions diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index 49f269511dc..c8a92de1368 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ 
b/docs/reference/mapping/fields/routing-field.asciidoc @@ -42,7 +42,10 @@ GET my_index/_search }, "script_fields": { "Routing value": { - "script": "doc['_routing']" <2> + "script": { + "lang": "painless", + "inline": "doc['_routing']" <2> + } } } } @@ -50,7 +53,7 @@ GET my_index/_search // CONSOLE <1> Querying on the `_routing` field (also see the <>) -<2> Accessing the `_routing` field in scripts (inline scripts must be <> for this example to work) +<2> Accessing the `_routing` field in scripts ==== Searching with custom routing diff --git a/docs/reference/mapping/fields/type-field.asciidoc b/docs/reference/mapping/fields/type-field.asciidoc index 703ee9930d2..fecc0143e94 100644 --- a/docs/reference/mapping/fields/type-field.asciidoc +++ b/docs/reference/mapping/fields/type-field.asciidoc @@ -45,7 +45,10 @@ GET my_index/type_*/_search ], "script_fields": { "type": { - "script": "doc['_type']" <4> + "script": { + "lang": "painless", + "inline": "doc['_type']" <4> + } } } } @@ -56,5 +59,5 @@ GET my_index/type_*/_search <1> Querying on the `_type` field <2> Aggregating on the `_type` field <3> Sorting on the `_type` field -<4> Accessing the `_type` field in scripts (inline scripts must be <> for this example to work) +<4> Accessing the `_type` field in scripts diff --git a/docs/reference/mapping/fields/uid-field.asciidoc b/docs/reference/mapping/fields/uid-field.asciidoc index 43d5751d426..82095160646 100644 --- a/docs/reference/mapping/fields/uid-field.asciidoc +++ b/docs/reference/mapping/fields/uid-field.asciidoc @@ -45,7 +45,10 @@ GET my_index/_search ], "script_fields": { "UID": { - "script": "doc['_uid']" <4> + "script": { + "lang": "painless", + "inline": "doc['_uid']" <4> + } } } } @@ -55,5 +58,4 @@ GET my_index/_search <1> Querying on the `_uid` field (also see the <>) <2> Aggregating on the `_uid` field <3> Sorting on the `_uid` field -<4> Accessing the `_uid` field in scripts (inline scripts must be <> for this example to work) - +<4> Accessing the `_uid` field in scripts diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 914646a7220..a5f559facc2 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -76,13 +76,15 @@ GET my_index/_search }, "script_fields": { "is_published": { - "script": "doc['is_published'].value" <1> + "script": { + "lang": "painless", + "inline": "doc['is_published'].value" + } } } } -------------------------------------------------- // CONSOLE -<1> Inline scripts must be <> for this example to work. [[boolean-params]] ==== Parameters for `boolean` fields diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc index 500a276c852..88c2cdb11a9 100644 --- a/docs/reference/modules/scripting/painless-syntax.asciidoc +++ b/docs/reference/modules/scripting/painless-syntax.asciidoc @@ -132,6 +132,15 @@ There are only a few minor differences and add-ons: Java's https://docs.oracle.com/javase/tutorial/java/nutsandbolts/flow.html[control flow statements] are supported, with the exception of the `switch` statement. +In addition to Java's `enhanced for` loop, the `for in` syntax from groovy can also be used: + +[source,js] +--------------------------------------------------------- +for (item : list) { + ... 
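+  // illustrative assumption, not part of the original example: `item` is bound
+  // to each element of `list` in turn, so a statement such as `sum += item`
+  // would total numeric entries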
+} +--------------------------------------------------------- + [float] [[painless-functions]] === Functions diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index e7e4b8f5877..b6e4dedbc88 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -124,7 +124,10 @@ simple sample: [source,js] -------------------------------------------------- "script_score" : { - "script" : "_score * doc['my_numeric_field'].value" + "script" : { + "lang": "painless", + "inline": "_score * doc['my_numeric_field'].value" + } } -------------------------------------------------- @@ -140,12 +143,12 @@ script, and provide parameters to it: -------------------------------------------------- "script_score": { "script": { - "lang": "lang", + "lang": "painless", "params": { "param1": value1, "param2": value2 }, - "inline": "_score * doc['my_numeric_field'].value / pow(param1, param2)" + "inline": "_score * doc['my_numeric_field'].value / Math.pow(params.param1, params.param2)" } } -------------------------------------------------- diff --git a/docs/reference/query-dsl/script-query.asciidoc b/docs/reference/query-dsl/script-query.asciidoc index ee06a1b64bd..db82375abe0 100644 --- a/docs/reference/query-dsl/script-query.asciidoc +++ b/docs/reference/query-dsl/script-query.asciidoc @@ -13,7 +13,10 @@ GET /_search "bool" : { "must" : { "script" : { - "script" : "doc['num1'].value > 1" + "script" : { + "inline": "doc['num1'].value > 1", + "lang": "painless" + } } } } @@ -38,7 +41,8 @@ GET /_search "must" : { "script" : { "script" : { - "inline" : "doc['num1'].value > param1", + "inline" : "doc['num1'].value > params.param1", + "lang" : "painless", "params" : { "param1" : 5 } diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index 8bab1cf1f5c..488884ef8da 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ b/docs/reference/search/request/rescore.asciidoc @@ -118,7 +118,10 @@ curl -s -XPOST 'localhost:9200/_search' -d '{ "rescore_query" : { "function_score" : { "script_score": { - "script": "log10(doc['numeric'].value + 2)" + "script": { + "lang": "painless", + "inline": "Math.log10(doc['numeric'].value + 2)" + } } } } diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 6e054f02e1c..b544c79e4f2 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -13,10 +13,14 @@ GET /_search }, "script_fields" : { "test1" : { - "script" : "doc['my_field_name'].value * 2" + "script" : { + "lang": "painless", + "inline": "doc['my_field_name'].value * 2" + } }, "test2" : { "script" : { + "lang": "painless", "inline": "doc['my_field_name'].value * factor", "params" : { "factor" : 2.0 diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index fd3dbffc1d6..d0a50f0ebfd 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -378,7 +378,8 @@ GET /_search "_script" : { "type" : "number", "script" : { - "inline": "doc['field_name'].value * factor", + "lang": "painless", + "inline": "doc['field_name'].value * params.factor", "params" : { "factor" : 1.1 } diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 
027b1b370d1..a2e7b921300 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -40,6 +40,7 @@ DOT: '.' -> mode(AFTER_DOT); COMMA: ','; SEMICOLON: ';'; IF: 'if'; +IN: 'in'; ELSE: 'else'; WHILE: 'while'; DO: 'do'; diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.tokens b/modules/lang-painless/src/main/antlr/PainlessLexer.tokens index 8bb8c7e4448..dc62fe36b28 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.tokens +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.tokens @@ -10,75 +10,76 @@ DOT=9 COMMA=10 SEMICOLON=11 IF=12 -ELSE=13 -WHILE=14 -DO=15 -FOR=16 -CONTINUE=17 -BREAK=18 -RETURN=19 -NEW=20 -TRY=21 -CATCH=22 -THROW=23 -THIS=24 -INSTANCEOF=25 -BOOLNOT=26 -BWNOT=27 -MUL=28 -DIV=29 -REM=30 -ADD=31 -SUB=32 -LSH=33 -RSH=34 -USH=35 -LT=36 -LTE=37 -GT=38 -GTE=39 -EQ=40 -EQR=41 -NE=42 -NER=43 -BWAND=44 -XOR=45 -BWOR=46 -BOOLAND=47 -BOOLOR=48 -COND=49 -COLON=50 -REF=51 -ARROW=52 -FIND=53 -MATCH=54 -INCR=55 -DECR=56 -ASSIGN=57 -AADD=58 -ASUB=59 -AMUL=60 -ADIV=61 -AREM=62 -AAND=63 -AXOR=64 -AOR=65 -ALSH=66 -ARSH=67 -AUSH=68 -OCTAL=69 -HEX=70 -INTEGER=71 -DECIMAL=72 -STRING=73 -REGEX=74 -TRUE=75 -FALSE=76 -NULL=77 -TYPE=78 -ID=79 -DOTINTEGER=80 -DOTID=81 +IN=13 +ELSE=14 +WHILE=15 +DO=16 +FOR=17 +CONTINUE=18 +BREAK=19 +RETURN=20 +NEW=21 +TRY=22 +CATCH=23 +THROW=24 +THIS=25 +INSTANCEOF=26 +BOOLNOT=27 +BWNOT=28 +MUL=29 +DIV=30 +REM=31 +ADD=32 +SUB=33 +LSH=34 +RSH=35 +USH=36 +LT=37 +LTE=38 +GT=39 +GTE=40 +EQ=41 +EQR=42 +NE=43 +NER=44 +BWAND=45 +XOR=46 +BWOR=47 +BOOLAND=48 +BOOLOR=49 +COND=50 +COLON=51 +REF=52 +ARROW=53 +FIND=54 +MATCH=55 +INCR=56 +DECR=57 +ASSIGN=58 +AADD=59 +ASUB=60 +AMUL=61 +ADIV=62 +AREM=63 +AAND=64 +AXOR=65 +AOR=66 +ALSH=67 +ARSH=68 +AUSH=69 +OCTAL=70 +HEX=71 +INTEGER=72 +DECIMAL=73 +STRING=74 +REGEX=75 +TRUE=76 +FALSE=77 +NULL=78 +TYPE=79 +ID=80 +DOTINTEGER=81 +DOTID=82 '{'=3 '}'=4 '['=5 @@ -89,62 +90,63 @@ DOTID=81 ','=10 ';'=11 'if'=12 -'else'=13 -'while'=14 -'do'=15 -'for'=16 -'continue'=17 -'break'=18 -'return'=19 -'new'=20 -'try'=21 -'catch'=22 -'throw'=23 -'this'=24 -'instanceof'=25 -'!'=26 -'~'=27 -'*'=28 -'/'=29 -'%'=30 -'+'=31 -'-'=32 -'<<'=33 -'>>'=34 -'>>>'=35 -'<'=36 -'<='=37 -'>'=38 -'>='=39 -'=='=40 -'==='=41 -'!='=42 -'!=='=43 -'&'=44 -'^'=45 -'|'=46 -'&&'=47 -'||'=48 -'?'=49 -':'=50 -'::'=51 -'->'=52 -'=~'=53 -'==~'=54 -'++'=55 -'--'=56 -'='=57 -'+='=58 -'-='=59 -'*='=60 -'/='=61 -'%='=62 -'&='=63 -'^='=64 -'|='=65 -'<<='=66 -'>>='=67 -'>>>='=68 -'true'=75 -'false'=76 -'null'=77 +'in'=13 +'else'=14 +'while'=15 +'do'=16 +'for'=17 +'continue'=18 +'break'=19 +'return'=20 +'new'=21 +'try'=22 +'catch'=23 +'throw'=24 +'this'=25 +'instanceof'=26 +'!'=27 +'~'=28 +'*'=29 +'/'=30 +'%'=31 +'+'=32 +'-'=33 +'<<'=34 +'>>'=35 +'>>>'=36 +'<'=37 +'<='=38 +'>'=39 +'>='=40 +'=='=41 +'==='=42 +'!='=43 +'!=='=44 +'&'=45 +'^'=46 +'|'=47 +'&&'=48 +'||'=49 +'?'=50 +':'=51 +'::'=52 +'->'=53 +'=~'=54 +'==~'=55 +'++'=56 +'--'=57 +'='=58 +'+='=59 +'-='=60 +'*='=61 +'/='=62 +'%='=63 +'&='=64 +'^='=65 +'|='=66 +'<<='=67 +'>>='=68 +'>>>='=69 +'true'=76 +'false'=77 +'null'=78 diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index 7e28b0ad258..b102734a4f4 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -42,6 +42,7 @@ statement | DO block WHILE LP expression RP delimiter # do | FOR LP initializer? SEMICOLON expression? 
SEMICOLON afterthought? RP ( trailer | empty ) # for | FOR LP decltype ID COLON expression RP trailer # each + | FOR LP ID IN expression RP trailer # ineach | declaration delimiter # decl | CONTINUE delimiter # continue | BREAK delimiter # break diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.tokens b/modules/lang-painless/src/main/antlr/PainlessParser.tokens index 8bb8c7e4448..dc62fe36b28 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.tokens +++ b/modules/lang-painless/src/main/antlr/PainlessParser.tokens @@ -10,75 +10,76 @@ DOT=9 COMMA=10 SEMICOLON=11 IF=12 -ELSE=13 -WHILE=14 -DO=15 -FOR=16 -CONTINUE=17 -BREAK=18 -RETURN=19 -NEW=20 -TRY=21 -CATCH=22 -THROW=23 -THIS=24 -INSTANCEOF=25 -BOOLNOT=26 -BWNOT=27 -MUL=28 -DIV=29 -REM=30 -ADD=31 -SUB=32 -LSH=33 -RSH=34 -USH=35 -LT=36 -LTE=37 -GT=38 -GTE=39 -EQ=40 -EQR=41 -NE=42 -NER=43 -BWAND=44 -XOR=45 -BWOR=46 -BOOLAND=47 -BOOLOR=48 -COND=49 -COLON=50 -REF=51 -ARROW=52 -FIND=53 -MATCH=54 -INCR=55 -DECR=56 -ASSIGN=57 -AADD=58 -ASUB=59 -AMUL=60 -ADIV=61 -AREM=62 -AAND=63 -AXOR=64 -AOR=65 -ALSH=66 -ARSH=67 -AUSH=68 -OCTAL=69 -HEX=70 -INTEGER=71 -DECIMAL=72 -STRING=73 -REGEX=74 -TRUE=75 -FALSE=76 -NULL=77 -TYPE=78 -ID=79 -DOTINTEGER=80 -DOTID=81 +IN=13 +ELSE=14 +WHILE=15 +DO=16 +FOR=17 +CONTINUE=18 +BREAK=19 +RETURN=20 +NEW=21 +TRY=22 +CATCH=23 +THROW=24 +THIS=25 +INSTANCEOF=26 +BOOLNOT=27 +BWNOT=28 +MUL=29 +DIV=30 +REM=31 +ADD=32 +SUB=33 +LSH=34 +RSH=35 +USH=36 +LT=37 +LTE=38 +GT=39 +GTE=40 +EQ=41 +EQR=42 +NE=43 +NER=44 +BWAND=45 +XOR=46 +BWOR=47 +BOOLAND=48 +BOOLOR=49 +COND=50 +COLON=51 +REF=52 +ARROW=53 +FIND=54 +MATCH=55 +INCR=56 +DECR=57 +ASSIGN=58 +AADD=59 +ASUB=60 +AMUL=61 +ADIV=62 +AREM=63 +AAND=64 +AXOR=65 +AOR=66 +ALSH=67 +ARSH=68 +AUSH=69 +OCTAL=70 +HEX=71 +INTEGER=72 +DECIMAL=73 +STRING=74 +REGEX=75 +TRUE=76 +FALSE=77 +NULL=78 +TYPE=79 +ID=80 +DOTINTEGER=81 +DOTID=82 '{'=3 '}'=4 '['=5 @@ -89,62 +90,63 @@ DOTID=81 ','=10 ';'=11 'if'=12 -'else'=13 -'while'=14 -'do'=15 -'for'=16 -'continue'=17 -'break'=18 -'return'=19 -'new'=20 -'try'=21 -'catch'=22 -'throw'=23 -'this'=24 -'instanceof'=25 -'!'=26 -'~'=27 -'*'=28 -'/'=29 -'%'=30 -'+'=31 -'-'=32 -'<<'=33 -'>>'=34 -'>>>'=35 -'<'=36 -'<='=37 -'>'=38 -'>='=39 -'=='=40 -'==='=41 -'!='=42 -'!=='=43 -'&'=44 -'^'=45 -'|'=46 -'&&'=47 -'||'=48 -'?'=49 -':'=50 -'::'=51 -'->'=52 -'=~'=53 -'==~'=54 -'++'=55 -'--'=56 -'='=57 -'+='=58 -'-='=59 -'*='=60 -'/='=61 -'%='=62 -'&='=63 -'^='=64 -'|='=65 -'<<='=66 -'>>='=67 -'>>>='=68 -'true'=75 -'false'=76 -'null'=77 +'in'=13 +'else'=14 +'while'=15 +'do'=16 +'for'=17 +'continue'=18 +'break'=19 +'return'=20 +'new'=21 +'try'=22 +'catch'=23 +'throw'=24 +'this'=25 +'instanceof'=26 +'!'=27 +'~'=28 +'*'=29 +'/'=30 +'%'=31 +'+'=32 +'-'=33 +'<<'=34 +'>>'=35 +'>>>'=36 +'<'=37 +'<='=38 +'>'=39 +'>='=40 +'=='=41 +'==='=42 +'!='=43 +'!=='=44 +'&'=45 +'^'=46 +'|'=47 +'&&'=48 +'||'=49 +'?'=50 +':'=51 +'::'=52 +'->'=53 +'=~'=54 +'==~'=55 +'++'=56 +'--'=57 +'='=58 +'+='=59 +'-='=60 +'*='=61 +'/='=62 +'%='=63 +'&='=64 +'^='=65 +'|='=66 +'<<='=67 +'>>='=68 +'>>>='=69 +'true'=76 +'false'=77 +'null'=78 diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java index ae54b6e6271..b02ea085904 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Locals.java @@ -74,7 +74,11 @@ public final class Locals { Locals locals = new Locals(programScope, 
returnType); for (int i = 0; i < parameters.size(); i++) { Parameter parameter = parameters.get(i); - boolean isCapture = i < captureCount; + // TODO: allow non-captures to be r/w: + // boolean isCapture = i < captureCount; + // currently, this cannot be allowed, as we swap in real types, + // but that can prevent a store of a different type... + boolean isCapture = true; locals.addVariable(parameter.location, parameter.type, parameter.name, isCapture); } // Loop counter to catch infinite loops. Internal use only. diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/SimpleChecksAdapter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/SimpleChecksAdapter.java new file mode 100644 index 00000000000..aa6d121945b --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/SimpleChecksAdapter.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.Label; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.util.CheckClassAdapter; +import org.objectweb.asm.util.CheckMethodAdapter; + +import java.util.HashMap; + +/** + * A CheckClassAdapter that does not use setAccessible to try to access private fields of Label! + *

+ * This means jump insns are not checked, but we still get all the other checking. + */ +// TODO: we should really try to get this fixed in ASM! +public class SimpleChecksAdapter extends CheckClassAdapter { + + public SimpleChecksAdapter(ClassVisitor cv) { + super(WriterConstants.ASM_VERSION, cv, false); + } + + @Override + public MethodVisitor visitMethod(int access, String name, String desc, String signature, String[] exceptions) { + MethodVisitor in = cv.visitMethod(access, name, desc, signature, exceptions); + CheckMethodAdapter checker = new CheckMethodAdapter(WriterConstants.ASM_VERSION, in, new HashMap()) { + @Override + public void visitJumpInsn(int opcode, Label label) { + mv.visitJumpInsn(opcode, label); + } + + @Override + public void visitTryCatchBlock(Label start, Label end, Label handler, String type) { + mv.visitTryCatchBlock(start, end, handler, type); + } + }; + checker.version = WriterConstants.CLASS_VERSION; + return checker; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index e2bf804c181..ca549522018 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -43,6 +43,8 @@ import java.util.regex.Pattern; */ public final class WriterConstants { + public final static int CLASS_VERSION = Opcodes.V1_8; + public final static int ASM_VERSION = Opcodes.ASM5; public final static String BASE_CLASS_NAME = Executable.class.getName(); public final static Type BASE_CLASS_TYPE = Type.getType(Executable.class); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 6d791979243..eae7150b9be 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -21,15 +21,16 @@ class PainlessLexer extends Lexer { new PredictionContextCache(); public static final int WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - COMMA=10, SEMICOLON=11, IF=12, ELSE=13, WHILE=14, DO=15, FOR=16, CONTINUE=17, - BREAK=18, RETURN=19, NEW=20, TRY=21, CATCH=22, THROW=23, THIS=24, INSTANCEOF=25, - BOOLNOT=26, BWNOT=27, MUL=28, DIV=29, REM=30, ADD=31, SUB=32, LSH=33, - RSH=34, USH=35, LT=36, LTE=37, GT=38, GTE=39, EQ=40, EQR=41, NE=42, NER=43, - BWAND=44, XOR=45, BWOR=46, BOOLAND=47, BOOLOR=48, COND=49, COLON=50, REF=51, - ARROW=52, FIND=53, MATCH=54, INCR=55, DECR=56, ASSIGN=57, AADD=58, ASUB=59, - AMUL=60, ADIV=61, AREM=62, AAND=63, AXOR=64, AOR=65, ALSH=66, ARSH=67, - AUSH=68, OCTAL=69, HEX=70, INTEGER=71, DECIMAL=72, STRING=73, REGEX=74, - TRUE=75, FALSE=76, NULL=77, TYPE=78, ID=79, DOTINTEGER=80, DOTID=81; + COMMA=10, SEMICOLON=11, IF=12, IN=13, ELSE=14, WHILE=15, DO=16, FOR=17, + CONTINUE=18, BREAK=19, RETURN=20, NEW=21, TRY=22, CATCH=23, THROW=24, + THIS=25, INSTANCEOF=26, BOOLNOT=27, BWNOT=28, MUL=29, DIV=30, REM=31, + ADD=32, SUB=33, LSH=34, RSH=35, USH=36, LT=37, LTE=38, GT=39, GTE=40, + EQ=41, EQR=42, NE=43, NER=44, BWAND=45, XOR=46, BWOR=47, BOOLAND=48, BOOLOR=49, + COND=50, COLON=51, REF=52, ARROW=53, FIND=54, MATCH=55, INCR=56, DECR=57, + ASSIGN=58, AADD=59, ASUB=60, AMUL=61, ADIV=62, AREM=63, AAND=64, AXOR=65, + AOR=66, ALSH=67, 
ARSH=68, AUSH=69, OCTAL=70, HEX=71, INTEGER=72, DECIMAL=73, + STRING=74, REGEX=75, TRUE=76, FALSE=77, NULL=78, TYPE=79, ID=80, DOTINTEGER=81, + DOTID=82; public static final int AFTER_DOT = 1; public static String[] modeNames = { "DEFAULT_MODE", "AFTER_DOT" @@ -37,7 +38,7 @@ class PainlessLexer extends Lexer { public static final String[] ruleNames = { "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", - "COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE", + "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "XOR", "BWOR", @@ -50,25 +51,25 @@ class PainlessLexer extends Lexer { private static final String[] _LITERAL_NAMES = { null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "','", - "';'", "'if'", "'else'", "'while'", "'do'", "'for'", "'continue'", "'break'", - "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", "'instanceof'", - "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", - "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", - "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'::'", "'->'", "'=~'", "'==~'", - "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", "'%='", "'&='", - "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, - null, "'true'", "'false'", "'null'" + "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'::'", "'->'", + "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", + "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, + null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE", - "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", "INSTANCEOF", - "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", - "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "XOR", "BWOR", - "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND", "MATCH", - "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM", "AAND", - "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER", "DECIMAL", - "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", + "DOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", + "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", + "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", + "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", + "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND", + "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM", + "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER", + "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY 
= new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -128,11 +129,11 @@ class PainlessLexer extends Lexer { @Override public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { - case 28: + case 29: return DIV_sempred((RuleContext)_localctx, predIndex); - case 73: + case 74: return REGEX_sempred((RuleContext)_localctx, predIndex); - case 77: + case 78: return TYPE_sempred((RuleContext)_localctx, predIndex); } return true; @@ -160,7 +161,7 @@ class PainlessLexer extends Lexer { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2S\u0246\b\1\b\1\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2T\u024b\b\1\b\1\4"+ "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+ "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+ "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+ @@ -169,201 +170,202 @@ class PainlessLexer extends Lexer { "+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64"+ "\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t"+ "=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4"+ - "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\3\2\6\2\u00a8"+ - "\n\2\r\2\16\2\u00a9\3\2\3\2\3\3\3\3\3\3\3\3\7\3\u00b2\n\3\f\3\16\3\u00b5"+ - "\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00bc\n\3\f\3\16\3\u00bf\13\3\3\3\3\3\5"+ - "\3\u00c3\n\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3"+ - "\n\3\n\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16"+ - "\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\22"+ - "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23"+ - "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26\3\26"+ - "\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31"+ - "\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32"+ - "\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\36\3\37\3\37\3 \3 \3!"+ - "\3!\3\"\3\"\3\"\3#\3#\3#\3$\3$\3$\3$\3%\3%\3&\3&\3&\3\'\3\'\3(\3(\3(\3"+ - ")\3)\3)\3*\3*\3*\3*\3+\3+\3+\3,\3,\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3\60\3"+ - "\60\3\61\3\61\3\61\3\62\3\62\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\65\3"+ - "\66\3\66\3\66\3\67\3\67\3\67\3\67\38\38\38\39\39\39\3:\3:\3;\3;\3;\3<"+ - "\3<\3<\3=\3=\3=\3>\3>\3>\3?\3?\3?\3@\3@\3@\3A\3A\3A\3B\3B\3B\3C\3C\3C"+ - "\3C\3D\3D\3D\3D\3E\3E\3E\3E\3E\3F\3F\6F\u01a7\nF\rF\16F\u01a8\3F\5F\u01ac"+ - "\nF\3G\3G\3G\6G\u01b1\nG\rG\16G\u01b2\3G\5G\u01b6\nG\3H\3H\3H\7H\u01bb"+ - "\nH\fH\16H\u01be\13H\5H\u01c0\nH\3H\5H\u01c3\nH\3I\3I\3I\7I\u01c8\nI\f"+ - "I\16I\u01cb\13I\5I\u01cd\nI\3I\3I\6I\u01d1\nI\rI\16I\u01d2\5I\u01d5\n"+ - "I\3I\3I\5I\u01d9\nI\3I\6I\u01dc\nI\rI\16I\u01dd\5I\u01e0\nI\3I\5I\u01e3"+ - "\nI\3J\3J\3J\3J\3J\3J\7J\u01eb\nJ\fJ\16J\u01ee\13J\3J\3J\3J\3J\3J\3J\3"+ - "J\7J\u01f7\nJ\fJ\16J\u01fa\13J\3J\5J\u01fd\nJ\3K\3K\3K\3K\6K\u0203\nK"+ - "\rK\16K\u0204\3K\3K\7K\u0209\nK\fK\16K\u020c\13K\3K\3K\3L\3L\3L\3L\3L"+ - "\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\7O\u0224\nO\fO\16O\u0227"+ - "\13O\3O\3O\3P\3P\7P\u022d\nP\fP\16P\u0230\13P\3Q\3Q\3Q\7Q\u0235\nQ\fQ"+ - "\16Q\u0238\13Q\5Q\u023a\nQ\3Q\3Q\3R\3R\7R\u0240\nR\fR\16R\u0243\13R\3"+ - "R\3R\6\u00b3\u00bd\u01ec\u01f8\2S\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24"+ - "\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31"+ - "\62\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60"+ - 
"`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086"+ - "D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009a"+ - "N\u009cO\u009eP\u00a0Q\u00a2R\u00a4S\4\2\3\24\5\2\13\f\17\17\"\"\4\2\f"+ - "\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHN"+ - "Nffhhnn\4\2GGgg\4\2--//\4\2HHhh\4\2$$^^\4\2\f\f\61\61\3\2\f\f\t\2WWee"+ - "kknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0266\2\4\3\2\2\2\2\6\3\2\2\2\2\b"+ - "\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2"+ - "\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2"+ - "\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2"+ - "*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2"+ - "\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2"+ - "B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3"+ - "\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2"+ - "\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2"+ - "\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t"+ - "\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080"+ - "\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2"+ - "\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092"+ - "\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098\3\2\2\2\2\u009a\3\2\2"+ - "\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2\2\3\u00a2\3\2\2\2\3\u00a4"+ - "\3\2\2\2\4\u00a7\3\2\2\2\6\u00c2\3\2\2\2\b\u00c6\3\2\2\2\n\u00c8\3\2\2"+ - "\2\f\u00ca\3\2\2\2\16\u00cc\3\2\2\2\20\u00ce\3\2\2\2\22\u00d0\3\2\2\2"+ - "\24\u00d2\3\2\2\2\26\u00d6\3\2\2\2\30\u00d8\3\2\2\2\32\u00da\3\2\2\2\34"+ - "\u00dd\3\2\2\2\36\u00e2\3\2\2\2 \u00e8\3\2\2\2\"\u00eb\3\2\2\2$\u00ef"+ - "\3\2\2\2&\u00f8\3\2\2\2(\u00fe\3\2\2\2*\u0105\3\2\2\2,\u0109\3\2\2\2."+ - "\u010d\3\2\2\2\60\u0113\3\2\2\2\62\u0119\3\2\2\2\64\u011e\3\2\2\2\66\u0129"+ - "\3\2\2\28\u012b\3\2\2\2:\u012d\3\2\2\2<\u012f\3\2\2\2>\u0132\3\2\2\2@"+ - "\u0134\3\2\2\2B\u0136\3\2\2\2D\u0138\3\2\2\2F\u013b\3\2\2\2H\u013e\3\2"+ - "\2\2J\u0142\3\2\2\2L\u0144\3\2\2\2N\u0147\3\2\2\2P\u0149\3\2\2\2R\u014c"+ - "\3\2\2\2T\u014f\3\2\2\2V\u0153\3\2\2\2X\u0156\3\2\2\2Z\u015a\3\2\2\2\\"+ - "\u015c\3\2\2\2^\u015e\3\2\2\2`\u0160\3\2\2\2b\u0163\3\2\2\2d\u0166\3\2"+ - "\2\2f\u0168\3\2\2\2h\u016a\3\2\2\2j\u016d\3\2\2\2l\u0170\3\2\2\2n\u0173"+ - "\3\2\2\2p\u0177\3\2\2\2r\u017a\3\2\2\2t\u017d\3\2\2\2v\u017f\3\2\2\2x"+ - "\u0182\3\2\2\2z\u0185\3\2\2\2|\u0188\3\2\2\2~\u018b\3\2\2\2\u0080\u018e"+ - "\3\2\2\2\u0082\u0191\3\2\2\2\u0084\u0194\3\2\2\2\u0086\u0197\3\2\2\2\u0088"+ - "\u019b\3\2\2\2\u008a\u019f\3\2\2\2\u008c\u01a4\3\2\2\2\u008e\u01ad\3\2"+ - "\2\2\u0090\u01bf\3\2\2\2\u0092\u01cc\3\2\2\2\u0094\u01fc\3\2\2\2\u0096"+ - "\u01fe\3\2\2\2\u0098\u020f\3\2\2\2\u009a\u0214\3\2\2\2\u009c\u021a\3\2"+ - "\2\2\u009e\u021f\3\2\2\2\u00a0\u022a\3\2\2\2\u00a2\u0239\3\2\2\2\u00a4"+ - "\u023d\3\2\2\2\u00a6\u00a8\t\2\2\2\u00a7\u00a6\3\2\2\2\u00a8\u00a9\3\2"+ - "\2\2\u00a9\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab"+ - "\u00ac\b\2\2\2\u00ac\5\3\2\2\2\u00ad\u00ae\7\61\2\2\u00ae\u00af\7\61\2"+ - "\2\u00af\u00b3\3\2\2\2\u00b0\u00b2\13\2\2\2\u00b1\u00b0\3\2\2\2\u00b2"+ - "\u00b5\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b3\u00b1\3\2\2\2\u00b4\u00b6\3\2"+ - "\2\2\u00b5\u00b3\3\2\2\2\u00b6\u00c3\t\3\2\2\u00b7\u00b8\7\61\2\2\u00b8"+ - "\u00b9\7,\2\2\u00b9\u00bd\3\2\2\2\u00ba\u00bc\13\2\2\2\u00bb\u00ba\3\2"+ - 
"\2\2\u00bc\u00bf\3\2\2\2\u00bd\u00be\3\2\2\2\u00bd\u00bb\3\2\2\2\u00be"+ - "\u00c0\3\2\2\2\u00bf\u00bd\3\2\2\2\u00c0\u00c1\7,\2\2\u00c1\u00c3\7\61"+ - "\2\2\u00c2\u00ad\3\2\2\2\u00c2\u00b7\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4"+ - "\u00c5\b\3\2\2\u00c5\7\3\2\2\2\u00c6\u00c7\7}\2\2\u00c7\t\3\2\2\2\u00c8"+ - "\u00c9\7\177\2\2\u00c9\13\3\2\2\2\u00ca\u00cb\7]\2\2\u00cb\r\3\2\2\2\u00cc"+ - "\u00cd\7_\2\2\u00cd\17\3\2\2\2\u00ce\u00cf\7*\2\2\u00cf\21\3\2\2\2\u00d0"+ - "\u00d1\7+\2\2\u00d1\23\3\2\2\2\u00d2\u00d3\7\60\2\2\u00d3\u00d4\3\2\2"+ - "\2\u00d4\u00d5\b\n\3\2\u00d5\25\3\2\2\2\u00d6\u00d7\7.\2\2\u00d7\27\3"+ - "\2\2\2\u00d8\u00d9\7=\2\2\u00d9\31\3\2\2\2\u00da\u00db\7k\2\2\u00db\u00dc"+ - "\7h\2\2\u00dc\33\3\2\2\2\u00dd\u00de\7g\2\2\u00de\u00df\7n\2\2\u00df\u00e0"+ - "\7u\2\2\u00e0\u00e1\7g\2\2\u00e1\35\3\2\2\2\u00e2\u00e3\7y\2\2\u00e3\u00e4"+ - "\7j\2\2\u00e4\u00e5\7k\2\2\u00e5\u00e6\7n\2\2\u00e6\u00e7\7g\2\2\u00e7"+ - "\37\3\2\2\2\u00e8\u00e9\7f\2\2\u00e9\u00ea\7q\2\2\u00ea!\3\2\2\2\u00eb"+ - "\u00ec\7h\2\2\u00ec\u00ed\7q\2\2\u00ed\u00ee\7t\2\2\u00ee#\3\2\2\2\u00ef"+ - "\u00f0\7e\2\2\u00f0\u00f1\7q\2\2\u00f1\u00f2\7p\2\2\u00f2\u00f3\7v\2\2"+ - "\u00f3\u00f4\7k\2\2\u00f4\u00f5\7p\2\2\u00f5\u00f6\7w\2\2\u00f6\u00f7"+ - "\7g\2\2\u00f7%\3\2\2\2\u00f8\u00f9\7d\2\2\u00f9\u00fa\7t\2\2\u00fa\u00fb"+ - "\7g\2\2\u00fb\u00fc\7c\2\2\u00fc\u00fd\7m\2\2\u00fd\'\3\2\2\2\u00fe\u00ff"+ - "\7t\2\2\u00ff\u0100\7g\2\2\u0100\u0101\7v\2\2\u0101\u0102\7w\2\2\u0102"+ - "\u0103\7t\2\2\u0103\u0104\7p\2\2\u0104)\3\2\2\2\u0105\u0106\7p\2\2\u0106"+ - "\u0107\7g\2\2\u0107\u0108\7y\2\2\u0108+\3\2\2\2\u0109\u010a\7v\2\2\u010a"+ - "\u010b\7t\2\2\u010b\u010c\7{\2\2\u010c-\3\2\2\2\u010d\u010e\7e\2\2\u010e"+ - "\u010f\7c\2\2\u010f\u0110\7v\2\2\u0110\u0111\7e\2\2\u0111\u0112\7j\2\2"+ - "\u0112/\3\2\2\2\u0113\u0114\7v\2\2\u0114\u0115\7j\2\2\u0115\u0116\7t\2"+ - "\2\u0116\u0117\7q\2\2\u0117\u0118\7y\2\2\u0118\61\3\2\2\2\u0119\u011a"+ - "\7v\2\2\u011a\u011b\7j\2\2\u011b\u011c\7k\2\2\u011c\u011d\7u\2\2\u011d"+ - "\63\3\2\2\2\u011e\u011f\7k\2\2\u011f\u0120\7p\2\2\u0120\u0121\7u\2\2\u0121"+ - "\u0122\7v\2\2\u0122\u0123\7c\2\2\u0123\u0124\7p\2\2\u0124\u0125\7e\2\2"+ - "\u0125\u0126\7g\2\2\u0126\u0127\7q\2\2\u0127\u0128\7h\2\2\u0128\65\3\2"+ - "\2\2\u0129\u012a\7#\2\2\u012a\67\3\2\2\2\u012b\u012c\7\u0080\2\2\u012c"+ - "9\3\2\2\2\u012d\u012e\7,\2\2\u012e;\3\2\2\2\u012f\u0130\7\61\2\2\u0130"+ - "\u0131\6\36\2\2\u0131=\3\2\2\2\u0132\u0133\7\'\2\2\u0133?\3\2\2\2\u0134"+ - "\u0135\7-\2\2\u0135A\3\2\2\2\u0136\u0137\7/\2\2\u0137C\3\2\2\2\u0138\u0139"+ - "\7>\2\2\u0139\u013a\7>\2\2\u013aE\3\2\2\2\u013b\u013c\7@\2\2\u013c\u013d"+ - "\7@\2\2\u013dG\3\2\2\2\u013e\u013f\7@\2\2\u013f\u0140\7@\2\2\u0140\u0141"+ - "\7@\2\2\u0141I\3\2\2\2\u0142\u0143\7>\2\2\u0143K\3\2\2\2\u0144\u0145\7"+ - ">\2\2\u0145\u0146\7?\2\2\u0146M\3\2\2\2\u0147\u0148\7@\2\2\u0148O\3\2"+ - "\2\2\u0149\u014a\7@\2\2\u014a\u014b\7?\2\2\u014bQ\3\2\2\2\u014c\u014d"+ - "\7?\2\2\u014d\u014e\7?\2\2\u014eS\3\2\2\2\u014f\u0150\7?\2\2\u0150\u0151"+ - "\7?\2\2\u0151\u0152\7?\2\2\u0152U\3\2\2\2\u0153\u0154\7#\2\2\u0154\u0155"+ - "\7?\2\2\u0155W\3\2\2\2\u0156\u0157\7#\2\2\u0157\u0158\7?\2\2\u0158\u0159"+ - "\7?\2\2\u0159Y\3\2\2\2\u015a\u015b\7(\2\2\u015b[\3\2\2\2\u015c\u015d\7"+ - "`\2\2\u015d]\3\2\2\2\u015e\u015f\7~\2\2\u015f_\3\2\2\2\u0160\u0161\7("+ - "\2\2\u0161\u0162\7(\2\2\u0162a\3\2\2\2\u0163\u0164\7~\2\2\u0164\u0165"+ - "\7~\2\2\u0165c\3\2\2\2\u0166\u0167\7A\2\2\u0167e\3\2\2\2\u0168\u0169\7"+ - "<\2\2\u0169g\3\2\2\2\u016a\u016b\7<\2\2\u016b\u016c\7<\2\2\u016ci\3\2"+ - 
"\2\2\u016d\u016e\7/\2\2\u016e\u016f\7@\2\2\u016fk\3\2\2\2\u0170\u0171"+ - "\7?\2\2\u0171\u0172\7\u0080\2\2\u0172m\3\2\2\2\u0173\u0174\7?\2\2\u0174"+ - "\u0175\7?\2\2\u0175\u0176\7\u0080\2\2\u0176o\3\2\2\2\u0177\u0178\7-\2"+ - "\2\u0178\u0179\7-\2\2\u0179q\3\2\2\2\u017a\u017b\7/\2\2\u017b\u017c\7"+ - "/\2\2\u017cs\3\2\2\2\u017d\u017e\7?\2\2\u017eu\3\2\2\2\u017f\u0180\7-"+ - "\2\2\u0180\u0181\7?\2\2\u0181w\3\2\2\2\u0182\u0183\7/\2\2\u0183\u0184"+ - "\7?\2\2\u0184y\3\2\2\2\u0185\u0186\7,\2\2\u0186\u0187\7?\2\2\u0187{\3"+ - "\2\2\2\u0188\u0189\7\61\2\2\u0189\u018a\7?\2\2\u018a}\3\2\2\2\u018b\u018c"+ - "\7\'\2\2\u018c\u018d\7?\2\2\u018d\177\3\2\2\2\u018e\u018f\7(\2\2\u018f"+ - "\u0190\7?\2\2\u0190\u0081\3\2\2\2\u0191\u0192\7`\2\2\u0192\u0193\7?\2"+ - "\2\u0193\u0083\3\2\2\2\u0194\u0195\7~\2\2\u0195\u0196\7?\2\2\u0196\u0085"+ - "\3\2\2\2\u0197\u0198\7>\2\2\u0198\u0199\7>\2\2\u0199\u019a\7?\2\2\u019a"+ - "\u0087\3\2\2\2\u019b\u019c\7@\2\2\u019c\u019d\7@\2\2\u019d\u019e\7?\2"+ - "\2\u019e\u0089\3\2\2\2\u019f\u01a0\7@\2\2\u01a0\u01a1\7@\2\2\u01a1\u01a2"+ - "\7@\2\2\u01a2\u01a3\7?\2\2\u01a3\u008b\3\2\2\2\u01a4\u01a6\7\62\2\2\u01a5"+ - "\u01a7\t\4\2\2\u01a6\u01a5\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01a6\3\2"+ - "\2\2\u01a8\u01a9\3\2\2\2\u01a9\u01ab\3\2\2\2\u01aa\u01ac\t\5\2\2\u01ab"+ - "\u01aa\3\2\2\2\u01ab\u01ac\3\2\2\2\u01ac\u008d\3\2\2\2\u01ad\u01ae\7\62"+ - "\2\2\u01ae\u01b0\t\6\2\2\u01af\u01b1\t\7\2\2\u01b0\u01af\3\2\2\2\u01b1"+ - "\u01b2\3\2\2\2\u01b2\u01b0\3\2\2\2\u01b2\u01b3\3\2\2\2\u01b3\u01b5\3\2"+ - "\2\2\u01b4\u01b6\t\5\2\2\u01b5\u01b4\3\2\2\2\u01b5\u01b6\3\2\2\2\u01b6"+ - "\u008f\3\2\2\2\u01b7\u01c0\7\62\2\2\u01b8\u01bc\t\b\2\2\u01b9\u01bb\t"+ - "\t\2\2\u01ba\u01b9\3\2\2\2\u01bb\u01be\3\2\2\2\u01bc\u01ba\3\2\2\2\u01bc"+ - "\u01bd\3\2\2\2\u01bd\u01c0\3\2\2\2\u01be\u01bc\3\2\2\2\u01bf\u01b7\3\2"+ - "\2\2\u01bf\u01b8\3\2\2\2\u01c0\u01c2\3\2\2\2\u01c1\u01c3\t\n\2\2\u01c2"+ - "\u01c1\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3\u0091\3\2\2\2\u01c4\u01cd\7\62"+ - "\2\2\u01c5\u01c9\t\b\2\2\u01c6\u01c8\t\t\2\2\u01c7\u01c6\3\2\2\2\u01c8"+ - "\u01cb\3\2\2\2\u01c9\u01c7\3\2\2\2\u01c9\u01ca\3\2\2\2\u01ca\u01cd\3\2"+ - "\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01c4\3\2\2\2\u01cc\u01c5\3\2\2\2\u01cd"+ - "\u01d4\3\2\2\2\u01ce\u01d0\5\24\n\2\u01cf\u01d1\t\t\2\2\u01d0\u01cf\3"+ - "\2\2\2\u01d1\u01d2\3\2\2\2\u01d2\u01d0\3\2\2\2\u01d2\u01d3\3\2\2\2\u01d3"+ - "\u01d5\3\2\2\2\u01d4\u01ce\3\2\2\2\u01d4\u01d5\3\2\2\2\u01d5\u01df\3\2"+ - "\2\2\u01d6\u01d8\t\13\2\2\u01d7\u01d9\t\f\2\2\u01d8\u01d7\3\2\2\2\u01d8"+ - "\u01d9\3\2\2\2\u01d9\u01db\3\2\2\2\u01da\u01dc\t\t\2\2\u01db\u01da\3\2"+ - "\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01db\3\2\2\2\u01dd\u01de\3\2\2\2\u01de"+ - "\u01e0\3\2\2\2\u01df\u01d6\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01e2\3\2"+ - "\2\2\u01e1\u01e3\t\r\2\2\u01e2\u01e1\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3"+ - "\u0093\3\2\2\2\u01e4\u01ec\7$\2\2\u01e5\u01e6\7^\2\2\u01e6\u01eb\7$\2"+ - "\2\u01e7\u01e8\7^\2\2\u01e8\u01eb\7^\2\2\u01e9\u01eb\n\16\2\2\u01ea\u01e5"+ - "\3\2\2\2\u01ea\u01e7\3\2\2\2\u01ea\u01e9\3\2\2\2\u01eb\u01ee\3\2\2\2\u01ec"+ - "\u01ed\3\2\2\2\u01ec\u01ea\3\2\2\2\u01ed\u01ef\3\2\2\2\u01ee\u01ec\3\2"+ - "\2\2\u01ef\u01fd\7$\2\2\u01f0\u01f8\7)\2\2\u01f1\u01f2\7^\2\2\u01f2\u01f7"+ - "\7)\2\2\u01f3\u01f4\7^\2\2\u01f4\u01f7\7^\2\2\u01f5\u01f7\n\16\2\2\u01f6"+ - "\u01f1\3\2\2\2\u01f6\u01f3\3\2\2\2\u01f6\u01f5\3\2\2\2\u01f7\u01fa\3\2"+ - "\2\2\u01f8\u01f9\3\2\2\2\u01f8\u01f6\3\2\2\2\u01f9\u01fb\3\2\2\2\u01fa"+ - "\u01f8\3\2\2\2\u01fb\u01fd\7)\2\2\u01fc\u01e4\3\2\2\2\u01fc\u01f0\3\2"+ - 
"\2\2\u01fd\u0095\3\2\2\2\u01fe\u0202\7\61\2\2\u01ff\u0203\n\17\2\2\u0200"+ - "\u0201\7^\2\2\u0201\u0203\n\20\2\2\u0202\u01ff\3\2\2\2\u0202\u0200\3\2"+ - "\2\2\u0203\u0204\3\2\2\2\u0204\u0202\3\2\2\2\u0204\u0205\3\2\2\2\u0205"+ - "\u0206\3\2\2\2\u0206\u020a\7\61\2\2\u0207\u0209\t\21\2\2\u0208\u0207\3"+ - "\2\2\2\u0209\u020c\3\2\2\2\u020a\u0208\3\2\2\2\u020a\u020b\3\2\2\2\u020b"+ - "\u020d\3\2\2\2\u020c\u020a\3\2\2\2\u020d\u020e\6K\3\2\u020e\u0097\3\2"+ - "\2\2\u020f\u0210\7v\2\2\u0210\u0211\7t\2\2\u0211\u0212\7w\2\2\u0212\u0213"+ - "\7g\2\2\u0213\u0099\3\2\2\2\u0214\u0215\7h\2\2\u0215\u0216\7c\2\2\u0216"+ - "\u0217\7n\2\2\u0217\u0218\7u\2\2\u0218\u0219\7g\2\2\u0219\u009b\3\2\2"+ - "\2\u021a\u021b\7p\2\2\u021b\u021c\7w\2\2\u021c\u021d\7n\2\2\u021d\u021e"+ - "\7n\2\2\u021e\u009d\3\2\2\2\u021f\u0225\5\u00a0P\2\u0220\u0221\5\24\n"+ - "\2\u0221\u0222\5\u00a0P\2\u0222\u0224\3\2\2\2\u0223\u0220\3\2\2\2\u0224"+ - "\u0227\3\2\2\2\u0225\u0223\3\2\2\2\u0225\u0226\3\2\2\2\u0226\u0228\3\2"+ - "\2\2\u0227\u0225\3\2\2\2\u0228\u0229\6O\4\2\u0229\u009f\3\2\2\2\u022a"+ - "\u022e\t\22\2\2\u022b\u022d\t\23\2\2\u022c\u022b\3\2\2\2\u022d\u0230\3"+ - "\2\2\2\u022e\u022c\3\2\2\2\u022e\u022f\3\2\2\2\u022f\u00a1\3\2\2\2\u0230"+ - "\u022e\3\2\2\2\u0231\u023a\7\62\2\2\u0232\u0236\t\b\2\2\u0233\u0235\t"+ - "\t\2\2\u0234\u0233\3\2\2\2\u0235\u0238\3\2\2\2\u0236\u0234\3\2\2\2\u0236"+ - "\u0237\3\2\2\2\u0237\u023a\3\2\2\2\u0238\u0236\3\2\2\2\u0239\u0231\3\2"+ - "\2\2\u0239\u0232\3\2\2\2\u023a\u023b\3\2\2\2\u023b\u023c\bQ\4\2\u023c"+ - "\u00a3\3\2\2\2\u023d\u0241\t\22\2\2\u023e\u0240\t\23\2\2\u023f\u023e\3"+ - "\2\2\2\u0240\u0243\3\2\2\2\u0241\u023f\3\2\2\2\u0241\u0242\3\2\2\2\u0242"+ - "\u0244\3\2\2\2\u0243\u0241\3\2\2\2\u0244\u0245\bR\4\2\u0245\u00a5\3\2"+ - "\2\2$\2\3\u00a9\u00b3\u00bd\u00c2\u01a8\u01ab\u01b2\u01b5\u01bc\u01bf"+ - "\u01c2\u01c9\u01cc\u01d2\u01d4\u01d8\u01dd\u01df\u01e2\u01ea\u01ec\u01f6"+ - "\u01f8\u01fc\u0202\u0204\u020a\u0225\u022e\u0236\u0239\u0241\5\b\2\2\4"+ - "\3\2\4\2\2"; + "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\3\2\6"+ + "\2\u00aa\n\2\r\2\16\2\u00ab\3\2\3\2\3\3\3\3\3\3\3\3\7\3\u00b4\n\3\f\3"+ + "\16\3\u00b7\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00be\n\3\f\3\16\3\u00c1\13\3"+ + "\3\3\3\3\5\3\u00c5\n\3\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b"+ + "\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16"+ + "\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21"+ + "\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24"+ + "\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26"+ + "\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31\3\31"+ + "\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33"+ + "\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37"+ + "\3\37\3 \3 \3!\3!\3\"\3\"\3#\3#\3#\3$\3$\3$\3%\3%\3%\3%\3&\3&\3\'\3\'"+ + "\3\'\3(\3(\3)\3)\3)\3*\3*\3*\3+\3+\3+\3+\3,\3,\3,\3-\3-\3-\3-\3.\3.\3"+ + "/\3/\3\60\3\60\3\61\3\61\3\61\3\62\3\62\3\62\3\63\3\63\3\64\3\64\3\65"+ + "\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\38\38\38\38\39\39\39\3:\3:\3"+ + ":\3;\3;\3<\3<\3<\3=\3=\3=\3>\3>\3>\3?\3?\3?\3@\3@\3@\3A\3A\3A\3B\3B\3"+ + "B\3C\3C\3C\3D\3D\3D\3D\3E\3E\3E\3E\3F\3F\3F\3F\3F\3G\3G\6G\u01ac\nG\r"+ + "G\16G\u01ad\3G\5G\u01b1\nG\3H\3H\3H\6H\u01b6\nH\rH\16H\u01b7\3H\5H\u01bb"+ + "\nH\3I\3I\3I\7I\u01c0\nI\fI\16I\u01c3\13I\5I\u01c5\nI\3I\5I\u01c8\nI\3"+ + "J\3J\3J\7J\u01cd\nJ\fJ\16J\u01d0\13J\5J\u01d2\nJ\3J\3J\6J\u01d6\nJ\rJ"+ + 
"\16J\u01d7\5J\u01da\nJ\3J\3J\5J\u01de\nJ\3J\6J\u01e1\nJ\rJ\16J\u01e2\5"+ + "J\u01e5\nJ\3J\5J\u01e8\nJ\3K\3K\3K\3K\3K\3K\7K\u01f0\nK\fK\16K\u01f3\13"+ + "K\3K\3K\3K\3K\3K\3K\3K\7K\u01fc\nK\fK\16K\u01ff\13K\3K\5K\u0202\nK\3L"+ + "\3L\3L\3L\6L\u0208\nL\rL\16L\u0209\3L\3L\7L\u020e\nL\fL\16L\u0211\13L"+ + "\3L\3L\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3P\3P\3P\3P\7P"+ + "\u0229\nP\fP\16P\u022c\13P\3P\3P\3Q\3Q\7Q\u0232\nQ\fQ\16Q\u0235\13Q\3"+ + "R\3R\3R\7R\u023a\nR\fR\16R\u023d\13R\5R\u023f\nR\3R\3R\3S\3S\7S\u0245"+ + "\nS\fS\16S\u0248\13S\3S\3S\6\u00b5\u00bf\u01f1\u01fd\2T\4\3\6\4\b\5\n"+ + "\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21\"\22$\23&"+ + "\24(\25*\26,\27.\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"D#F$H%J"+ + "&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v|?"+ + "~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090I\u0092"+ + "J\u0094K\u0096L\u0098M\u009aN\u009cO\u009eP\u00a0Q\u00a2R\u00a4S\u00a6"+ + "T\4\2\3\24\5\2\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5"+ + "\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2--//\4\2HHhh\4\2"+ + "$$^^\4\2\f\f\61\61\3\2\f\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac"+ + "|\u026b\2\4\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2"+ + "\16\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3"+ + "\2\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2"+ + "$\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60"+ + "\3\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2"+ + "\2<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H"+ + "\3\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2"+ + "\2\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2"+ + "\2\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2"+ + "n\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3"+ + "\2\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3"+ + "\2\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2"+ + "\2\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096"+ + "\3\2\2\2\2\u0098\3\2\2\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2"+ + "\2\2\u00a0\3\2\2\2\2\u00a2\3\2\2\2\3\u00a4\3\2\2\2\3\u00a6\3\2\2\2\4\u00a9"+ + "\3\2\2\2\6\u00c4\3\2\2\2\b\u00c8\3\2\2\2\n\u00ca\3\2\2\2\f\u00cc\3\2\2"+ + "\2\16\u00ce\3\2\2\2\20\u00d0\3\2\2\2\22\u00d2\3\2\2\2\24\u00d4\3\2\2\2"+ + "\26\u00d8\3\2\2\2\30\u00da\3\2\2\2\32\u00dc\3\2\2\2\34\u00df\3\2\2\2\36"+ + "\u00e2\3\2\2\2 \u00e7\3\2\2\2\"\u00ed\3\2\2\2$\u00f0\3\2\2\2&\u00f4\3"+ + "\2\2\2(\u00fd\3\2\2\2*\u0103\3\2\2\2,\u010a\3\2\2\2.\u010e\3\2\2\2\60"+ + "\u0112\3\2\2\2\62\u0118\3\2\2\2\64\u011e\3\2\2\2\66\u0123\3\2\2\28\u012e"+ + "\3\2\2\2:\u0130\3\2\2\2<\u0132\3\2\2\2>\u0134\3\2\2\2@\u0137\3\2\2\2B"+ + "\u0139\3\2\2\2D\u013b\3\2\2\2F\u013d\3\2\2\2H\u0140\3\2\2\2J\u0143\3\2"+ + "\2\2L\u0147\3\2\2\2N\u0149\3\2\2\2P\u014c\3\2\2\2R\u014e\3\2\2\2T\u0151"+ + "\3\2\2\2V\u0154\3\2\2\2X\u0158\3\2\2\2Z\u015b\3\2\2\2\\\u015f\3\2\2\2"+ + "^\u0161\3\2\2\2`\u0163\3\2\2\2b\u0165\3\2\2\2d\u0168\3\2\2\2f\u016b\3"+ + "\2\2\2h\u016d\3\2\2\2j\u016f\3\2\2\2l\u0172\3\2\2\2n\u0175\3\2\2\2p\u0178"+ + "\3\2\2\2r\u017c\3\2\2\2t\u017f\3\2\2\2v\u0182\3\2\2\2x\u0184\3\2\2\2z"+ + "\u0187\3\2\2\2|\u018a\3\2\2\2~\u018d\3\2\2\2\u0080\u0190\3\2\2\2\u0082"+ + "\u0193\3\2\2\2\u0084\u0196\3\2\2\2\u0086\u0199\3\2\2\2\u0088\u019c\3\2"+ + 
"\2\2\u008a\u01a0\3\2\2\2\u008c\u01a4\3\2\2\2\u008e\u01a9\3\2\2\2\u0090"+ + "\u01b2\3\2\2\2\u0092\u01c4\3\2\2\2\u0094\u01d1\3\2\2\2\u0096\u0201\3\2"+ + "\2\2\u0098\u0203\3\2\2\2\u009a\u0214\3\2\2\2\u009c\u0219\3\2\2\2\u009e"+ + "\u021f\3\2\2\2\u00a0\u0224\3\2\2\2\u00a2\u022f\3\2\2\2\u00a4\u023e\3\2"+ + "\2\2\u00a6\u0242\3\2\2\2\u00a8\u00aa\t\2\2\2\u00a9\u00a8\3\2\2\2\u00aa"+ + "\u00ab\3\2\2\2\u00ab\u00a9\3\2\2\2\u00ab\u00ac\3\2\2\2\u00ac\u00ad\3\2"+ + "\2\2\u00ad\u00ae\b\2\2\2\u00ae\5\3\2\2\2\u00af\u00b0\7\61\2\2\u00b0\u00b1"+ + "\7\61\2\2\u00b1\u00b5\3\2\2\2\u00b2\u00b4\13\2\2\2\u00b3\u00b2\3\2\2\2"+ + "\u00b4\u00b7\3\2\2\2\u00b5\u00b6\3\2\2\2\u00b5\u00b3\3\2\2\2\u00b6\u00b8"+ + "\3\2\2\2\u00b7\u00b5\3\2\2\2\u00b8\u00c5\t\3\2\2\u00b9\u00ba\7\61\2\2"+ + "\u00ba\u00bb\7,\2\2\u00bb\u00bf\3\2\2\2\u00bc\u00be\13\2\2\2\u00bd\u00bc"+ + "\3\2\2\2\u00be\u00c1\3\2\2\2\u00bf\u00c0\3\2\2\2\u00bf\u00bd\3\2\2\2\u00c0"+ + "\u00c2\3\2\2\2\u00c1\u00bf\3\2\2\2\u00c2\u00c3\7,\2\2\u00c3\u00c5\7\61"+ + "\2\2\u00c4\u00af\3\2\2\2\u00c4\u00b9\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6"+ + "\u00c7\b\3\2\2\u00c7\7\3\2\2\2\u00c8\u00c9\7}\2\2\u00c9\t\3\2\2\2\u00ca"+ + "\u00cb\7\177\2\2\u00cb\13\3\2\2\2\u00cc\u00cd\7]\2\2\u00cd\r\3\2\2\2\u00ce"+ + "\u00cf\7_\2\2\u00cf\17\3\2\2\2\u00d0\u00d1\7*\2\2\u00d1\21\3\2\2\2\u00d2"+ + "\u00d3\7+\2\2\u00d3\23\3\2\2\2\u00d4\u00d5\7\60\2\2\u00d5\u00d6\3\2\2"+ + "\2\u00d6\u00d7\b\n\3\2\u00d7\25\3\2\2\2\u00d8\u00d9\7.\2\2\u00d9\27\3"+ + "\2\2\2\u00da\u00db\7=\2\2\u00db\31\3\2\2\2\u00dc\u00dd\7k\2\2\u00dd\u00de"+ + "\7h\2\2\u00de\33\3\2\2\2\u00df\u00e0\7k\2\2\u00e0\u00e1\7p\2\2\u00e1\35"+ + "\3\2\2\2\u00e2\u00e3\7g\2\2\u00e3\u00e4\7n\2\2\u00e4\u00e5\7u\2\2\u00e5"+ + "\u00e6\7g\2\2\u00e6\37\3\2\2\2\u00e7\u00e8\7y\2\2\u00e8\u00e9\7j\2\2\u00e9"+ + "\u00ea\7k\2\2\u00ea\u00eb\7n\2\2\u00eb\u00ec\7g\2\2\u00ec!\3\2\2\2\u00ed"+ + "\u00ee\7f\2\2\u00ee\u00ef\7q\2\2\u00ef#\3\2\2\2\u00f0\u00f1\7h\2\2\u00f1"+ + "\u00f2\7q\2\2\u00f2\u00f3\7t\2\2\u00f3%\3\2\2\2\u00f4\u00f5\7e\2\2\u00f5"+ + "\u00f6\7q\2\2\u00f6\u00f7\7p\2\2\u00f7\u00f8\7v\2\2\u00f8\u00f9\7k\2\2"+ + "\u00f9\u00fa\7p\2\2\u00fa\u00fb\7w\2\2\u00fb\u00fc\7g\2\2\u00fc\'\3\2"+ + "\2\2\u00fd\u00fe\7d\2\2\u00fe\u00ff\7t\2\2\u00ff\u0100\7g\2\2\u0100\u0101"+ + "\7c\2\2\u0101\u0102\7m\2\2\u0102)\3\2\2\2\u0103\u0104\7t\2\2\u0104\u0105"+ + "\7g\2\2\u0105\u0106\7v\2\2\u0106\u0107\7w\2\2\u0107\u0108\7t\2\2\u0108"+ + "\u0109\7p\2\2\u0109+\3\2\2\2\u010a\u010b\7p\2\2\u010b\u010c\7g\2\2\u010c"+ + "\u010d\7y\2\2\u010d-\3\2\2\2\u010e\u010f\7v\2\2\u010f\u0110\7t\2\2\u0110"+ + "\u0111\7{\2\2\u0111/\3\2\2\2\u0112\u0113\7e\2\2\u0113\u0114\7c\2\2\u0114"+ + "\u0115\7v\2\2\u0115\u0116\7e\2\2\u0116\u0117\7j\2\2\u0117\61\3\2\2\2\u0118"+ + "\u0119\7v\2\2\u0119\u011a\7j\2\2\u011a\u011b\7t\2\2\u011b\u011c\7q\2\2"+ + "\u011c\u011d\7y\2\2\u011d\63\3\2\2\2\u011e\u011f\7v\2\2\u011f\u0120\7"+ + "j\2\2\u0120\u0121\7k\2\2\u0121\u0122\7u\2\2\u0122\65\3\2\2\2\u0123\u0124"+ + "\7k\2\2\u0124\u0125\7p\2\2\u0125\u0126\7u\2\2\u0126\u0127\7v\2\2\u0127"+ + "\u0128\7c\2\2\u0128\u0129\7p\2\2\u0129\u012a\7e\2\2\u012a\u012b\7g\2\2"+ + "\u012b\u012c\7q\2\2\u012c\u012d\7h\2\2\u012d\67\3\2\2\2\u012e\u012f\7"+ + "#\2\2\u012f9\3\2\2\2\u0130\u0131\7\u0080\2\2\u0131;\3\2\2\2\u0132\u0133"+ + "\7,\2\2\u0133=\3\2\2\2\u0134\u0135\7\61\2\2\u0135\u0136\6\37\2\2\u0136"+ + "?\3\2\2\2\u0137\u0138\7\'\2\2\u0138A\3\2\2\2\u0139\u013a\7-\2\2\u013a"+ + "C\3\2\2\2\u013b\u013c\7/\2\2\u013cE\3\2\2\2\u013d\u013e\7>\2\2\u013e\u013f"+ + "\7>\2\2\u013fG\3\2\2\2\u0140\u0141\7@\2\2\u0141\u0142\7@\2\2\u0142I\3"+ + 
"\2\2\2\u0143\u0144\7@\2\2\u0144\u0145\7@\2\2\u0145\u0146\7@\2\2\u0146"+ + "K\3\2\2\2\u0147\u0148\7>\2\2\u0148M\3\2\2\2\u0149\u014a\7>\2\2\u014a\u014b"+ + "\7?\2\2\u014bO\3\2\2\2\u014c\u014d\7@\2\2\u014dQ\3\2\2\2\u014e\u014f\7"+ + "@\2\2\u014f\u0150\7?\2\2\u0150S\3\2\2\2\u0151\u0152\7?\2\2\u0152\u0153"+ + "\7?\2\2\u0153U\3\2\2\2\u0154\u0155\7?\2\2\u0155\u0156\7?\2\2\u0156\u0157"+ + "\7?\2\2\u0157W\3\2\2\2\u0158\u0159\7#\2\2\u0159\u015a\7?\2\2\u015aY\3"+ + "\2\2\2\u015b\u015c\7#\2\2\u015c\u015d\7?\2\2\u015d\u015e\7?\2\2\u015e"+ + "[\3\2\2\2\u015f\u0160\7(\2\2\u0160]\3\2\2\2\u0161\u0162\7`\2\2\u0162_"+ + "\3\2\2\2\u0163\u0164\7~\2\2\u0164a\3\2\2\2\u0165\u0166\7(\2\2\u0166\u0167"+ + "\7(\2\2\u0167c\3\2\2\2\u0168\u0169\7~\2\2\u0169\u016a\7~\2\2\u016ae\3"+ + "\2\2\2\u016b\u016c\7A\2\2\u016cg\3\2\2\2\u016d\u016e\7<\2\2\u016ei\3\2"+ + "\2\2\u016f\u0170\7<\2\2\u0170\u0171\7<\2\2\u0171k\3\2\2\2\u0172\u0173"+ + "\7/\2\2\u0173\u0174\7@\2\2\u0174m\3\2\2\2\u0175\u0176\7?\2\2\u0176\u0177"+ + "\7\u0080\2\2\u0177o\3\2\2\2\u0178\u0179\7?\2\2\u0179\u017a\7?\2\2\u017a"+ + "\u017b\7\u0080\2\2\u017bq\3\2\2\2\u017c\u017d\7-\2\2\u017d\u017e\7-\2"+ + "\2\u017es\3\2\2\2\u017f\u0180\7/\2\2\u0180\u0181\7/\2\2\u0181u\3\2\2\2"+ + "\u0182\u0183\7?\2\2\u0183w\3\2\2\2\u0184\u0185\7-\2\2\u0185\u0186\7?\2"+ + "\2\u0186y\3\2\2\2\u0187\u0188\7/\2\2\u0188\u0189\7?\2\2\u0189{\3\2\2\2"+ + "\u018a\u018b\7,\2\2\u018b\u018c\7?\2\2\u018c}\3\2\2\2\u018d\u018e\7\61"+ + "\2\2\u018e\u018f\7?\2\2\u018f\177\3\2\2\2\u0190\u0191\7\'\2\2\u0191\u0192"+ + "\7?\2\2\u0192\u0081\3\2\2\2\u0193\u0194\7(\2\2\u0194\u0195\7?\2\2\u0195"+ + "\u0083\3\2\2\2\u0196\u0197\7`\2\2\u0197\u0198\7?\2\2\u0198\u0085\3\2\2"+ + "\2\u0199\u019a\7~\2\2\u019a\u019b\7?\2\2\u019b\u0087\3\2\2\2\u019c\u019d"+ + "\7>\2\2\u019d\u019e\7>\2\2\u019e\u019f\7?\2\2\u019f\u0089\3\2\2\2\u01a0"+ + "\u01a1\7@\2\2\u01a1\u01a2\7@\2\2\u01a2\u01a3\7?\2\2\u01a3\u008b\3\2\2"+ + "\2\u01a4\u01a5\7@\2\2\u01a5\u01a6\7@\2\2\u01a6\u01a7\7@\2\2\u01a7\u01a8"+ + "\7?\2\2\u01a8\u008d\3\2\2\2\u01a9\u01ab\7\62\2\2\u01aa\u01ac\t\4\2\2\u01ab"+ + "\u01aa\3\2\2\2\u01ac\u01ad\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ad\u01ae\3\2"+ + "\2\2\u01ae\u01b0\3\2\2\2\u01af\u01b1\t\5\2\2\u01b0\u01af\3\2\2\2\u01b0"+ + "\u01b1\3\2\2\2\u01b1\u008f\3\2\2\2\u01b2\u01b3\7\62\2\2\u01b3\u01b5\t"+ + "\6\2\2\u01b4\u01b6\t\7\2\2\u01b5\u01b4\3\2\2\2\u01b6\u01b7\3\2\2\2\u01b7"+ + "\u01b5\3\2\2\2\u01b7\u01b8\3\2\2\2\u01b8\u01ba\3\2\2\2\u01b9\u01bb\t\5"+ + "\2\2\u01ba\u01b9\3\2\2\2\u01ba\u01bb\3\2\2\2\u01bb\u0091\3\2\2\2\u01bc"+ + "\u01c5\7\62\2\2\u01bd\u01c1\t\b\2\2\u01be\u01c0\t\t\2\2\u01bf\u01be\3"+ + "\2\2\2\u01c0\u01c3\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2"+ + "\u01c5\3\2\2\2\u01c3\u01c1\3\2\2\2\u01c4\u01bc\3\2\2\2\u01c4\u01bd\3\2"+ + "\2\2\u01c5\u01c7\3\2\2\2\u01c6\u01c8\t\n\2\2\u01c7\u01c6\3\2\2\2\u01c7"+ + "\u01c8\3\2\2\2\u01c8\u0093\3\2\2\2\u01c9\u01d2\7\62\2\2\u01ca\u01ce\t"+ + "\b\2\2\u01cb\u01cd\t\t\2\2\u01cc\u01cb\3\2\2\2\u01cd\u01d0\3\2\2\2\u01ce"+ + "\u01cc\3\2\2\2\u01ce\u01cf\3\2\2\2\u01cf\u01d2\3\2\2\2\u01d0\u01ce\3\2"+ + "\2\2\u01d1\u01c9\3\2\2\2\u01d1\u01ca\3\2\2\2\u01d2\u01d9\3\2\2\2\u01d3"+ + "\u01d5\5\24\n\2\u01d4\u01d6\t\t\2\2\u01d5\u01d4\3\2\2\2\u01d6\u01d7\3"+ + "\2\2\2\u01d7\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8\u01da\3\2\2\2\u01d9"+ + "\u01d3\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\u01e4\3\2\2\2\u01db\u01dd\t\13"+ + "\2\2\u01dc\u01de\t\f\2\2\u01dd\u01dc\3\2\2\2\u01dd\u01de\3\2\2\2\u01de"+ + "\u01e0\3\2\2\2\u01df\u01e1\t\t\2\2\u01e0\u01df\3\2\2\2\u01e1\u01e2\3\2"+ + 
"\2\2\u01e2\u01e0\3\2\2\2\u01e2\u01e3\3\2\2\2\u01e3\u01e5\3\2\2\2\u01e4"+ + "\u01db\3\2\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e7\3\2\2\2\u01e6\u01e8\t\r"+ + "\2\2\u01e7\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u0095\3\2\2\2\u01e9"+ + "\u01f1\7$\2\2\u01ea\u01eb\7^\2\2\u01eb\u01f0\7$\2\2\u01ec\u01ed\7^\2\2"+ + "\u01ed\u01f0\7^\2\2\u01ee\u01f0\n\16\2\2\u01ef\u01ea\3\2\2\2\u01ef\u01ec"+ + "\3\2\2\2\u01ef\u01ee\3\2\2\2\u01f0\u01f3\3\2\2\2\u01f1\u01f2\3\2\2\2\u01f1"+ + "\u01ef\3\2\2\2\u01f2\u01f4\3\2\2\2\u01f3\u01f1\3\2\2\2\u01f4\u0202\7$"+ + "\2\2\u01f5\u01fd\7)\2\2\u01f6\u01f7\7^\2\2\u01f7\u01fc\7)\2\2\u01f8\u01f9"+ + "\7^\2\2\u01f9\u01fc\7^\2\2\u01fa\u01fc\n\16\2\2\u01fb\u01f6\3\2\2\2\u01fb"+ + "\u01f8\3\2\2\2\u01fb\u01fa\3\2\2\2\u01fc\u01ff\3\2\2\2\u01fd\u01fe\3\2"+ + "\2\2\u01fd\u01fb\3\2\2\2\u01fe\u0200\3\2\2\2\u01ff\u01fd\3\2\2\2\u0200"+ + "\u0202\7)\2\2\u0201\u01e9\3\2\2\2\u0201\u01f5\3\2\2\2\u0202\u0097\3\2"+ + "\2\2\u0203\u0207\7\61\2\2\u0204\u0208\n\17\2\2\u0205\u0206\7^\2\2\u0206"+ + "\u0208\n\20\2\2\u0207\u0204\3\2\2\2\u0207\u0205\3\2\2\2\u0208\u0209\3"+ + "\2\2\2\u0209\u0207\3\2\2\2\u0209\u020a\3\2\2\2\u020a\u020b\3\2\2\2\u020b"+ + "\u020f\7\61\2\2\u020c\u020e\t\21\2\2\u020d\u020c\3\2\2\2\u020e\u0211\3"+ + "\2\2\2\u020f\u020d\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0212\3\2\2\2\u0211"+ + "\u020f\3\2\2\2\u0212\u0213\6L\3\2\u0213\u0099\3\2\2\2\u0214\u0215\7v\2"+ + "\2\u0215\u0216\7t\2\2\u0216\u0217\7w\2\2\u0217\u0218\7g\2\2\u0218\u009b"+ + "\3\2\2\2\u0219\u021a\7h\2\2\u021a\u021b\7c\2\2\u021b\u021c\7n\2\2\u021c"+ + "\u021d\7u\2\2\u021d\u021e\7g\2\2\u021e\u009d\3\2\2\2\u021f\u0220\7p\2"+ + "\2\u0220\u0221\7w\2\2\u0221\u0222\7n\2\2\u0222\u0223\7n\2\2\u0223\u009f"+ + "\3\2\2\2\u0224\u022a\5\u00a2Q\2\u0225\u0226\5\24\n\2\u0226\u0227\5\u00a2"+ + "Q\2\u0227\u0229\3\2\2\2\u0228\u0225\3\2\2\2\u0229\u022c\3\2\2\2\u022a"+ + "\u0228\3\2\2\2\u022a\u022b\3\2\2\2\u022b\u022d\3\2\2\2\u022c\u022a\3\2"+ + "\2\2\u022d\u022e\6P\4\2\u022e\u00a1\3\2\2\2\u022f\u0233\t\22\2\2\u0230"+ + "\u0232\t\23\2\2\u0231\u0230\3\2\2\2\u0232\u0235\3\2\2\2\u0233\u0231\3"+ + "\2\2\2\u0233\u0234\3\2\2\2\u0234\u00a3\3\2\2\2\u0235\u0233\3\2\2\2\u0236"+ + "\u023f\7\62\2\2\u0237\u023b\t\b\2\2\u0238\u023a\t\t\2\2\u0239\u0238\3"+ + "\2\2\2\u023a\u023d\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023c\3\2\2\2\u023c"+ + "\u023f\3\2\2\2\u023d\u023b\3\2\2\2\u023e\u0236\3\2\2\2\u023e\u0237\3\2"+ + "\2\2\u023f\u0240\3\2\2\2\u0240\u0241\bR\4\2\u0241\u00a5\3\2\2\2\u0242"+ + "\u0246\t\22\2\2\u0243\u0245\t\23\2\2\u0244\u0243\3\2\2\2\u0245\u0248\3"+ + "\2\2\2\u0246\u0244\3\2\2\2\u0246\u0247\3\2\2\2\u0247\u0249\3\2\2\2\u0248"+ + "\u0246\3\2\2\2\u0249\u024a\bS\4\2\u024a\u00a7\3\2\2\2$\2\3\u00ab\u00b5"+ + "\u00bf\u00c4\u01ad\u01b0\u01b7\u01ba\u01c1\u01c4\u01c7\u01ce\u01d1\u01d7"+ + "\u01d9\u01dd\u01e2\u01e4\u01e7\u01ef\u01f1\u01fb\u01fd\u0201\u0207\u0209"+ + "\u020f\u022a\u0233\u023b\u023e\u0246\5\b\2\2\4\3\2\4\2\2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index c1b698a9530..933a5f35dcd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -18,15 +18,16 @@ class PainlessParser extends Parser { new PredictionContextCache(); public static final int WS=1, COMMENT=2, 
LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - COMMA=10, SEMICOLON=11, IF=12, ELSE=13, WHILE=14, DO=15, FOR=16, CONTINUE=17, - BREAK=18, RETURN=19, NEW=20, TRY=21, CATCH=22, THROW=23, THIS=24, INSTANCEOF=25, - BOOLNOT=26, BWNOT=27, MUL=28, DIV=29, REM=30, ADD=31, SUB=32, LSH=33, - RSH=34, USH=35, LT=36, LTE=37, GT=38, GTE=39, EQ=40, EQR=41, NE=42, NER=43, - BWAND=44, XOR=45, BWOR=46, BOOLAND=47, BOOLOR=48, COND=49, COLON=50, REF=51, - ARROW=52, FIND=53, MATCH=54, INCR=55, DECR=56, ASSIGN=57, AADD=58, ASUB=59, - AMUL=60, ADIV=61, AREM=62, AAND=63, AXOR=64, AOR=65, ALSH=66, ARSH=67, - AUSH=68, OCTAL=69, HEX=70, INTEGER=71, DECIMAL=72, STRING=73, REGEX=74, - TRUE=75, FALSE=76, NULL=77, TYPE=78, ID=79, DOTINTEGER=80, DOTID=81; + COMMA=10, SEMICOLON=11, IF=12, IN=13, ELSE=14, WHILE=15, DO=16, FOR=17, + CONTINUE=18, BREAK=19, RETURN=20, NEW=21, TRY=22, CATCH=23, THROW=24, + THIS=25, INSTANCEOF=26, BOOLNOT=27, BWNOT=28, MUL=29, DIV=30, REM=31, + ADD=32, SUB=33, LSH=34, RSH=35, USH=36, LT=37, LTE=38, GT=39, GTE=40, + EQ=41, EQR=42, NE=43, NER=44, BWAND=45, XOR=46, BWOR=47, BOOLAND=48, BOOLOR=49, + COND=50, COLON=51, REF=52, ARROW=53, FIND=54, MATCH=55, INCR=56, DECR=57, + ASSIGN=58, AADD=59, ASUB=60, AMUL=61, ADIV=62, AREM=63, AAND=64, AXOR=65, + AOR=66, ALSH=67, ARSH=68, AUSH=69, OCTAL=70, HEX=71, INTEGER=72, DECIMAL=73, + STRING=74, REGEX=75, TRUE=76, FALSE=77, NULL=78, TYPE=79, ID=80, DOTINTEGER=81, + DOTID=82; public static final int RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, RULE_trailer = 4, RULE_block = 5, RULE_empty = 6, RULE_initializer = 7, @@ -48,25 +49,25 @@ class PainlessParser extends Parser { private static final String[] _LITERAL_NAMES = { null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "','", - "';'", "'if'", "'else'", "'while'", "'do'", "'for'", "'continue'", "'break'", - "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", "'instanceof'", - "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", "'>>'", "'>>>'", - "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", "'!=='", "'&'", - "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'::'", "'->'", "'=~'", "'==~'", - "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", "'%='", "'&='", - "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, - null, "'true'", "'false'", "'null'" + "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'::'", "'->'", + "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", "'/='", + "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, + null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "COMMA", "SEMICOLON", "IF", "ELSE", "WHILE", "DO", "FOR", "CONTINUE", - "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", "INSTANCEOF", - "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", "RSH", "USH", - "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", "XOR", "BWOR", - "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND", "MATCH", - "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM", "AAND", - "AXOR", 
"AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER", "DECIMAL", - "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", + "DOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", + "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", + "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", + "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", + "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "REF", "ARROW", "FIND", + "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", "AREM", + "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", "INTEGER", + "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -364,33 +365,6 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } - public static class ThrowContext extends StatementContext { - public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ThrowContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitThrow(this); - else return visitor.visitChildren(this); - } - } - public static class ContinueContext extends StatementContext { - public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ContinueContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitContinue(this); - else return visitor.visitChildren(this); - } - } public static class ForContext extends StatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } @@ -421,38 +395,6 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } - public static class TryContext extends StatementContext { - public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); } - public BlockContext block() { - return getRuleContext(BlockContext.class,0); - } - public List trap() { - return getRuleContexts(TrapContext.class); - } - public TrapContext trap(int i) { - return getRuleContext(TrapContext.class,i); - } - public TryContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTry(this); - else return visitor.visitChildren(this); - } - } - public static class ExprContext extends StatementContext { - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ExprContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitExpr(this); - else return 
visitor.visitChildren(this); - } - } public static class DoContext extends StatementContext { public TerminalNode DO() { return getToken(PainlessParser.DO, 0); } public BlockContext block() { @@ -494,24 +436,22 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } - public static class IfContext extends StatementContext { - public TerminalNode IF() { return getToken(PainlessParser.IF, 0); } + public static class IneachContext extends StatementContext { + public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode IN() { return getToken(PainlessParser.IN, 0); } public ExpressionContext expression() { return getRuleContext(ExpressionContext.class,0); } public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } - public List trailer() { - return getRuleContexts(TrailerContext.class); + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); } - public TrailerContext trailer(int i) { - return getRuleContext(TrailerContext.class,i); - } - public TerminalNode ELSE() { return getToken(PainlessParser.ELSE, 0); } - public IfContext(StatementContext ctx) { copyFrom(ctx); } + public IneachContext(StatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIf(this); + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIneach(this); else return visitor.visitChildren(this); } } @@ -537,6 +477,86 @@ class PainlessParser extends Parser { else return visitor.visitChildren(this); } } + public static class ThrowContext extends StatementContext { + public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public DelimiterContext delimiter() { + return getRuleContext(DelimiterContext.class,0); + } + public ThrowContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitThrow(this); + else return visitor.visitChildren(this); + } + } + public static class ContinueContext extends StatementContext { + public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); } + public DelimiterContext delimiter() { + return getRuleContext(DelimiterContext.class,0); + } + public ContinueContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitContinue(this); + else return visitor.visitChildren(this); + } + } + public static class TryContext extends StatementContext { + public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public List trap() { + return getRuleContexts(TrapContext.class); + } + public TrapContext trap(int i) { + return getRuleContext(TrapContext.class,i); + } + public TryContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTry(this); + else return 
visitor.visitChildren(this); + } + } + public static class ExprContext extends StatementContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public DelimiterContext delimiter() { + return getRuleContext(DelimiterContext.class,0); + } + public ExprContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitExpr(this); + else return visitor.visitChildren(this); + } + } + public static class IfContext extends StatementContext { + public TerminalNode IF() { return getToken(PainlessParser.IF, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List trailer() { + return getRuleContexts(TrailerContext.class); + } + public TrailerContext trailer(int i) { + return getRuleContext(TrailerContext.class,i); + } + public TerminalNode ELSE() { return getToken(PainlessParser.ELSE, 0); } + public IfContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIf(this); + else return visitor.visitChildren(this); + } + } public static class ReturnContext extends StatementContext { public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); } public ExpressionContext expression() { @@ -558,7 +578,7 @@ class PainlessParser extends Parser { enterRule(_localctx, 6, RULE_statement); try { int _alt; - setState(181); + setState(189); switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { case 1: _localctx = new IfContext(_localctx); @@ -723,56 +743,76 @@ class PainlessParser extends Parser { } break; case 6: - _localctx = new DeclContext(_localctx); + _localctx = new IneachContext(_localctx); enterOuterAlt(_localctx, 6); { setState(156); - declaration(); + match(FOR); setState(157); - delimiter(); + match(LP); + setState(158); + match(ID); + setState(159); + match(IN); + setState(160); + expression(0); + setState(161); + match(RP); + setState(162); + trailer(); } break; case 7: - _localctx = new ContinueContext(_localctx); + _localctx = new DeclContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(159); - match(CONTINUE); - setState(160); - delimiter(); - } - break; - case 8: - _localctx = new BreakContext(_localctx); - enterOuterAlt(_localctx, 8); - { - setState(161); - match(BREAK); - setState(162); - delimiter(); - } - break; - case 9: - _localctx = new ReturnContext(_localctx); - enterOuterAlt(_localctx, 9); - { - setState(163); - match(RETURN); setState(164); - expression(0); + declaration(); setState(165); delimiter(); } break; - case 10: - _localctx = new TryContext(_localctx); - enterOuterAlt(_localctx, 10); + case 8: + _localctx = new ContinueContext(_localctx); + enterOuterAlt(_localctx, 8); { setState(167); - match(TRY); + match(CONTINUE); setState(168); + delimiter(); + } + break; + case 9: + _localctx = new BreakContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(169); + match(BREAK); + setState(170); + delimiter(); + } + break; + case 10: + _localctx = new ReturnContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(171); + match(RETURN); + setState(172); + expression(0); + setState(173); + delimiter(); + } + break; + 
case 11: + _localctx = new TryContext(_localctx); + enterOuterAlt(_localctx, 11); + { + setState(175); + match(TRY); + setState(176); block(); - setState(170); + setState(178); _errHandler.sync(this); _alt = 1; do { @@ -780,7 +820,7 @@ class PainlessParser extends Parser { case 1: { { - setState(169); + setState(177); trap(); } } @@ -788,31 +828,31 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(172); + setState(180); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,10,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; - case 11: + case 12: _localctx = new ThrowContext(_localctx); - enterOuterAlt(_localctx, 11); + enterOuterAlt(_localctx, 12); { - setState(174); + setState(182); match(THROW); - setState(175); + setState(183); expression(0); - setState(176); + setState(184); delimiter(); } break; - case 12: + case 13: _localctx = new ExprContext(_localctx); - enterOuterAlt(_localctx, 12); + enterOuterAlt(_localctx, 13); { - setState(178); + setState(186); expression(0); - setState(179); + setState(187); delimiter(); } break; @@ -851,19 +891,19 @@ class PainlessParser extends Parser { TrailerContext _localctx = new TrailerContext(_ctx, getState()); enterRule(_localctx, 8, RULE_trailer); try { - setState(185); + setState(193); switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(183); + setState(191); block(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(184); + setState(192); statement(); } break; @@ -907,25 +947,25 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(187); + setState(195); match(LBRACK); - setState(191); + setState(199); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,13,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(188); + setState(196); statement(); } } } - setState(193); + setState(201); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,13,_ctx); } - setState(194); + setState(202); match(RBRACK); } } @@ -959,7 +999,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(196); + setState(204); match(SEMICOLON); } } @@ -996,19 +1036,19 @@ class PainlessParser extends Parser { InitializerContext _localctx = new InitializerContext(_ctx, getState()); enterRule(_localctx, 14, RULE_initializer); try { - setState(200); + setState(208); switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(198); + setState(206); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(199); + setState(207); expression(0); } break; @@ -1046,7 +1086,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(202); + setState(210); expression(0); } } @@ -1093,23 +1133,23 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(204); + setState(212); decltype(); - setState(205); + setState(213); declvar(); - setState(210); + setState(218); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(206); + setState(214); match(COMMA); - setState(207); + setState(215); declvar(); } } - setState(212); + setState(220); _errHandler.sync(this); _la = _input.LA(1); } @@ -1154,23 +1194,23 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - 
setState(213); + setState(221); match(TYPE); - setState(218); + setState(226); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,16,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(214); + setState(222); match(LBRACE); - setState(215); + setState(223); match(RBRACE); } } } - setState(220); + setState(228); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,16,_ctx); } @@ -1211,15 +1251,15 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(221); + setState(229); match(ID); - setState(224); + setState(232); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(222); + setState(230); match(ASSIGN); - setState(223); + setState(231); expression(0); } } @@ -1263,17 +1303,17 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(226); + setState(234); match(CATCH); - setState(227); + setState(235); match(LP); - setState(228); + setState(236); match(TYPE); - setState(229); + setState(237); match(ID); - setState(230); + setState(238); match(RP); - setState(231); + setState(239); block(); } } @@ -1309,7 +1349,7 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(233); + setState(241); _la = _input.LA(1); if ( !(_la==EOF || _la==SEMICOLON) ) { _errHandler.recoverInline(this); @@ -1495,7 +1535,7 @@ class PainlessParser extends Parser { int _alt; enterOuterAlt(_localctx, 1); { - setState(244); + setState(252); switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { case 1: { @@ -1503,16 +1543,16 @@ class PainlessParser extends Parser { _ctx = _localctx; _prevctx = _localctx; - setState(236); + setState(244); chain(true); - setState(237); + setState(245); _la = _input.LA(1); - if ( !(((((_la - 57)) & ~0x3f) == 0 && ((1L << (_la - 57)) & ((1L << (ASSIGN - 57)) | (1L << (AADD - 57)) | (1L << (ASUB - 57)) | (1L << (AMUL - 57)) | (1L << (ADIV - 57)) | (1L << (AREM - 57)) | (1L << (AAND - 57)) | (1L << (AXOR - 57)) | (1L << (AOR - 57)) | (1L << (ALSH - 57)) | (1L << (ARSH - 57)) | (1L << (AUSH - 57)))) != 0)) ) { + if ( !(((((_la - 58)) & ~0x3f) == 0 && ((1L << (_la - 58)) & ((1L << (ASSIGN - 58)) | (1L << (AADD - 58)) | (1L << (ASUB - 58)) | (1L << (AMUL - 58)) | (1L << (ADIV - 58)) | (1L << (AREM - 58)) | (1L << (AAND - 58)) | (1L << (AXOR - 58)) | (1L << (AOR - 58)) | (1L << (ALSH - 58)) | (1L << (ARSH - 58)) | (1L << (AUSH - 58)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(238); + setState(246); expression(1); ((AssignmentContext)_localctx).s = false; } @@ -1522,14 +1562,14 @@ class PainlessParser extends Parser { _localctx = new SingleContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(241); + setState(249); ((SingleContext)_localctx).u = unary(false); ((SingleContext)_localctx).s = ((SingleContext)_localctx).u.s; } break; } _ctx.stop = _input.LT(-1); - setState(315); + setState(323); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -1537,22 +1577,22 @@ class PainlessParser extends Parser { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(313); + setState(321); switch ( getInterpreter().adaptivePredict(_input,19,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - 
setState(246); + setState(254); if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)"); - setState(247); + setState(255); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(248); + setState(256); expression(15); ((BinaryContext)_localctx).s = false; } @@ -1561,16 +1601,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(251); + setState(259); if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); - setState(252); + setState(260); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(253); + setState(261); expression(14); ((BinaryContext)_localctx).s = false; } @@ -1579,16 +1619,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(256); + setState(264); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); - setState(257); + setState(265); _la = _input.LA(1); if ( !(_la==FIND || _la==MATCH) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(258); + setState(266); expression(13); ((BinaryContext)_localctx).s = false; } @@ -1597,16 +1637,16 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(261); + setState(269); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(262); + setState(270); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(263); + setState(271); expression(12); ((BinaryContext)_localctx).s = false; } @@ -1615,16 +1655,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(266); + setState(274); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(267); + setState(275); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(268); + setState(276); expression(11); ((CompContext)_localctx).s = false; } @@ -1633,16 +1673,16 @@ class PainlessParser extends Parser { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(271); + setState(279); if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(272); + setState(280); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(273); + setState(281); expression(9); ((CompContext)_localctx).s = false; } @@ -1651,11 +1691,11 @@ class PainlessParser extends Parser { { _localctx = 
new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(276); + setState(284); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(277); + setState(285); match(BWAND); - setState(278); + setState(286); expression(8); ((BinaryContext)_localctx).s = false; } @@ -1664,11 +1704,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(281); + setState(289); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(282); + setState(290); match(XOR); - setState(283); + setState(291); expression(7); ((BinaryContext)_localctx).s = false; } @@ -1677,11 +1717,11 @@ class PainlessParser extends Parser { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(286); + setState(294); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(287); + setState(295); match(BWOR); - setState(288); + setState(296); expression(6); ((BinaryContext)_localctx).s = false; } @@ -1690,11 +1730,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(291); + setState(299); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(292); + setState(300); match(BOOLAND); - setState(293); + setState(301); expression(5); ((BoolContext)_localctx).s = false; } @@ -1703,11 +1743,11 @@ class PainlessParser extends Parser { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(296); + setState(304); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(297); + setState(305); match(BOOLOR); - setState(298); + setState(306); expression(4); ((BoolContext)_localctx).s = false; } @@ -1716,15 +1756,15 @@ class PainlessParser extends Parser { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(301); + setState(309); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(302); + setState(310); match(COND); - setState(303); + setState(311); ((ConditionalContext)_localctx).e0 = expression(0); - setState(304); + setState(312); match(COLON); - setState(305); + setState(313); ((ConditionalContext)_localctx).e1 = expression(2); ((ConditionalContext)_localctx).s = ((ConditionalContext)_localctx).e0.s && ((ConditionalContext)_localctx).e1.s; } @@ -1733,11 +1773,11 @@ class PainlessParser extends Parser { { _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(308); + setState(316); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(309); + setState(317); match(INSTANCEOF); - setState(310); + setState(318); decltype(); ((InstanceofContext)_localctx).s = false; } @@ -1745,7 +1785,7 @@ class PainlessParser extends Parser { } } } - setState(317); + 
setState(325); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,20,_ctx); } @@ -1914,22 +1954,22 @@ class PainlessParser extends Parser { enterRule(_localctx, 30, RULE_unary); int _la; try { - setState(355); + setState(363); switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(318); + setState(326); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(319); + setState(327); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(320); + setState(328); chain(true); } break; @@ -1937,11 +1977,11 @@ class PainlessParser extends Parser { _localctx = new PostContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(321); + setState(329); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(322); + setState(330); chain(true); - setState(323); + setState(331); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -1954,9 +1994,9 @@ class PainlessParser extends Parser { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(325); + setState(333); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(326); + setState(334); chain(false); } break; @@ -1964,11 +2004,11 @@ class PainlessParser extends Parser { _localctx = new NumericContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(327); + setState(335); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(328); + setState(336); _la = _input.LA(1); - if ( !(((((_la - 69)) & ~0x3f) == 0 && ((1L << (_la - 69)) & ((1L << (OCTAL - 69)) | (1L << (HEX - 69)) | (1L << (INTEGER - 69)) | (1L << (DECIMAL - 69)))) != 0)) ) { + if ( !(((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OCTAL - 70)) | (1L << (HEX - 70)) | (1L << (INTEGER - 70)) | (1L << (DECIMAL - 70)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -1980,9 +2020,9 @@ class PainlessParser extends Parser { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(330); + setState(338); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(331); + setState(339); match(TRUE); ((TrueContext)_localctx).s = false; } @@ -1991,9 +2031,9 @@ class PainlessParser extends Parser { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(333); + setState(341); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(334); + setState(342); match(FALSE); ((FalseContext)_localctx).s = false; } @@ -2002,9 +2042,9 @@ class PainlessParser extends Parser { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(336); + setState(344); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(337); + setState(345); match(NULL); ((NullContext)_localctx).s = false; } @@ -2013,9 +2053,9 @@ class PainlessParser extends Parser { _localctx = new ListinitContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(339); + setState(347); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(340); + setState(348); listinitializer(); ((ListinitContext)_localctx).s = false; } @@ -2024,9 +2064,9 @@ class PainlessParser extends Parser { _localctx = new MapinitContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(343); 
+ setState(351); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(344); + setState(352); mapinitializer(); ((MapinitContext)_localctx).s = false; } @@ -2035,16 +2075,16 @@ class PainlessParser extends Parser { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(347); + setState(355); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(348); + setState(356); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(349); + setState(357); unary(false); } break; @@ -2052,13 +2092,13 @@ class PainlessParser extends Parser { _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(350); + setState(358); match(LP); - setState(351); + setState(359); decltype(); - setState(352); + setState(360); match(RP); - setState(353); + setState(361); unary(_localctx.c); } break; @@ -2145,27 +2185,27 @@ class PainlessParser extends Parser { enterRule(_localctx, 32, RULE_chain); try { int _alt; - setState(373); + setState(381); switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(357); + setState(365); ((DynamicContext)_localctx).p = primary(_localctx.c); - setState(361); + setState(369); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(358); + setState(366); secondary(((DynamicContext)_localctx).p.s); } } } - setState(363); + setState(371); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,22,_ctx); } @@ -2175,23 +2215,23 @@ class PainlessParser extends Parser { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(364); + setState(372); decltype(); - setState(365); + setState(373); dot(); - setState(369); + setState(377); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,23,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(366); + setState(374); secondary(true); } } } - setState(371); + setState(379); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,23,_ctx); } @@ -2201,7 +2241,7 @@ class PainlessParser extends Parser { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(372); + setState(380); arrayinitializer(); } break; @@ -2319,19 +2359,19 @@ class PainlessParser extends Parser { PrimaryContext _localctx = new PrimaryContext(_ctx, getState(), c); enterRule(_localctx, 34, RULE_primary); try { - setState(394); + setState(402); switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) { case 1: _localctx = new ExprprecContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(375); + setState(383); if (!( !_localctx.c )) throw new FailedPredicateException(this, " !$c "); - setState(376); + setState(384); match(LP); - setState(377); + setState(385); ((ExprprecContext)_localctx).e = expression(0); - setState(378); + setState(386); match(RP); ((ExprprecContext)_localctx).s = ((ExprprecContext)_localctx).e.s; } @@ -2340,13 +2380,13 @@ class PainlessParser extends Parser { _localctx = new ChainprecContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(381); + setState(389); if (!( _localctx.c )) 
throw new FailedPredicateException(this, " $c "); - setState(382); + setState(390); match(LP); - setState(383); + setState(391); unary(true); - setState(384); + setState(392); match(RP); } break; @@ -2354,7 +2394,7 @@ class PainlessParser extends Parser { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(386); + setState(394); match(STRING); } break; @@ -2362,7 +2402,7 @@ class PainlessParser extends Parser { _localctx = new RegexContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(387); + setState(395); match(REGEX); } break; @@ -2370,7 +2410,7 @@ class PainlessParser extends Parser { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(388); + setState(396); match(ID); } break; @@ -2378,9 +2418,9 @@ class PainlessParser extends Parser { _localctx = new CalllocalContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(389); + setState(397); match(ID); - setState(390); + setState(398); arguments(); } break; @@ -2388,11 +2428,11 @@ class PainlessParser extends Parser { _localctx = new NewobjectContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(391); + setState(399); match(NEW); - setState(392); + setState(400); match(TYPE); - setState(393); + setState(401); arguments(); } break; @@ -2434,23 +2474,23 @@ class PainlessParser extends Parser { SecondaryContext _localctx = new SecondaryContext(_ctx, getState(), s); enterRule(_localctx, 36, RULE_secondary); try { - setState(400); + setState(408); switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(396); + setState(404); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(397); + setState(405); dot(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(398); + setState(406); if (!( _localctx.s )) throw new FailedPredicateException(this, " $s "); - setState(399); + setState(407); brace(); } break; @@ -2508,17 +2548,17 @@ class PainlessParser extends Parser { enterRule(_localctx, 38, RULE_dot); int _la; try { - setState(407); + setState(415); switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: _localctx = new CallinvokeContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(402); + setState(410); match(DOT); - setState(403); + setState(411); match(DOTID); - setState(404); + setState(412); arguments(); } break; @@ -2526,9 +2566,9 @@ class PainlessParser extends Parser { _localctx = new FieldaccessContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(405); + setState(413); match(DOT); - setState(406); + setState(414); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2582,11 +2622,11 @@ class PainlessParser extends Parser { _localctx = new BraceaccessContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(409); + setState(417); match(LBRACE); - setState(410); + setState(418); expression(0); - setState(411); + setState(419); match(RBRACE); } } @@ -2633,34 +2673,34 @@ class PainlessParser extends Parser { enterOuterAlt(_localctx, 1); { { - setState(413); + setState(421); match(LP); - setState(422); + setState(430); switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: { - setState(414); + setState(422); argument(); - setState(419); + setState(427); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(415); + setState(423); match(COMMA); - setState(416); + setState(424); argument(); } } - setState(421); + setState(429); 
_errHandler.sync(this); _la = _input.LA(1); } } break; } - setState(424); + setState(432); match(RP); } } @@ -2701,26 +2741,26 @@ class PainlessParser extends Parser { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); enterRule(_localctx, 44, RULE_argument); try { - setState(429); + setState(437); switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(426); + setState(434); expression(0); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(427); + setState(435); lambda(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(428); + setState(436); funcref(); } break; @@ -2775,64 +2815,64 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(444); + setState(452); switch (_input.LA(1)) { case TYPE: case ID: { - setState(431); + setState(439); lamtype(); } break; case LP: { - setState(432); + setState(440); match(LP); - setState(441); + setState(449); _la = _input.LA(1); if (_la==TYPE || _la==ID) { { - setState(433); + setState(441); lamtype(); - setState(438); + setState(446); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(434); + setState(442); match(COMMA); - setState(435); + setState(443); lamtype(); } } - setState(440); + setState(448); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(443); + setState(451); match(RP); } break; default: throw new NoViableAltException(this); } - setState(446); + setState(454); match(ARROW); - setState(449); + setState(457); switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) { case 1: { - setState(447); + setState(455); block(); } break; case 2: { - setState(448); + setState(456); expression(0); } break; @@ -2873,16 +2913,16 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(452); + setState(460); _la = _input.LA(1); if (_la==TYPE) { { - setState(451); + setState(459); decltype(); } } - setState(454); + setState(462); match(ID); } } @@ -2925,33 +2965,33 @@ class PainlessParser extends Parser { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); enterRule(_localctx, 50, RULE_funcref); try { - setState(460); + setState(468); switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(456); + setState(464); classFuncref(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(457); + setState(465); constructorFuncref(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(458); + setState(466); capturingFuncref(); } break; case 4: enterOuterAlt(_localctx, 4); { - setState(459); + setState(467); localFuncref(); } break; @@ -2989,11 +3029,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(462); + setState(470); match(TYPE); - setState(463); + setState(471); match(REF); - setState(464); + setState(472); match(ID); } } @@ -3031,11 +3071,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(466); + setState(474); decltype(); - setState(467); + setState(475); match(REF); - setState(468); + setState(476); match(NEW); } } @@ -3073,11 +3113,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(470); + setState(478); match(ID); - setState(471); + setState(479); match(REF); - setState(472); + setState(480); match(ID); } } @@ -3113,11 +3153,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(474); + setState(482); match(THIS); - 
setState(475); + setState(483); match(REF); - setState(476); + setState(484); match(ID); } } @@ -3208,17 +3248,17 @@ class PainlessParser extends Parser { int _la; try { int _alt; - setState(516); + setState(524); switch ( getInterpreter().adaptivePredict(_input,43,_ctx) ) { case 1: _localctx = new NewstandardarrayContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(478); + setState(486); match(NEW); - setState(479); + setState(487); match(TYPE); - setState(484); + setState(492); _errHandler.sync(this); _alt = 1; do { @@ -3226,11 +3266,11 @@ class PainlessParser extends Parser { case 1: { { - setState(480); + setState(488); match(LBRACE); - setState(481); + setState(489); expression(0); - setState(482); + setState(490); match(RBRACE); } } @@ -3238,29 +3278,29 @@ class PainlessParser extends Parser { default: throw new NoViableAltException(this); } - setState(486); + setState(494); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,37,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(495); + setState(503); switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { case 1: { - setState(488); + setState(496); dot(); - setState(492); + setState(500); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,38,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(489); + setState(497); secondary(true); } } } - setState(494); + setState(502); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,38,_ctx); } @@ -3273,51 +3313,51 @@ class PainlessParser extends Parser { _localctx = new NewinitializedarrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(497); + setState(505); match(NEW); - setState(498); + setState(506); match(TYPE); - setState(499); + setState(507); match(LBRACE); - setState(500); + setState(508); match(RBRACE); - setState(501); + setState(509); match(LBRACK); - setState(510); + setState(518); switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { case 1: { - setState(502); + setState(510); expression(0); - setState(507); + setState(515); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(503); + setState(511); match(COMMA); - setState(504); + setState(512); expression(0); } } - setState(509); + setState(517); _errHandler.sync(this); _la = _input.LA(1); } } break; } - setState(513); + setState(521); _la = _input.LA(1); if (_la==SEMICOLON) { { - setState(512); + setState(520); match(SEMICOLON); } } - setState(515); + setState(523); match(RBRACK); } break; @@ -3363,41 +3403,41 @@ class PainlessParser extends Parser { enterRule(_localctx, 62, RULE_listinitializer); int _la; try { - setState(531); + setState(539); switch ( getInterpreter().adaptivePredict(_input,45,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(518); + setState(526); match(LBRACE); - setState(519); + setState(527); expression(0); - setState(524); + setState(532); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(520); + setState(528); match(COMMA); - setState(521); + setState(529); expression(0); } } - setState(526); + setState(534); _errHandler.sync(this); _la = _input.LA(1); } - setState(527); + setState(535); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(529); + setState(537); match(LBRACE); - setState(530); + setState(538); match(RBRACE); } break; @@ -3444,43 +3484,43 @@ class PainlessParser extends Parser { enterRule(_localctx, 
64, RULE_mapinitializer); int _la; try { - setState(547); + setState(555); switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(533); + setState(541); match(LBRACE); - setState(534); + setState(542); maptoken(); - setState(539); + setState(547); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(535); + setState(543); match(COMMA); - setState(536); + setState(544); maptoken(); } } - setState(541); + setState(549); _errHandler.sync(this); _la = _input.LA(1); } - setState(542); + setState(550); match(RBRACE); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(544); + setState(552); match(LBRACE); - setState(545); + setState(553); match(COLON); - setState(546); + setState(554); match(RBRACE); } break; @@ -3522,11 +3562,11 @@ class PainlessParser extends Parser { try { enterOuterAlt(_localctx, 1); { - setState(549); + setState(557); expression(0); - setState(550); + setState(558); match(COLON); - setState(551); + setState(559); expression(0); } } @@ -3639,7 +3679,7 @@ class PainlessParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3S\u022c\4\2\t\2\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3T\u0234\4\2\t\2\4"+ "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -3650,204 +3690,207 @@ class PainlessParser extends Parser { "\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5z\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3"+ "\5\3\5\3\5\5\5\u0087\n\5\3\5\3\5\5\5\u008b\n\5\3\5\3\5\5\5\u008f\n\5\3"+ "\5\3\5\3\5\5\5\u0094\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3"+ - "\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\6\5\u00ad\n\5\r\5\16\5"+ - "\u00ae\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5\u00b8\n\5\3\6\3\6\5\6\u00bc\n\6"+ - "\3\7\3\7\7\7\u00c0\n\7\f\7\16\7\u00c3\13\7\3\7\3\7\3\b\3\b\3\t\3\t\5\t"+ - "\u00cb\n\t\3\n\3\n\3\13\3\13\3\13\3\13\7\13\u00d3\n\13\f\13\16\13\u00d6"+ - "\13\13\3\f\3\f\3\f\7\f\u00db\n\f\f\f\16\f\u00de\13\f\3\r\3\r\3\r\5\r\u00e3"+ - "\n\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\5\20\u00f7\n\20\3\20\3\20\3\20\3\20\3\20\3\20"+ + "\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5"+ + "\3\5\3\5\6\5\u00b5\n\5\r\5\16\5\u00b6\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5"+ + "\u00c0\n\5\3\6\3\6\5\6\u00c4\n\6\3\7\3\7\7\7\u00c8\n\7\f\7\16\7\u00cb"+ + "\13\7\3\7\3\7\3\b\3\b\3\t\3\t\5\t\u00d3\n\t\3\n\3\n\3\13\3\13\3\13\3\13"+ + "\7\13\u00db\n\13\f\13\16\13\u00de\13\13\3\f\3\f\3\f\7\f\u00e3\n\f\f\f"+ + "\16\f\u00e6\13\f\3\r\3\r\3\r\5\r\u00eb\n\r\3\16\3\16\3\16\3\16\3\16\3"+ + "\16\3\16\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\5\20\u00ff"+ + "\n\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\7\20\u013c\n\20\f\20\16\20\u013f\13\20\3\21"+ - "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - 
"\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\5\21\u0166\n\21\3\22\3\22\7\22"+ - "\u016a\n\22\f\22\16\22\u016d\13\22\3\22\3\22\3\22\7\22\u0172\n\22\f\22"+ - "\16\22\u0175\13\22\3\22\5\22\u0178\n\22\3\23\3\23\3\23\3\23\3\23\3\23"+ - "\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\5\23"+ - "\u018d\n\23\3\24\3\24\3\24\3\24\5\24\u0193\n\24\3\25\3\25\3\25\3\25\3"+ - "\25\5\25\u019a\n\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\7\27\u01a4"+ - "\n\27\f\27\16\27\u01a7\13\27\5\27\u01a9\n\27\3\27\3\27\3\30\3\30\3\30"+ - "\5\30\u01b0\n\30\3\31\3\31\3\31\3\31\3\31\7\31\u01b7\n\31\f\31\16\31\u01ba"+ - "\13\31\5\31\u01bc\n\31\3\31\5\31\u01bf\n\31\3\31\3\31\3\31\5\31\u01c4"+ - "\n\31\3\32\5\32\u01c7\n\32\3\32\3\32\3\33\3\33\3\33\3\33\5\33\u01cf\n"+ - "\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\37\3"+ - "\37\3\37\3\37\3 \3 \3 \3 \3 \3 \6 \u01e7\n \r \16 \u01e8\3 \3 \7 \u01ed"+ - "\n \f \16 \u01f0\13 \5 \u01f2\n \3 \3 \3 \3 \3 \3 \3 \3 \7 \u01fc\n \f"+ - " \16 \u01ff\13 \5 \u0201\n \3 \5 \u0204\n \3 \5 \u0207\n \3!\3!\3!\3!"+ - "\7!\u020d\n!\f!\16!\u0210\13!\3!\3!\3!\3!\5!\u0216\n!\3\"\3\"\3\"\3\""+ - "\7\"\u021c\n\"\f\"\16\"\u021f\13\"\3\"\3\"\3\"\3\"\3\"\5\"\u0226\n\"\3"+ - "#\3#\3#\3#\3#\2\3\36$\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,."+ - "\60\62\64\668:<>@BD\2\16\3\3\r\r\3\2;F\3\2\36 \3\2!\"\3\2\678\3\2#%\3"+ - "\2&)\3\2*-\3\29:\3\2GJ\4\2\34\35!\"\3\2RS\u0260\2I\3\2\2\2\4T\3\2\2\2"+ - "\6Y\3\2\2\2\b\u00b7\3\2\2\2\n\u00bb\3\2\2\2\f\u00bd\3\2\2\2\16\u00c6\3"+ - "\2\2\2\20\u00ca\3\2\2\2\22\u00cc\3\2\2\2\24\u00ce\3\2\2\2\26\u00d7\3\2"+ - "\2\2\30\u00df\3\2\2\2\32\u00e4\3\2\2\2\34\u00eb\3\2\2\2\36\u00f6\3\2\2"+ - "\2 \u0165\3\2\2\2\"\u0177\3\2\2\2$\u018c\3\2\2\2&\u0192\3\2\2\2(\u0199"+ - "\3\2\2\2*\u019b\3\2\2\2,\u019f\3\2\2\2.\u01af\3\2\2\2\60\u01be\3\2\2\2"+ - "\62\u01c6\3\2\2\2\64\u01ce\3\2\2\2\66\u01d0\3\2\2\28\u01d4\3\2\2\2:\u01d8"+ - "\3\2\2\2<\u01dc\3\2\2\2>\u0206\3\2\2\2@\u0215\3\2\2\2B\u0225\3\2\2\2D"+ - "\u0227\3\2\2\2FH\5\4\3\2GF\3\2\2\2HK\3\2\2\2IG\3\2\2\2IJ\3\2\2\2JO\3\2"+ - "\2\2KI\3\2\2\2LN\5\b\5\2ML\3\2\2\2NQ\3\2\2\2OM\3\2\2\2OP\3\2\2\2PR\3\2"+ - "\2\2QO\3\2\2\2RS\7\2\2\3S\3\3\2\2\2TU\5\26\f\2UV\7Q\2\2VW\5\6\4\2WX\5"+ - "\f\7\2X\5\3\2\2\2Ye\7\t\2\2Z[\5\26\f\2[b\7Q\2\2\\]\7\f\2\2]^\5\26\f\2"+ - "^_\7Q\2\2_a\3\2\2\2`\\\3\2\2\2ad\3\2\2\2b`\3\2\2\2bc\3\2\2\2cf\3\2\2\2"+ - "db\3\2\2\2eZ\3\2\2\2ef\3\2\2\2fg\3\2\2\2gh\7\n\2\2h\7\3\2\2\2ij\7\16\2"+ - "\2jk\7\t\2\2kl\5\36\20\2lm\7\n\2\2mq\5\n\6\2no\7\17\2\2or\5\n\6\2pr\6"+ - "\5\2\2qn\3\2\2\2qp\3\2\2\2r\u00b8\3\2\2\2st\7\20\2\2tu\7\t\2\2uv\5\36"+ - "\20\2vy\7\n\2\2wz\5\n\6\2xz\5\16\b\2yw\3\2\2\2yx\3\2\2\2z\u00b8\3\2\2"+ - "\2{|\7\21\2\2|}\5\f\7\2}~\7\20\2\2~\177\7\t\2\2\177\u0080\5\36\20\2\u0080"+ - "\u0081\7\n\2\2\u0081\u0082\5\34\17\2\u0082\u00b8\3\2\2\2\u0083\u0084\7"+ - "\22\2\2\u0084\u0086\7\t\2\2\u0085\u0087\5\20\t\2\u0086\u0085\3\2\2\2\u0086"+ - "\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088\u008a\7\r\2\2\u0089\u008b\5\36"+ - "\20\2\u008a\u0089\3\2\2\2\u008a\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c"+ - "\u008e\7\r\2\2\u008d\u008f\5\22\n\2\u008e\u008d\3\2\2\2\u008e\u008f\3"+ - "\2\2\2\u008f\u0090\3\2\2\2\u0090\u0093\7\n\2\2\u0091\u0094\5\n\6\2\u0092"+ - "\u0094\5\16\b\2\u0093\u0091\3\2\2\2\u0093\u0092\3\2\2\2\u0094\u00b8\3"+ - "\2\2\2\u0095\u0096\7\22\2\2\u0096\u0097\7\t\2\2\u0097\u0098\5\26\f\2\u0098"+ - "\u0099\7Q\2\2\u0099\u009a\7\64\2\2\u009a\u009b\5\36\20\2\u009b\u009c\7"+ - "\n\2\2\u009c\u009d\5\n\6\2\u009d\u00b8\3\2\2\2\u009e\u009f\5\24\13\2\u009f"+ - 
"\u00a0\5\34\17\2\u00a0\u00b8\3\2\2\2\u00a1\u00a2\7\23\2\2\u00a2\u00b8"+ - "\5\34\17\2\u00a3\u00a4\7\24\2\2\u00a4\u00b8\5\34\17\2\u00a5\u00a6\7\25"+ - "\2\2\u00a6\u00a7\5\36\20\2\u00a7\u00a8\5\34\17\2\u00a8\u00b8\3\2\2\2\u00a9"+ - "\u00aa\7\27\2\2\u00aa\u00ac\5\f\7\2\u00ab\u00ad\5\32\16\2\u00ac\u00ab"+ - "\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af"+ - "\u00b8\3\2\2\2\u00b0\u00b1\7\31\2\2\u00b1\u00b2\5\36\20\2\u00b2\u00b3"+ - "\5\34\17\2\u00b3\u00b8\3\2\2\2\u00b4\u00b5\5\36\20\2\u00b5\u00b6\5\34"+ - "\17\2\u00b6\u00b8\3\2\2\2\u00b7i\3\2\2\2\u00b7s\3\2\2\2\u00b7{\3\2\2\2"+ - "\u00b7\u0083\3\2\2\2\u00b7\u0095\3\2\2\2\u00b7\u009e\3\2\2\2\u00b7\u00a1"+ - "\3\2\2\2\u00b7\u00a3\3\2\2\2\u00b7\u00a5\3\2\2\2\u00b7\u00a9\3\2\2\2\u00b7"+ - "\u00b0\3\2\2\2\u00b7\u00b4\3\2\2\2\u00b8\t\3\2\2\2\u00b9\u00bc\5\f\7\2"+ - "\u00ba\u00bc\5\b\5\2\u00bb\u00b9\3\2\2\2\u00bb\u00ba\3\2\2\2\u00bc\13"+ - "\3\2\2\2\u00bd\u00c1\7\5\2\2\u00be\u00c0\5\b\5\2\u00bf\u00be\3\2\2\2\u00c0"+ - "\u00c3\3\2\2\2\u00c1\u00bf\3\2\2\2\u00c1\u00c2\3\2\2\2\u00c2\u00c4\3\2"+ - "\2\2\u00c3\u00c1\3\2\2\2\u00c4\u00c5\7\6\2\2\u00c5\r\3\2\2\2\u00c6\u00c7"+ - "\7\r\2\2\u00c7\17\3\2\2\2\u00c8\u00cb\5\24\13\2\u00c9\u00cb\5\36\20\2"+ - "\u00ca\u00c8\3\2\2\2\u00ca\u00c9\3\2\2\2\u00cb\21\3\2\2\2\u00cc\u00cd"+ - "\5\36\20\2\u00cd\23\3\2\2\2\u00ce\u00cf\5\26\f\2\u00cf\u00d4\5\30\r\2"+ - "\u00d0\u00d1\7\f\2\2\u00d1\u00d3\5\30\r\2\u00d2\u00d0\3\2\2\2\u00d3\u00d6"+ - "\3\2\2\2\u00d4\u00d2\3\2\2\2\u00d4\u00d5\3\2\2\2\u00d5\25\3\2\2\2\u00d6"+ - "\u00d4\3\2\2\2\u00d7\u00dc\7P\2\2\u00d8\u00d9\7\7\2\2\u00d9\u00db\7\b"+ - "\2\2\u00da\u00d8\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc"+ - "\u00dd\3\2\2\2\u00dd\27\3\2\2\2\u00de\u00dc\3\2\2\2\u00df\u00e2\7Q\2\2"+ - "\u00e0\u00e1\7;\2\2\u00e1\u00e3\5\36\20\2\u00e2\u00e0\3\2\2\2\u00e2\u00e3"+ - "\3\2\2\2\u00e3\31\3\2\2\2\u00e4\u00e5\7\30\2\2\u00e5\u00e6\7\t\2\2\u00e6"+ - "\u00e7\7P\2\2\u00e7\u00e8\7Q\2\2\u00e8\u00e9\7\n\2\2\u00e9\u00ea\5\f\7"+ - "\2\u00ea\33\3\2\2\2\u00eb\u00ec\t\2\2\2\u00ec\35\3\2\2\2\u00ed\u00ee\b"+ - "\20\1\2\u00ee\u00ef\5\"\22\2\u00ef\u00f0\t\3\2\2\u00f0\u00f1\5\36\20\3"+ - "\u00f1\u00f2\b\20\1\2\u00f2\u00f7\3\2\2\2\u00f3\u00f4\5 \21\2\u00f4\u00f5"+ - "\b\20\1\2\u00f5\u00f7\3\2\2\2\u00f6\u00ed\3\2\2\2\u00f6\u00f3\3\2\2\2"+ - "\u00f7\u013d\3\2\2\2\u00f8\u00f9\f\20\2\2\u00f9\u00fa\t\4\2\2\u00fa\u00fb"+ - "\5\36\20\21\u00fb\u00fc\b\20\1\2\u00fc\u013c\3\2\2\2\u00fd\u00fe\f\17"+ - "\2\2\u00fe\u00ff\t\5\2\2\u00ff\u0100\5\36\20\20\u0100\u0101\b\20\1\2\u0101"+ - "\u013c\3\2\2\2\u0102\u0103\f\16\2\2\u0103\u0104\t\6\2\2\u0104\u0105\5"+ - "\36\20\17\u0105\u0106\b\20\1\2\u0106\u013c\3\2\2\2\u0107\u0108\f\r\2\2"+ - "\u0108\u0109\t\7\2\2\u0109\u010a\5\36\20\16\u010a\u010b\b\20\1\2\u010b"+ - "\u013c\3\2\2\2\u010c\u010d\f\f\2\2\u010d\u010e\t\b\2\2\u010e\u010f\5\36"+ - "\20\r\u010f\u0110\b\20\1\2\u0110\u013c\3\2\2\2\u0111\u0112\f\n\2\2\u0112"+ - "\u0113\t\t\2\2\u0113\u0114\5\36\20\13\u0114\u0115\b\20\1\2\u0115\u013c"+ - "\3\2\2\2\u0116\u0117\f\t\2\2\u0117\u0118\7.\2\2\u0118\u0119\5\36\20\n"+ - "\u0119\u011a\b\20\1\2\u011a\u013c\3\2\2\2\u011b\u011c\f\b\2\2\u011c\u011d"+ - "\7/\2\2\u011d\u011e\5\36\20\t\u011e\u011f\b\20\1\2\u011f\u013c\3\2\2\2"+ - "\u0120\u0121\f\7\2\2\u0121\u0122\7\60\2\2\u0122\u0123\5\36\20\b\u0123"+ - "\u0124\b\20\1\2\u0124\u013c\3\2\2\2\u0125\u0126\f\6\2\2\u0126\u0127\7"+ - "\61\2\2\u0127\u0128\5\36\20\7\u0128\u0129\b\20\1\2\u0129\u013c\3\2\2\2"+ - "\u012a\u012b\f\5\2\2\u012b\u012c\7\62\2\2\u012c\u012d\5\36\20\6\u012d"+ - 
"\u012e\b\20\1\2\u012e\u013c\3\2\2\2\u012f\u0130\f\4\2\2\u0130\u0131\7"+ - "\63\2\2\u0131\u0132\5\36\20\2\u0132\u0133\7\64\2\2\u0133\u0134\5\36\20"+ - "\4\u0134\u0135\b\20\1\2\u0135\u013c\3\2\2\2\u0136\u0137\f\13\2\2\u0137"+ - "\u0138\7\33\2\2\u0138\u0139\5\26\f\2\u0139\u013a\b\20\1\2\u013a\u013c"+ - "\3\2\2\2\u013b\u00f8\3\2\2\2\u013b\u00fd\3\2\2\2\u013b\u0102\3\2\2\2\u013b"+ - "\u0107\3\2\2\2\u013b\u010c\3\2\2\2\u013b\u0111\3\2\2\2\u013b\u0116\3\2"+ - "\2\2\u013b\u011b\3\2\2\2\u013b\u0120\3\2\2\2\u013b\u0125\3\2\2\2\u013b"+ - "\u012a\3\2\2\2\u013b\u012f\3\2\2\2\u013b\u0136\3\2\2\2\u013c\u013f\3\2"+ - "\2\2\u013d\u013b\3\2\2\2\u013d\u013e\3\2\2\2\u013e\37\3\2\2\2\u013f\u013d"+ - "\3\2\2\2\u0140\u0141\6\21\20\3\u0141\u0142\t\n\2\2\u0142\u0166\5\"\22"+ - "\2\u0143\u0144\6\21\21\3\u0144\u0145\5\"\22\2\u0145\u0146\t\n\2\2\u0146"+ - "\u0166\3\2\2\2\u0147\u0148\6\21\22\3\u0148\u0166\5\"\22\2\u0149\u014a"+ - "\6\21\23\3\u014a\u014b\t\13\2\2\u014b\u0166\b\21\1\2\u014c\u014d\6\21"+ - "\24\3\u014d\u014e\7M\2\2\u014e\u0166\b\21\1\2\u014f\u0150\6\21\25\3\u0150"+ - "\u0151\7N\2\2\u0151\u0166\b\21\1\2\u0152\u0153\6\21\26\3\u0153\u0154\7"+ - "O\2\2\u0154\u0166\b\21\1\2\u0155\u0156\6\21\27\3\u0156\u0157\5@!\2\u0157"+ - "\u0158\b\21\1\2\u0158\u0166\3\2\2\2\u0159\u015a\6\21\30\3\u015a\u015b"+ - "\5B\"\2\u015b\u015c\b\21\1\2\u015c\u0166\3\2\2\2\u015d\u015e\6\21\31\3"+ - "\u015e\u015f\t\f\2\2\u015f\u0166\5 \21\2\u0160\u0161\7\t\2\2\u0161\u0162"+ - "\5\26\f\2\u0162\u0163\7\n\2\2\u0163\u0164\5 \21\2\u0164\u0166\3\2\2\2"+ - "\u0165\u0140\3\2\2\2\u0165\u0143\3\2\2\2\u0165\u0147\3\2\2\2\u0165\u0149"+ - "\3\2\2\2\u0165\u014c\3\2\2\2\u0165\u014f\3\2\2\2\u0165\u0152\3\2\2\2\u0165"+ - "\u0155\3\2\2\2\u0165\u0159\3\2\2\2\u0165\u015d\3\2\2\2\u0165\u0160\3\2"+ - "\2\2\u0166!\3\2\2\2\u0167\u016b\5$\23\2\u0168\u016a\5&\24\2\u0169\u0168"+ - "\3\2\2\2\u016a\u016d\3\2\2\2\u016b\u0169\3\2\2\2\u016b\u016c\3\2\2\2\u016c"+ - "\u0178\3\2\2\2\u016d\u016b\3\2\2\2\u016e\u016f\5\26\f\2\u016f\u0173\5"+ - "(\25\2\u0170\u0172\5&\24\2\u0171\u0170\3\2\2\2\u0172\u0175\3\2\2\2\u0173"+ - "\u0171\3\2\2\2\u0173\u0174\3\2\2\2\u0174\u0178\3\2\2\2\u0175\u0173\3\2"+ - "\2\2\u0176\u0178\5> \2\u0177\u0167\3\2\2\2\u0177\u016e\3\2\2\2\u0177\u0176"+ - "\3\2\2\2\u0178#\3\2\2\2\u0179\u017a\6\23\32\3\u017a\u017b\7\t\2\2\u017b"+ - "\u017c\5\36\20\2\u017c\u017d\7\n\2\2\u017d\u017e\b\23\1\2\u017e\u018d"+ - "\3\2\2\2\u017f\u0180\6\23\33\3\u0180\u0181\7\t\2\2\u0181\u0182\5 \21\2"+ - "\u0182\u0183\7\n\2\2\u0183\u018d\3\2\2\2\u0184\u018d\7K\2\2\u0185\u018d"+ - "\7L\2\2\u0186\u018d\7Q\2\2\u0187\u0188\7Q\2\2\u0188\u018d\5,\27\2\u0189"+ - "\u018a\7\26\2\2\u018a\u018b\7P\2\2\u018b\u018d\5,\27\2\u018c\u0179\3\2"+ - "\2\2\u018c\u017f\3\2\2\2\u018c\u0184\3\2\2\2\u018c\u0185\3\2\2\2\u018c"+ - "\u0186\3\2\2\2\u018c\u0187\3\2\2\2\u018c\u0189\3\2\2\2\u018d%\3\2\2\2"+ - "\u018e\u018f\6\24\34\3\u018f\u0193\5(\25\2\u0190\u0191\6\24\35\3\u0191"+ - "\u0193\5*\26\2\u0192\u018e\3\2\2\2\u0192\u0190\3\2\2\2\u0193\'\3\2\2\2"+ - "\u0194\u0195\7\13\2\2\u0195\u0196\7S\2\2\u0196\u019a\5,\27\2\u0197\u0198"+ - "\7\13\2\2\u0198\u019a\t\r\2\2\u0199\u0194\3\2\2\2\u0199\u0197\3\2\2\2"+ - "\u019a)\3\2\2\2\u019b\u019c\7\7\2\2\u019c\u019d\5\36\20\2\u019d\u019e"+ - "\7\b\2\2\u019e+\3\2\2\2\u019f\u01a8\7\t\2\2\u01a0\u01a5\5.\30\2\u01a1"+ - "\u01a2\7\f\2\2\u01a2\u01a4\5.\30\2\u01a3\u01a1\3\2\2\2\u01a4\u01a7\3\2"+ - "\2\2\u01a5\u01a3\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\u01a9\3\2\2\2\u01a7"+ - "\u01a5\3\2\2\2\u01a8\u01a0\3\2\2\2\u01a8\u01a9\3\2\2\2\u01a9\u01aa\3\2"+ - 
"\2\2\u01aa\u01ab\7\n\2\2\u01ab-\3\2\2\2\u01ac\u01b0\5\36\20\2\u01ad\u01b0"+ - "\5\60\31\2\u01ae\u01b0\5\64\33\2\u01af\u01ac\3\2\2\2\u01af\u01ad\3\2\2"+ - "\2\u01af\u01ae\3\2\2\2\u01b0/\3\2\2\2\u01b1\u01bf\5\62\32\2\u01b2\u01bb"+ - "\7\t\2\2\u01b3\u01b8\5\62\32\2\u01b4\u01b5\7\f\2\2\u01b5\u01b7\5\62\32"+ - "\2\u01b6\u01b4\3\2\2\2\u01b7\u01ba\3\2\2\2\u01b8\u01b6\3\2\2\2\u01b8\u01b9"+ - "\3\2\2\2\u01b9\u01bc\3\2\2\2\u01ba\u01b8\3\2\2\2\u01bb\u01b3\3\2\2\2\u01bb"+ - "\u01bc\3\2\2\2\u01bc\u01bd\3\2\2\2\u01bd\u01bf\7\n\2\2\u01be\u01b1\3\2"+ - "\2\2\u01be\u01b2\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0\u01c3\7\66\2\2\u01c1"+ - "\u01c4\5\f\7\2\u01c2\u01c4\5\36\20\2\u01c3\u01c1\3\2\2\2\u01c3\u01c2\3"+ - "\2\2\2\u01c4\61\3\2\2\2\u01c5\u01c7\5\26\f\2\u01c6\u01c5\3\2\2\2\u01c6"+ - "\u01c7\3\2\2\2\u01c7\u01c8\3\2\2\2\u01c8\u01c9\7Q\2\2\u01c9\63\3\2\2\2"+ - "\u01ca\u01cf\5\66\34\2\u01cb\u01cf\58\35\2\u01cc\u01cf\5:\36\2\u01cd\u01cf"+ - "\5<\37\2\u01ce\u01ca\3\2\2\2\u01ce\u01cb\3\2\2\2\u01ce\u01cc\3\2\2\2\u01ce"+ - "\u01cd\3\2\2\2\u01cf\65\3\2\2\2\u01d0\u01d1\7P\2\2\u01d1\u01d2\7\65\2"+ - "\2\u01d2\u01d3\7Q\2\2\u01d3\67\3\2\2\2\u01d4\u01d5\5\26\f\2\u01d5\u01d6"+ - "\7\65\2\2\u01d6\u01d7\7\26\2\2\u01d79\3\2\2\2\u01d8\u01d9\7Q\2\2\u01d9"+ - "\u01da\7\65\2\2\u01da\u01db\7Q\2\2\u01db;\3\2\2\2\u01dc\u01dd\7\32\2\2"+ - "\u01dd\u01de\7\65\2\2\u01de\u01df\7Q\2\2\u01df=\3\2\2\2\u01e0\u01e1\7"+ - "\26\2\2\u01e1\u01e6\7P\2\2\u01e2\u01e3\7\7\2\2\u01e3\u01e4\5\36\20\2\u01e4"+ - "\u01e5\7\b\2\2\u01e5\u01e7\3\2\2\2\u01e6\u01e2\3\2\2\2\u01e7\u01e8\3\2"+ - "\2\2\u01e8\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01f1\3\2\2\2\u01ea"+ - "\u01ee\5(\25\2\u01eb\u01ed\5&\24\2\u01ec\u01eb\3\2\2\2\u01ed\u01f0\3\2"+ - "\2\2\u01ee\u01ec\3\2\2\2\u01ee\u01ef\3\2\2\2\u01ef\u01f2\3\2\2\2\u01f0"+ - "\u01ee\3\2\2\2\u01f1\u01ea\3\2\2\2\u01f1\u01f2\3\2\2\2\u01f2\u0207\3\2"+ - "\2\2\u01f3\u01f4\7\26\2\2\u01f4\u01f5\7P\2\2\u01f5\u01f6\7\7\2\2\u01f6"+ - "\u01f7\7\b\2\2\u01f7\u0200\7\5\2\2\u01f8\u01fd\5\36\20\2\u01f9\u01fa\7"+ - "\f\2\2\u01fa\u01fc\5\36\20\2\u01fb\u01f9\3\2\2\2\u01fc\u01ff\3\2\2\2\u01fd"+ - "\u01fb\3\2\2\2\u01fd\u01fe\3\2\2\2\u01fe\u0201\3\2\2\2\u01ff\u01fd\3\2"+ - "\2\2\u0200\u01f8\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u0203\3\2\2\2\u0202"+ - "\u0204\7\r\2\2\u0203\u0202\3\2\2\2\u0203\u0204\3\2\2\2\u0204\u0205\3\2"+ - "\2\2\u0205\u0207\7\6\2\2\u0206\u01e0\3\2\2\2\u0206\u01f3\3\2\2\2\u0207"+ - "?\3\2\2\2\u0208\u0209\7\7\2\2\u0209\u020e\5\36\20\2\u020a\u020b\7\f\2"+ - "\2\u020b\u020d\5\36\20\2\u020c\u020a\3\2\2\2\u020d\u0210\3\2\2\2\u020e"+ - "\u020c\3\2\2\2\u020e\u020f\3\2\2\2\u020f\u0211\3\2\2\2\u0210\u020e\3\2"+ - "\2\2\u0211\u0212\7\b\2\2\u0212\u0216\3\2\2\2\u0213\u0214\7\7\2\2\u0214"+ - "\u0216\7\b\2\2\u0215\u0208\3\2\2\2\u0215\u0213\3\2\2\2\u0216A\3\2\2\2"+ - "\u0217\u0218\7\7\2\2\u0218\u021d\5D#\2\u0219\u021a\7\f\2\2\u021a\u021c"+ - "\5D#\2\u021b\u0219\3\2\2\2\u021c\u021f\3\2\2\2\u021d\u021b\3\2\2\2\u021d"+ - "\u021e\3\2\2\2\u021e\u0220\3\2\2\2\u021f\u021d\3\2\2\2\u0220\u0221\7\b"+ - "\2\2\u0221\u0226\3\2\2\2\u0222\u0223\7\7\2\2\u0223\u0224\7\64\2\2\u0224"+ - "\u0226\7\b\2\2\u0225\u0217\3\2\2\2\u0225\u0222\3\2\2\2\u0226C\3\2\2\2"+ - "\u0227\u0228\5\36\20\2\u0228\u0229\7\64\2\2\u0229\u022a\5\36\20\2\u022a"+ - "E\3\2\2\2\62IObeqy\u0086\u008a\u008e\u0093\u00ae\u00b7\u00bb\u00c1\u00ca"+ - "\u00d4\u00dc\u00e2\u00f6\u013b\u013d\u0165\u016b\u0173\u0177\u018c\u0192"+ - "\u0199\u01a5\u01a8\u01af\u01b8\u01bb\u01be\u01c3\u01c6\u01ce\u01e8\u01ee"+ - "\u01f1\u01fd\u0200\u0203\u0206\u020e\u0215\u021d\u0225"; + 
"\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\7\20\u0144"+ + "\n\20\f\20\16\20\u0147\13\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3"+ + "\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3"+ + "\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3"+ + "\21\5\21\u016e\n\21\3\22\3\22\7\22\u0172\n\22\f\22\16\22\u0175\13\22\3"+ + "\22\3\22\3\22\7\22\u017a\n\22\f\22\16\22\u017d\13\22\3\22\5\22\u0180\n"+ + "\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3"+ + "\23\3\23\3\23\3\23\3\23\3\23\5\23\u0195\n\23\3\24\3\24\3\24\3\24\5\24"+ + "\u019b\n\24\3\25\3\25\3\25\3\25\3\25\5\25\u01a2\n\25\3\26\3\26\3\26\3"+ + "\26\3\27\3\27\3\27\3\27\7\27\u01ac\n\27\f\27\16\27\u01af\13\27\5\27\u01b1"+ + "\n\27\3\27\3\27\3\30\3\30\3\30\5\30\u01b8\n\30\3\31\3\31\3\31\3\31\3\31"+ + "\7\31\u01bf\n\31\f\31\16\31\u01c2\13\31\5\31\u01c4\n\31\3\31\5\31\u01c7"+ + "\n\31\3\31\3\31\3\31\5\31\u01cc\n\31\3\32\5\32\u01cf\n\32\3\32\3\32\3"+ + "\33\3\33\3\33\3\33\5\33\u01d7\n\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35"+ + "\3\35\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \6 \u01ef"+ + "\n \r \16 \u01f0\3 \3 \7 \u01f5\n \f \16 \u01f8\13 \5 \u01fa\n \3 \3 "+ + "\3 \3 \3 \3 \3 \3 \7 \u0204\n \f \16 \u0207\13 \5 \u0209\n \3 \5 \u020c"+ + "\n \3 \5 \u020f\n \3!\3!\3!\3!\7!\u0215\n!\f!\16!\u0218\13!\3!\3!\3!\3"+ + "!\5!\u021e\n!\3\"\3\"\3\"\3\"\7\"\u0224\n\"\f\"\16\"\u0227\13\"\3\"\3"+ + "\"\3\"\3\"\3\"\5\"\u022e\n\"\3#\3#\3#\3#\3#\2\3\36$\2\4\6\b\n\f\16\20"+ + "\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BD\2\16\3\3\r\r\3\2\u020e\3\2\2\2@"+ + "\u021d\3\2\2\2B\u022d\3\2\2\2D\u022f\3\2\2\2FH\5\4\3\2GF\3\2\2\2HK\3\2"+ + "\2\2IG\3\2\2\2IJ\3\2\2\2JO\3\2\2\2KI\3\2\2\2LN\5\b\5\2ML\3\2\2\2NQ\3\2"+ + "\2\2OM\3\2\2\2OP\3\2\2\2PR\3\2\2\2QO\3\2\2\2RS\7\2\2\3S\3\3\2\2\2TU\5"+ + "\26\f\2UV\7R\2\2VW\5\6\4\2WX\5\f\7\2X\5\3\2\2\2Ye\7\t\2\2Z[\5\26\f\2["+ + "b\7R\2\2\\]\7\f\2\2]^\5\26\f\2^_\7R\2\2_a\3\2\2\2`\\\3\2\2\2ad\3\2\2\2"+ + "b`\3\2\2\2bc\3\2\2\2cf\3\2\2\2db\3\2\2\2eZ\3\2\2\2ef\3\2\2\2fg\3\2\2\2"+ + "gh\7\n\2\2h\7\3\2\2\2ij\7\16\2\2jk\7\t\2\2kl\5\36\20\2lm\7\n\2\2mq\5\n"+ + "\6\2no\7\20\2\2or\5\n\6\2pr\6\5\2\2qn\3\2\2\2qp\3\2\2\2r\u00c0\3\2\2\2"+ + "st\7\21\2\2tu\7\t\2\2uv\5\36\20\2vy\7\n\2\2wz\5\n\6\2xz\5\16\b\2yw\3\2"+ + "\2\2yx\3\2\2\2z\u00c0\3\2\2\2{|\7\22\2\2|}\5\f\7\2}~\7\21\2\2~\177\7\t"+ + "\2\2\177\u0080\5\36\20\2\u0080\u0081\7\n\2\2\u0081\u0082\5\34\17\2\u0082"+ + "\u00c0\3\2\2\2\u0083\u0084\7\23\2\2\u0084\u0086\7\t\2\2\u0085\u0087\5"+ + "\20\t\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2\2\2\u0088"+ + "\u008a\7\r\2\2\u0089\u008b\5\36\20\2\u008a\u0089\3\2\2\2\u008a\u008b\3"+ + "\2\2\2\u008b\u008c\3\2\2\2\u008c\u008e\7\r\2\2\u008d\u008f\5\22\n\2\u008e"+ + "\u008d\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0090\3\2\2\2\u0090\u0093\7\n"+ + "\2\2\u0091\u0094\5\n\6\2\u0092\u0094\5\16\b\2\u0093\u0091\3\2\2\2\u0093"+ + "\u0092\3\2\2\2\u0094\u00c0\3\2\2\2\u0095\u0096\7\23\2\2\u0096\u0097\7"+ + "\t\2\2\u0097\u0098\5\26\f\2\u0098\u0099\7R\2\2\u0099\u009a\7\65\2\2\u009a"+ + "\u009b\5\36\20\2\u009b\u009c\7\n\2\2\u009c\u009d\5\n\6\2\u009d\u00c0\3"+ + "\2\2\2\u009e\u009f\7\23\2\2\u009f\u00a0\7\t\2\2\u00a0\u00a1\7R\2\2\u00a1"+ + "\u00a2\7\17\2\2\u00a2\u00a3\5\36\20\2\u00a3\u00a4\7\n\2\2\u00a4\u00a5"+ + "\5\n\6\2\u00a5\u00c0\3\2\2\2\u00a6\u00a7\5\24\13\2\u00a7\u00a8\5\34\17"+ + "\2\u00a8\u00c0\3\2\2\2\u00a9\u00aa\7\24\2\2\u00aa\u00c0\5\34\17\2\u00ab"+ + "\u00ac\7\25\2\2\u00ac\u00c0\5\34\17\2\u00ad\u00ae\7\26\2\2\u00ae\u00af"+ + 
"\5\36\20\2\u00af\u00b0\5\34\17\2\u00b0\u00c0\3\2\2\2\u00b1\u00b2\7\30"+ + "\2\2\u00b2\u00b4\5\f\7\2\u00b3\u00b5\5\32\16\2\u00b4\u00b3\3\2\2\2\u00b5"+ + "\u00b6\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00c0\3\2"+ + "\2\2\u00b8\u00b9\7\32\2\2\u00b9\u00ba\5\36\20\2\u00ba\u00bb\5\34\17\2"+ + "\u00bb\u00c0\3\2\2\2\u00bc\u00bd\5\36\20\2\u00bd\u00be\5\34\17\2\u00be"+ + "\u00c0\3\2\2\2\u00bfi\3\2\2\2\u00bfs\3\2\2\2\u00bf{\3\2\2\2\u00bf\u0083"+ + "\3\2\2\2\u00bf\u0095\3\2\2\2\u00bf\u009e\3\2\2\2\u00bf\u00a6\3\2\2\2\u00bf"+ + "\u00a9\3\2\2\2\u00bf\u00ab\3\2\2\2\u00bf\u00ad\3\2\2\2\u00bf\u00b1\3\2"+ + "\2\2\u00bf\u00b8\3\2\2\2\u00bf\u00bc\3\2\2\2\u00c0\t\3\2\2\2\u00c1\u00c4"+ + "\5\f\7\2\u00c2\u00c4\5\b\5\2\u00c3\u00c1\3\2\2\2\u00c3\u00c2\3\2\2\2\u00c4"+ + "\13\3\2\2\2\u00c5\u00c9\7\5\2\2\u00c6\u00c8\5\b\5\2\u00c7\u00c6\3\2\2"+ + "\2\u00c8\u00cb\3\2\2\2\u00c9\u00c7\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00cc"+ + "\3\2\2\2\u00cb\u00c9\3\2\2\2\u00cc\u00cd\7\6\2\2\u00cd\r\3\2\2\2\u00ce"+ + "\u00cf\7\r\2\2\u00cf\17\3\2\2\2\u00d0\u00d3\5\24\13\2\u00d1\u00d3\5\36"+ + "\20\2\u00d2\u00d0\3\2\2\2\u00d2\u00d1\3\2\2\2\u00d3\21\3\2\2\2\u00d4\u00d5"+ + "\5\36\20\2\u00d5\23\3\2\2\2\u00d6\u00d7\5\26\f\2\u00d7\u00dc\5\30\r\2"+ + "\u00d8\u00d9\7\f\2\2\u00d9\u00db\5\30\r\2\u00da\u00d8\3\2\2\2\u00db\u00de"+ + "\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\25\3\2\2\2\u00de"+ + "\u00dc\3\2\2\2\u00df\u00e4\7Q\2\2\u00e0\u00e1\7\7\2\2\u00e1\u00e3\7\b"+ + "\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e6\3\2\2\2\u00e4\u00e2\3\2\2\2\u00e4"+ + "\u00e5\3\2\2\2\u00e5\27\3\2\2\2\u00e6\u00e4\3\2\2\2\u00e7\u00ea\7R\2\2"+ + "\u00e8\u00e9\7<\2\2\u00e9\u00eb\5\36\20\2\u00ea\u00e8\3\2\2\2\u00ea\u00eb"+ + "\3\2\2\2\u00eb\31\3\2\2\2\u00ec\u00ed\7\31\2\2\u00ed\u00ee\7\t\2\2\u00ee"+ + "\u00ef\7Q\2\2\u00ef\u00f0\7R\2\2\u00f0\u00f1\7\n\2\2\u00f1\u00f2\5\f\7"+ + "\2\u00f2\33\3\2\2\2\u00f3\u00f4\t\2\2\2\u00f4\35\3\2\2\2\u00f5\u00f6\b"+ + "\20\1\2\u00f6\u00f7\5\"\22\2\u00f7\u00f8\t\3\2\2\u00f8\u00f9\5\36\20\3"+ + "\u00f9\u00fa\b\20\1\2\u00fa\u00ff\3\2\2\2\u00fb\u00fc\5 \21\2\u00fc\u00fd"+ + "\b\20\1\2\u00fd\u00ff\3\2\2\2\u00fe\u00f5\3\2\2\2\u00fe\u00fb\3\2\2\2"+ + "\u00ff\u0145\3\2\2\2\u0100\u0101\f\20\2\2\u0101\u0102\t\4\2\2\u0102\u0103"+ + "\5\36\20\21\u0103\u0104\b\20\1\2\u0104\u0144\3\2\2\2\u0105\u0106\f\17"+ + "\2\2\u0106\u0107\t\5\2\2\u0107\u0108\5\36\20\20\u0108\u0109\b\20\1\2\u0109"+ + "\u0144\3\2\2\2\u010a\u010b\f\16\2\2\u010b\u010c\t\6\2\2\u010c\u010d\5"+ + "\36\20\17\u010d\u010e\b\20\1\2\u010e\u0144\3\2\2\2\u010f\u0110\f\r\2\2"+ + "\u0110\u0111\t\7\2\2\u0111\u0112\5\36\20\16\u0112\u0113\b\20\1\2\u0113"+ + "\u0144\3\2\2\2\u0114\u0115\f\f\2\2\u0115\u0116\t\b\2\2\u0116\u0117\5\36"+ + "\20\r\u0117\u0118\b\20\1\2\u0118\u0144\3\2\2\2\u0119\u011a\f\n\2\2\u011a"+ + "\u011b\t\t\2\2\u011b\u011c\5\36\20\13\u011c\u011d\b\20\1\2\u011d\u0144"+ + "\3\2\2\2\u011e\u011f\f\t\2\2\u011f\u0120\7/\2\2\u0120\u0121\5\36\20\n"+ + "\u0121\u0122\b\20\1\2\u0122\u0144\3\2\2\2\u0123\u0124\f\b\2\2\u0124\u0125"+ + "\7\60\2\2\u0125\u0126\5\36\20\t\u0126\u0127\b\20\1\2\u0127\u0144\3\2\2"+ + "\2\u0128\u0129\f\7\2\2\u0129\u012a\7\61\2\2\u012a\u012b\5\36\20\b\u012b"+ + "\u012c\b\20\1\2\u012c\u0144\3\2\2\2\u012d\u012e\f\6\2\2\u012e\u012f\7"+ + "\62\2\2\u012f\u0130\5\36\20\7\u0130\u0131\b\20\1\2\u0131\u0144\3\2\2\2"+ + "\u0132\u0133\f\5\2\2\u0133\u0134\7\63\2\2\u0134\u0135\5\36\20\6\u0135"+ + "\u0136\b\20\1\2\u0136\u0144\3\2\2\2\u0137\u0138\f\4\2\2\u0138\u0139\7"+ + "\64\2\2\u0139\u013a\5\36\20\2\u013a\u013b\7\65\2\2\u013b\u013c\5\36\20"+ + 
"\4\u013c\u013d\b\20\1\2\u013d\u0144\3\2\2\2\u013e\u013f\f\13\2\2\u013f"+ + "\u0140\7\34\2\2\u0140\u0141\5\26\f\2\u0141\u0142\b\20\1\2\u0142\u0144"+ + "\3\2\2\2\u0143\u0100\3\2\2\2\u0143\u0105\3\2\2\2\u0143\u010a\3\2\2\2\u0143"+ + "\u010f\3\2\2\2\u0143\u0114\3\2\2\2\u0143\u0119\3\2\2\2\u0143\u011e\3\2"+ + "\2\2\u0143\u0123\3\2\2\2\u0143\u0128\3\2\2\2\u0143\u012d\3\2\2\2\u0143"+ + "\u0132\3\2\2\2\u0143\u0137\3\2\2\2\u0143\u013e\3\2\2\2\u0144\u0147\3\2"+ + "\2\2\u0145\u0143\3\2\2\2\u0145\u0146\3\2\2\2\u0146\37\3\2\2\2\u0147\u0145"+ + "\3\2\2\2\u0148\u0149\6\21\20\3\u0149\u014a\t\n\2\2\u014a\u016e\5\"\22"+ + "\2\u014b\u014c\6\21\21\3\u014c\u014d\5\"\22\2\u014d\u014e\t\n\2\2\u014e"+ + "\u016e\3\2\2\2\u014f\u0150\6\21\22\3\u0150\u016e\5\"\22\2\u0151\u0152"+ + "\6\21\23\3\u0152\u0153\t\13\2\2\u0153\u016e\b\21\1\2\u0154\u0155\6\21"+ + "\24\3\u0155\u0156\7N\2\2\u0156\u016e\b\21\1\2\u0157\u0158\6\21\25\3\u0158"+ + "\u0159\7O\2\2\u0159\u016e\b\21\1\2\u015a\u015b\6\21\26\3\u015b\u015c\7"+ + "P\2\2\u015c\u016e\b\21\1\2\u015d\u015e\6\21\27\3\u015e\u015f\5@!\2\u015f"+ + "\u0160\b\21\1\2\u0160\u016e\3\2\2\2\u0161\u0162\6\21\30\3\u0162\u0163"+ + "\5B\"\2\u0163\u0164\b\21\1\2\u0164\u016e\3\2\2\2\u0165\u0166\6\21\31\3"+ + "\u0166\u0167\t\f\2\2\u0167\u016e\5 \21\2\u0168\u0169\7\t\2\2\u0169\u016a"+ + "\5\26\f\2\u016a\u016b\7\n\2\2\u016b\u016c\5 \21\2\u016c\u016e\3\2\2\2"+ + "\u016d\u0148\3\2\2\2\u016d\u014b\3\2\2\2\u016d\u014f\3\2\2\2\u016d\u0151"+ + "\3\2\2\2\u016d\u0154\3\2\2\2\u016d\u0157\3\2\2\2\u016d\u015a\3\2\2\2\u016d"+ + "\u015d\3\2\2\2\u016d\u0161\3\2\2\2\u016d\u0165\3\2\2\2\u016d\u0168\3\2"+ + "\2\2\u016e!\3\2\2\2\u016f\u0173\5$\23\2\u0170\u0172\5&\24\2\u0171\u0170"+ + "\3\2\2\2\u0172\u0175\3\2\2\2\u0173\u0171\3\2\2\2\u0173\u0174\3\2\2\2\u0174"+ + "\u0180\3\2\2\2\u0175\u0173\3\2\2\2\u0176\u0177\5\26\f\2\u0177\u017b\5"+ + "(\25\2\u0178\u017a\5&\24\2\u0179\u0178\3\2\2\2\u017a\u017d\3\2\2\2\u017b"+ + "\u0179\3\2\2\2\u017b\u017c\3\2\2\2\u017c\u0180\3\2\2\2\u017d\u017b\3\2"+ + "\2\2\u017e\u0180\5> \2\u017f\u016f\3\2\2\2\u017f\u0176\3\2\2\2\u017f\u017e"+ + "\3\2\2\2\u0180#\3\2\2\2\u0181\u0182\6\23\32\3\u0182\u0183\7\t\2\2\u0183"+ + "\u0184\5\36\20\2\u0184\u0185\7\n\2\2\u0185\u0186\b\23\1\2\u0186\u0195"+ + "\3\2\2\2\u0187\u0188\6\23\33\3\u0188\u0189\7\t\2\2\u0189\u018a\5 \21\2"+ + "\u018a\u018b\7\n\2\2\u018b\u0195\3\2\2\2\u018c\u0195\7L\2\2\u018d\u0195"+ + "\7M\2\2\u018e\u0195\7R\2\2\u018f\u0190\7R\2\2\u0190\u0195\5,\27\2\u0191"+ + "\u0192\7\27\2\2\u0192\u0193\7Q\2\2\u0193\u0195\5,\27\2\u0194\u0181\3\2"+ + "\2\2\u0194\u0187\3\2\2\2\u0194\u018c\3\2\2\2\u0194\u018d\3\2\2\2\u0194"+ + "\u018e\3\2\2\2\u0194\u018f\3\2\2\2\u0194\u0191\3\2\2\2\u0195%\3\2\2\2"+ + "\u0196\u0197\6\24\34\3\u0197\u019b\5(\25\2\u0198\u0199\6\24\35\3\u0199"+ + "\u019b\5*\26\2\u019a\u0196\3\2\2\2\u019a\u0198\3\2\2\2\u019b\'\3\2\2\2"+ + "\u019c\u019d\7\13\2\2\u019d\u019e\7T\2\2\u019e\u01a2\5,\27\2\u019f\u01a0"+ + "\7\13\2\2\u01a0\u01a2\t\r\2\2\u01a1\u019c\3\2\2\2\u01a1\u019f\3\2\2\2"+ + "\u01a2)\3\2\2\2\u01a3\u01a4\7\7\2\2\u01a4\u01a5\5\36\20\2\u01a5\u01a6"+ + "\7\b\2\2\u01a6+\3\2\2\2\u01a7\u01b0\7\t\2\2\u01a8\u01ad\5.\30\2\u01a9"+ + "\u01aa\7\f\2\2\u01aa\u01ac\5.\30\2\u01ab\u01a9\3\2\2\2\u01ac\u01af\3\2"+ + "\2\2\u01ad\u01ab\3\2\2\2\u01ad\u01ae\3\2\2\2\u01ae\u01b1\3\2\2\2\u01af"+ + "\u01ad\3\2\2\2\u01b0\u01a8\3\2\2\2\u01b0\u01b1\3\2\2\2\u01b1\u01b2\3\2"+ + "\2\2\u01b2\u01b3\7\n\2\2\u01b3-\3\2\2\2\u01b4\u01b8\5\36\20\2\u01b5\u01b8"+ + "\5\60\31\2\u01b6\u01b8\5\64\33\2\u01b7\u01b4\3\2\2\2\u01b7\u01b5\3\2\2"+ + 
"\2\u01b7\u01b6\3\2\2\2\u01b8/\3\2\2\2\u01b9\u01c7\5\62\32\2\u01ba\u01c3"+ + "\7\t\2\2\u01bb\u01c0\5\62\32\2\u01bc\u01bd\7\f\2\2\u01bd\u01bf\5\62\32"+ + "\2\u01be\u01bc\3\2\2\2\u01bf\u01c2\3\2\2\2\u01c0\u01be\3\2\2\2\u01c0\u01c1"+ + "\3\2\2\2\u01c1\u01c4\3\2\2\2\u01c2\u01c0\3\2\2\2\u01c3\u01bb\3\2\2\2\u01c3"+ + "\u01c4\3\2\2\2\u01c4\u01c5\3\2\2\2\u01c5\u01c7\7\n\2\2\u01c6\u01b9\3\2"+ + "\2\2\u01c6\u01ba\3\2\2\2\u01c7\u01c8\3\2\2\2\u01c8\u01cb\7\67\2\2\u01c9"+ + "\u01cc\5\f\7\2\u01ca\u01cc\5\36\20\2\u01cb\u01c9\3\2\2\2\u01cb\u01ca\3"+ + "\2\2\2\u01cc\61\3\2\2\2\u01cd\u01cf\5\26\f\2\u01ce\u01cd\3\2\2\2\u01ce"+ + "\u01cf\3\2\2\2\u01cf\u01d0\3\2\2\2\u01d0\u01d1\7R\2\2\u01d1\63\3\2\2\2"+ + "\u01d2\u01d7\5\66\34\2\u01d3\u01d7\58\35\2\u01d4\u01d7\5:\36\2\u01d5\u01d7"+ + "\5<\37\2\u01d6\u01d2\3\2\2\2\u01d6\u01d3\3\2\2\2\u01d6\u01d4\3\2\2\2\u01d6"+ + "\u01d5\3\2\2\2\u01d7\65\3\2\2\2\u01d8\u01d9\7Q\2\2\u01d9\u01da\7\66\2"+ + "\2\u01da\u01db\7R\2\2\u01db\67\3\2\2\2\u01dc\u01dd\5\26\f\2\u01dd\u01de"+ + "\7\66\2\2\u01de\u01df\7\27\2\2\u01df9\3\2\2\2\u01e0\u01e1\7R\2\2\u01e1"+ + "\u01e2\7\66\2\2\u01e2\u01e3\7R\2\2\u01e3;\3\2\2\2\u01e4\u01e5\7\33\2\2"+ + "\u01e5\u01e6\7\66\2\2\u01e6\u01e7\7R\2\2\u01e7=\3\2\2\2\u01e8\u01e9\7"+ + "\27\2\2\u01e9\u01ee\7Q\2\2\u01ea\u01eb\7\7\2\2\u01eb\u01ec\5\36\20\2\u01ec"+ + "\u01ed\7\b\2\2\u01ed\u01ef\3\2\2\2\u01ee\u01ea\3\2\2\2\u01ef\u01f0\3\2"+ + "\2\2\u01f0\u01ee\3\2\2\2\u01f0\u01f1\3\2\2\2\u01f1\u01f9\3\2\2\2\u01f2"+ + "\u01f6\5(\25\2\u01f3\u01f5\5&\24\2\u01f4\u01f3\3\2\2\2\u01f5\u01f8\3\2"+ + "\2\2\u01f6\u01f4\3\2\2\2\u01f6\u01f7\3\2\2\2\u01f7\u01fa\3\2\2\2\u01f8"+ + "\u01f6\3\2\2\2\u01f9\u01f2\3\2\2\2\u01f9\u01fa\3\2\2\2\u01fa\u020f\3\2"+ + "\2\2\u01fb\u01fc\7\27\2\2\u01fc\u01fd\7Q\2\2\u01fd\u01fe\7\7\2\2\u01fe"+ + "\u01ff\7\b\2\2\u01ff\u0208\7\5\2\2\u0200\u0205\5\36\20\2\u0201\u0202\7"+ + "\f\2\2\u0202\u0204\5\36\20\2\u0203\u0201\3\2\2\2\u0204\u0207\3\2\2\2\u0205"+ + "\u0203\3\2\2\2\u0205\u0206\3\2\2\2\u0206\u0209\3\2\2\2\u0207\u0205\3\2"+ + "\2\2\u0208\u0200\3\2\2\2\u0208\u0209\3\2\2\2\u0209\u020b\3\2\2\2\u020a"+ + "\u020c\7\r\2\2\u020b\u020a\3\2\2\2\u020b\u020c\3\2\2\2\u020c\u020d\3\2"+ + "\2\2\u020d\u020f\7\6\2\2\u020e\u01e8\3\2\2\2\u020e\u01fb\3\2\2\2\u020f"+ + "?\3\2\2\2\u0210\u0211\7\7\2\2\u0211\u0216\5\36\20\2\u0212\u0213\7\f\2"+ + "\2\u0213\u0215\5\36\20\2\u0214\u0212\3\2\2\2\u0215\u0218\3\2\2\2\u0216"+ + "\u0214\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u0219\3\2\2\2\u0218\u0216\3\2"+ + "\2\2\u0219\u021a\7\b\2\2\u021a\u021e\3\2\2\2\u021b\u021c\7\7\2\2\u021c"+ + "\u021e\7\b\2\2\u021d\u0210\3\2\2\2\u021d\u021b\3\2\2\2\u021eA\3\2\2\2"+ + "\u021f\u0220\7\7\2\2\u0220\u0225\5D#\2\u0221\u0222\7\f\2\2\u0222\u0224"+ + "\5D#\2\u0223\u0221\3\2\2\2\u0224\u0227\3\2\2\2\u0225\u0223\3\2\2\2\u0225"+ + "\u0226\3\2\2\2\u0226\u0228\3\2\2\2\u0227\u0225\3\2\2\2\u0228\u0229\7\b"+ + "\2\2\u0229\u022e\3\2\2\2\u022a\u022b\7\7\2\2\u022b\u022c\7\65\2\2\u022c"+ + "\u022e\7\b\2\2\u022d\u021f\3\2\2\2\u022d\u022a\3\2\2\2\u022eC\3\2\2\2"+ + "\u022f\u0230\5\36\20\2\u0230\u0231\7\65\2\2\u0231\u0232\5\36\20\2\u0232"+ + "E\3\2\2\2\62IObeqy\u0086\u008a\u008e\u0093\u00b6\u00bf\u00c3\u00c9\u00d2"+ + "\u00dc\u00e4\u00ea\u00fe\u0143\u0145\u016d\u0173\u017b\u017f\u0194\u019a"+ + "\u01a1\u01ad\u01b0\u01b7\u01c0\u01c3\u01c6\u01cb\u01ce\u01d6\u01f0\u01f6"+ + "\u01f9\u0205\u0208\u020b\u020e\u0216\u021d\u0225\u022d"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java index a11a856edd4..a1279d611e3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java @@ -67,6 +67,13 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement * {@link #visitChildren} on {@code ctx}.
*/ @Override public T visitEach(PainlessParser.EachContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + * <p>The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitIneach(PainlessParser.IneachContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java index bc9e8c4f994..8a297651070 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java @@ -63,6 +63,13 @@ interface PainlessParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitEach(PainlessParser.EachContext ctx); + /** + * Visit a parse tree produced by the {@code ineach} + * labeled alternative in {@link PainlessParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitIneach(PainlessParser.IneachContext ctx); /** * Visit a parse tree produced by the {@code decl} * labeled alternative in {@link PainlessParser#statement}. diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 88774bb9984..55e3445bace 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -81,6 +81,7 @@ import org.elasticsearch.painless.antlr.PainlessParser.ForContext; import org.elasticsearch.painless.antlr.PainlessParser.FuncrefContext; import org.elasticsearch.painless.antlr.PainlessParser.FunctionContext; import org.elasticsearch.painless.antlr.PainlessParser.IfContext; +import org.elasticsearch.painless.antlr.PainlessParser.IneachContext; import org.elasticsearch.painless.antlr.PainlessParser.InitializerContext; import org.elasticsearch.painless.antlr.PainlessParser.InstanceofContext; import org.elasticsearch.painless.antlr.PainlessParser.LambdaContext; @@ -358,6 +359,17 @@ public final class Walker extends PainlessParserBaseVisitor { return new SEach(location(ctx), type, name, expression, block); } + + @Override + public Object visitIneach(IneachContext ctx) { + reserved.peek().setMaxLoopCounter(settings.getMaxLoopCounter()); + + String name = ctx.ID().getText(); + AExpression expression = (AExpression)visitExpression(ctx.expression()); + SBlock block = (SBlock)visit(ctx.trailer()); + + return new SEach(location(ctx), "def", name, expression, block); + } @Override public Object visitDecl(DeclContext ctx) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java index 46dc8af5ab9..3eee2a7b2d8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java @@ -158,6 +158,7 @@ public class SFunction extends AStatement { access |= Opcodes.ACC_SYNTHETIC; } final MethodWriter function = new MethodWriter(access, method.method, writer, globals.getStatements(), settings); + function.visitCode(); write(function, globals); function.endMethod(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 950b021486f..e55ad91d492 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -31,6 +31,7 @@ import org.elasticsearch.painless.node.SFunction.Reserved; import org.elasticsearch.painless.WriterConstants; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.SimpleChecksAdapter; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.ClassWriter; import org.objectweb.asm.Opcodes; @@ -146,7 +147,6 @@ public final class SSource extends AStatement { // Create the ClassWriter. int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; - int classVersion = Opcodes.V1_8; int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; String classBase = BASE_CLASS_TYPE.getInternalName(); String className = CLASS_TYPE.getInternalName(); @@ -155,10 +155,15 @@ public final class SSource extends AStatement { ClassWriter writer = new ClassWriter(classFrames); ClassVisitor visitor = writer; + // if picky is enabled, turn on some checks. instead of VerifyError at the end, you get a helpful stacktrace. + if (settings.isPicky()) { + visitor = new SimpleChecksAdapter(visitor); + } + if (debugStream != null) { visitor = new TraceClassVisitor(visitor, debugStream, null); } - visitor.visit(classVersion, classAccess, className, null, classBase, classInterfaces); + visitor.visit(WriterConstants.CLASS_VERSION, classAccess, className, null, classBase, classInterfaces); visitor.visitSource(Location.computeSourceName(name, source), null); // Write the constructor: @@ -207,6 +212,7 @@ public final class SSource extends AStatement { // Initialize the constants in a static initializer final MethodWriter clinit = new MethodWriter(Opcodes.ACC_STATIC, WriterConstants.CLINIT, visitor, globals.getStatements(), settings); + clinit.visitCode(); for (Constant constant : inits) { constant.initializer.accept(clinit); clinit.putStatic(CLASS_TYPE, constant.name, constant.type); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java index 01f3ee42ae6..e023ac364b3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicStatementTests.java @@ -130,39 +130,65 @@ public class BasicStatementTests extends ScriptTestCase { public void testIterableForEachStatement() { assertEquals(6, exec("List l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + " for (int x : l) total += x; return total")); + assertEquals(6, exec("List l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + + " for (x in l) total += x; return total")); assertEquals("123", exec("List l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + " for (String x : l) cat += x; return cat")); + assertEquals("123", exec("List l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + + " for (x in l) cat += x; return cat")); assertEquals("1236", exec("Map m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + " String cat = ''; int total = 0;" + " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); + assertEquals("1236", exec("Map m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + + " 
String cat = ''; int total = 0;" + + " for (e in m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); } public void testIterableForEachStatementDef() { assertEquals(6, exec("def l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + " for (int x : l) total += x; return total")); + assertEquals(6, exec("def l = new ArrayList(); l.add(1); l.add(2); l.add(3); int total = 0;" + + " for (x in l) total += x; return total")); assertEquals("123", exec("def l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + " for (String x : l) cat += x; return cat")); + assertEquals("123", exec("def l = new ArrayList(); l.add('1'); l.add('2'); l.add('3'); String cat = '';" + + " for (x in l) cat += x; return cat")); assertEquals("1236", exec("def m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + " String cat = ''; int total = 0;" + " for (Map.Entry e : m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); + assertEquals("1236", exec("def m = new HashMap(); m.put('1', 1); m.put('2', 2); m.put('3', 3);" + + " String cat = ''; int total = 0;" + + " for (e in m.entrySet()) { cat += e.getKey(); total += e.getValue(); } return cat + total")); } public void testArrayForEachStatement() { assertEquals(6, exec("int[] a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + " for (int x : a) total += x; return total")); + assertEquals(6, exec("int[] a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + + " for (x in a) total += x; return total")); assertEquals("123", exec("String[] a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = '';" + " for (String x : a) total += x; return total")); + assertEquals("123", exec("String[] a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = '';" + + " for (x in a) total += x; return total")); assertEquals(6, exec("int[][] i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + " for (int[] j : i) total += j[0]; return total")); + assertEquals(6, exec("int[][] i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + + " for (j in i) total += j[0]; return total")); } public void testArrayForEachStatementDef() { assertEquals(6, exec("def a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + " for (int x : a) total += x; return total")); + assertEquals(6, exec("def a = new int[3]; a[0] = 1; a[1] = 2; a[2] = 3; int total = 0;" + + " for (x in a) total += x; return total")); assertEquals("123", exec("def a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = '';" + " for (String x : a) total += x; return total")); + assertEquals("123", exec("def a = new String[3]; a[0] = '1'; a[1] = '2'; a[2] = '3'; def total = '';" + + " for (x in a) total += x; return total")); assertEquals(6, exec("def i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + " for (int[] j : i) total += j[0]; return total")); + assertEquals(6, exec("def i = new int[3][1]; i[0][0] = 1; i[1][0] = 2; i[2][0] = 3; int total = 0;" + + " for (j in i) total += j[0]; return total")); } public void testDeclarationStatement() { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java index dbca5243ec2..6bb800eb92c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LambdaTests.java @@ -79,7 +79,7 @@ public class LambdaTests extends ScriptTestCase { } public void testMultipleStatements() { - assertEquals(2, exec("int applyOne(IntFunction arg) { arg.apply(1) } applyOne(x -> { x = x + 1; return x })")); + assertEquals(2, exec("int applyOne(IntFunction arg) { arg.apply(1) } applyOne(x -> { def y = x + 1; return y })")); } public void testUnneededCurlyStatements() { @@ -138,6 +138,7 @@ public class LambdaTests extends ScriptTestCase { assertTrue(expected.getMessage().contains("is read-only")); } + @AwaitsFix(bugUrl = "def type tracking") public void testOnlyCapturesAreReadOnly() { assertEquals(4, exec("List l = new ArrayList(); l.add(1); l.add(1); " + "return l.stream().mapToInt(x -> { x += 1; return x }).sum();")); From eb67b3c6131d6027d9ca924a21815343e836a059 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 27 Jun 2016 12:34:10 -0400 Subject: [PATCH 02/43] Move the build vagrant build listener That way it doesn't register until we actually try and set up the vagrant test root. We don't need it all the time. --- qa/vagrant/build.gradle | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 905710a82be..784655edaa3 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -170,6 +170,16 @@ task prepareTestRoot(type: Copy) { from configurations.test dependsOn createVersionFile, createUpgradeFromFile + doFirst { + gradle.addBuildListener new BuildAdapter() { + @Override + void buildFinished(BuildResult result) { + if (result.failure) { + println "Reproduce with: gradle packagingTest -Pvagrant.boxes=${vagrantBoxes} -Dtests.seed=${formattedSeed} -Dtests.packaging.upgrade.from.versions=${upgradeFromVersions.join(",")}" + } + } + } + } } task checkVagrantVersion(type: Exec) { @@ -196,14 +206,6 @@ task packagingTest { " 'sample' can be used to test a single yum and apt box. 'all' can be used to\n" + " test all available boxes. The available boxes are: \n" + " ${availableBoxes}" - gradle.addBuildListener new BuildAdapter() { - @Override - void buildFinished(BuildResult result) { - if (result.failure) { - println "Reproduce with: gradle packagingTest -Pvagrant.boxes=${vagrantBoxes} -Dtests.seed=${formattedSeed} -Dtests.packaging.upgrade.from.versions=${upgradeFromVersions.join(",")}" - } - } - } } // Each box gets it own set of tasks @@ -245,7 +247,7 @@ for (String box : availableBoxes) { commandLine 'vagrant', 'ssh', box, '--command', "set -o pipefail && ${smokeTestCommand} | sed -ue 's/^/ ${box}: /'" } - vagrantSmokeTest.dependsOn(smoke) + vagrantSmokeTest.dependsOn(smoke) Task packaging = tasks.create("packagingTest${boxTask}", BatsOverVagrantTask) { dependsOn up From 79fa778e33507120591a3782c017d950d2891d41 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 27 Jun 2016 15:34:36 -0400 Subject: [PATCH 03/43] Fix percolator tests They need their plugin or they'll break! 
--- .../java/org/elasticsearch/test/AbstractQueryTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 6a5ea4f7afe..c3693905501 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -92,6 +92,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.script.Script.ScriptParseException; @@ -895,7 +896,7 @@ public abstract class AbstractQueryTestCase> b.bind(Environment.class).toInstance(new Environment(settings)); b.bind(ThreadPool.class).toInstance(threadPool); }, - settingsModule, new IndicesModule(namedWriteableRegistry, Collections.emptyList()) { + settingsModule, new IndicesModule(namedWriteableRegistry, pluginsService.filterPlugins(MapperPlugin.class)) { @Override public void configure() { // skip services From aec033386ee2a5f43e8bfe3a486efe5e3889153d Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 27 Jun 2016 13:49:10 -0600 Subject: [PATCH 04/43] Add the integ test node's configuration directory as a system property --- .../org/elasticsearch/gradle/test/RestIntegTestTask.groovy | 1 + 1 file changed, 1 insertion(+) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 3bfe9d61018..fedcf6e87d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -62,6 +62,7 @@ public class RestIntegTestTask extends RandomizedTestingTask { project.gradle.projectsEvaluated { NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig) systemProperty('tests.rest.cluster', "${-> node.httpUri()}") + systemProperty('tests.config.dir', "${-> node.confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops From 2f638b5a23597967a98b1ced1deac91d64af5a44 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 27 Jun 2016 18:41:18 -0400 Subject: [PATCH 05/43] Keep input time unit when parsing TimeValues This commit modifies TimeValue parsing to keep the input time unit. This enables round-trip parsing from instances of String to instances of TimeValue and vice-versa. With this, this commit removes support for the unit "w" representing weeks, and also removes support for fractional values of units (e.g., 0.5s). 
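For illustration (a sketch only, not code in this patch; it assumes the existing
TimeValue.parseTimeValue(String, TimeValue, String) helper and the MINUS_ONE
constant introduced below):

    import org.elasticsearch.common.unit.TimeValue;

    // Sketch: the unit given in the input string is now preserved.
    TimeValue parsed = TimeValue.parseTimeValue("5s", TimeValue.MINUS_ONE, "my.setting");
    assert parsed.seconds() == 5;   // the numeric value is unchanged
    // The value also remembers SECONDS as its unit, and writeTo() serializes
    // that unit alongside the duration, so the original "5s" representation
    // survives the wire instead of being collapsed to nanoseconds.
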
Relates #19102 --- .../resources/checkstyle_suppressions.xml | 1 - .../common/io/stream/Streamable.java | 5 +- .../common/io/stream/Writeable.java | 3 +- .../elasticsearch/common/unit/TimeValue.java | 123 ++++++++++++------ .../elasticsearch/search/SearchService.java | 2 +- .../search/builder/SearchSourceBuilder.java | 24 ++-- .../search/internal/DefaultSearchContext.java | 15 +-- .../internal/FilteredSearchContext.java | 9 +- .../search/internal/SearchContext.java | 5 +- .../search/internal/SubSearchContext.java | 3 +- .../search/query/QueryPhase.java | 4 +- .../cluster/settings/ClusterSettingsIT.java | 2 +- .../common/unit/TimeValueTests.java | 43 +++--- .../index/IndexingSlowLogTests.java | 8 +- .../index/SearchSlowLogTests.java | 16 +-- .../index/mapper/ttl/TTLMappingTests.java | 2 +- .../ingest/PipelineExecutionServiceTests.java | 2 +- .../builder/SearchSourceBuilderTests.java | 7 +- .../functionscore/DecayFunctionScoreIT.java | 4 +- .../versioning/SimpleVersioningIT.java | 2 +- .../migration/migrate_5_0/settings.asciidoc | 8 ++ .../org/elasticsearch/test/ESTestCase.java | 2 +- .../test/InternalTestCluster.java | 8 +- .../elasticsearch/test/TestSearchContext.java | 7 +- 24 files changed, 183 insertions(+), 122 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 745c8374063..5a08a834c3c 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -323,7 +323,6 @@ - diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Streamable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Streamable.java index a37c6371482..99c054c4c78 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/Streamable.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/Streamable.java @@ -24,10 +24,7 @@ import java.io.IOException; /** * Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown * across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by - * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For - * example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization. - * {@linkplain org.elasticsearch.common.unit.TimeValue} actually implements {@linkplain Writeable} not {@linkplain Streamable} but it has - * the same contract. + * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. * * Prefer implementing {@link Writeable} over implementing this interface where possible. Lots of code depends on this interface so this * isn't always possible. diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java index cf127e5b968..16497533e29 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -24,8 +24,7 @@ import java.io.IOException; /** * Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown * across the wire" using Elasticsearch's internal protocol. 
If the implementer also implements equals and hashCode then a copy made by - * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. For - * example, {@link org.elasticsearch.common.unit.TimeValue} converts the time to nanoseconds for serialization. + * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged. * * Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable} * so this isn't always possible. diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 2058355d300..5a56603dad7 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -30,14 +30,50 @@ import org.joda.time.format.PeriodFormat; import org.joda.time.format.PeriodFormatter; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.TimeUnit; public class TimeValue implements Writeable { /** How many nano-seconds in one milli-second */ - public static final long NSEC_PER_MSEC = 1000000; + public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); + + private static Map TIME_UNIT_BYTE_MAP; + private static Map BYTE_TIME_UNIT_MAP; + + static { + final Map timeUnitByteMap = new HashMap<>(); + timeUnitByteMap.put(TimeUnit.NANOSECONDS, (byte)0); + timeUnitByteMap.put(TimeUnit.MICROSECONDS, (byte)1); + timeUnitByteMap.put(TimeUnit.MILLISECONDS, (byte)2); + timeUnitByteMap.put(TimeUnit.SECONDS, (byte)3); + timeUnitByteMap.put(TimeUnit.MINUTES, (byte)4); + timeUnitByteMap.put(TimeUnit.HOURS, (byte)5); + timeUnitByteMap.put(TimeUnit.DAYS, (byte)6); + + final Set bytes = new HashSet<>(); + for (TimeUnit value : TimeUnit.values()) { + assert timeUnitByteMap.containsKey(value) : value; + assert bytes.add(timeUnitByteMap.get(value)); + } + + final Map byteTimeUnitMap = new HashMap<>(); + for (Map.Entry entry : timeUnitByteMap.entrySet()) { + byteTimeUnitMap.put(entry.getValue(), entry.getKey()); + } + + TIME_UNIT_BYTE_MAP = Collections.unmodifiableMap(timeUnitByteMap); + BYTE_TIME_UNIT_MAP = Collections.unmodifiableMap(byteTimeUnitMap); + } + + public static final TimeValue MINUS_ONE = timeValueMillis(-1); + public static final TimeValue ZERO = timeValueMillis(0); public static TimeValue timeValueNanos(long nanos) { return new TimeValue(nanos, TimeUnit.NANOSECONDS); @@ -60,8 +96,19 @@ public class TimeValue implements Writeable { } private final long duration; + + // visible for testing + long duration() { + return duration; + } + private final TimeUnit timeUnit; + // visible for testing + TimeUnit timeUnit() { + return timeUnit; + } + public TimeValue(long millis) { this(millis, TimeUnit.MILLISECONDS); } @@ -76,12 +123,13 @@ public class TimeValue implements Writeable { */ public TimeValue(StreamInput in) throws IOException { duration = in.readZLong(); - timeUnit = TimeUnit.NANOSECONDS; + timeUnit = BYTE_TIME_UNIT_MAP.get(in.readByte()); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeZLong(nanos()); + out.writeZLong(duration); + out.writeByte(TIME_UNIT_BYTE_MAP.get(timeUnit)); } 
public long nanos() { @@ -240,19 +288,19 @@ public class TimeValue implements Writeable { } switch (timeUnit) { case NANOSECONDS: - return Strings.format1Decimals(duration, "nanos"); + return duration + "nanos"; case MICROSECONDS: - return Strings.format1Decimals(duration, "micros"); + return duration + "micros"; case MILLISECONDS: - return Strings.format1Decimals(duration, "ms"); + return duration + "ms"; case SECONDS: - return Strings.format1Decimals(duration, "s"); + return duration + "s"; case MINUTES: - return Strings.format1Decimals(duration, "m"); + return duration + "m"; case HOURS: - return Strings.format1Decimals(duration, "h"); + return duration + "h"; case DAYS: - return Strings.format1Decimals(duration, "d"); + return duration + "d"; default: throw new IllegalArgumentException("unknown time unit: " + timeUnit.name()); } @@ -270,47 +318,48 @@ public class TimeValue implements Writeable { return defaultValue; } try { - long millis; String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim(); - if (lowerSValue.endsWith("ms")) { - millis = parse(lowerSValue, 2, 1); + if (lowerSValue.endsWith("nanos")) { + return new TimeValue(parse(lowerSValue, 5), TimeUnit.NANOSECONDS); + } else if (lowerSValue.endsWith("micros")) { + return new TimeValue(parse(lowerSValue, 6), TimeUnit.MICROSECONDS); + } else if (lowerSValue.endsWith("ms")) { + return new TimeValue(parse(lowerSValue, 2), TimeUnit.MILLISECONDS); } else if (lowerSValue.endsWith("s")) { - millis = parse(lowerSValue, 1, 1000); + return new TimeValue(parse(lowerSValue, 1), TimeUnit.SECONDS); } else if (lowerSValue.endsWith("m")) { - millis = parse(lowerSValue, 1, 60 * 1000); + return new TimeValue(parse(lowerSValue, 1), TimeUnit.MINUTES); } else if (lowerSValue.endsWith("h")) { - millis = parse(lowerSValue, 1, 60 * 60 * 1000); + return new TimeValue(parse(lowerSValue, 1), TimeUnit.HOURS); } else if (lowerSValue.endsWith("d")) { - millis = parse(lowerSValue, 1, 24 * 60 * 60 * 1000); - } else if (lowerSValue.endsWith("w")) { - millis = parse(lowerSValue, 1, 7 * 24 * 60 * 60 * 1000); - } else if (lowerSValue.equals("-1")) { - // Allow this special value to be unit-less: - millis = -1; - } else if (lowerSValue.equals("0")) { - // Allow this special value to be unit-less: - millis = 0; + return new TimeValue(parse(lowerSValue, 1), TimeUnit.DAYS); + } else if (lowerSValue.matches("-0*1")) { + return TimeValue.MINUS_ONE; + } else if (lowerSValue.matches("0+")) { + return TimeValue.ZERO; } else { // Missing units: - throw new ElasticsearchParseException("Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", settingName, sValue); + throw new ElasticsearchParseException( + "failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", + settingName, + sValue); } - return new TimeValue(millis, TimeUnit.MILLISECONDS); } catch (NumberFormatException e) { - throw new ElasticsearchParseException("Failed to parse [{}]", e, sValue); + throw new ElasticsearchParseException("failed to parse [{}]", e, sValue); } } - private static long parse(String s, int suffixLength, long scale) { - return (long) (Double.parseDouble(s.substring(0, s.length() - suffixLength)) * scale); + private static long parse(String s, int suffixLength) { + return Long.parseLong(s.substring(0, s.length() - suffixLength).trim()); } - static final long C0 = 1L; - static final long C1 = C0 * 1000L; - static final long C2 = C1 * 1000L; - static final long C3 = C2 * 1000L; - static final long C4 = C3 * 60L; - 
static final long C5 = C4 * 60L; - static final long C6 = C5 * 24L; + private static final long C0 = 1L; + private static final long C1 = C0 * 1000L; + private static final long C2 = C1 * 1000L; + private static final long C3 = C2 * 1000L; + private static final long C4 = C3 * 60L; + private static final long C5 = C4 * 60L; + private static final long C6 = C5 * 24L; @Override public boolean equals(Object o) { diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 50b59631e95..96fee4b968f 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -685,7 +685,7 @@ public class SearchService extends AbstractLifecycleComponent imp if (source.profile()) { context.setProfilers(new Profilers(context.searcher())); } - context.timeoutInMillis(source.timeoutInMillis()); + context.timeout(source.timeout()); context.terminateAfter(source.terminateAfter()); if (source.aggregations() != null) { try { diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 24278bdf127..ebf8880d4aa 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -143,7 +143,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private Float minScore; - private long timeoutInMillis = -1; + private TimeValue timeout = null; private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; private List fieldNames; @@ -241,7 +241,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new); terminateAfter = in.readVInt(); - timeoutInMillis = in.readLong(); + timeout = in.readOptionalWriteable(TimeValue::new); trackScores = in.readBoolean(); version = in.readOptionalBoolean(); ext = in.readOptionalBytesReference(); @@ -320,7 +320,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } out.writeOptionalWriteable(suggestBuilder); out.writeVInt(terminateAfter); - out.writeLong(timeoutInMillis); + out.writeOptionalWriteable(timeout); out.writeBoolean(trackScores); out.writeOptionalBoolean(version); out.writeOptionalBytesReference(ext); @@ -446,15 +446,15 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * An optional timeout to control how long search is allowed to take. */ public SearchSourceBuilder timeout(TimeValue timeout) { - this.timeoutInMillis = timeout.millis(); + this.timeout = timeout; return this; } /** * Gets the timeout to control how long search is allowed to take. 
*/ - public long timeoutInMillis() { - return timeoutInMillis; + public TimeValue timeout() { + return timeout; } /** @@ -928,7 +928,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ rewrittenBuilder.stats = stats; rewrittenBuilder.suggestBuilder = suggestBuilder; rewrittenBuilder.terminateAfter = terminateAfter; - rewrittenBuilder.timeoutInMillis = timeoutInMillis; + rewrittenBuilder.timeout = timeout; rewrittenBuilder.trackScores = trackScores; rewrittenBuilder.version = version; return rewrittenBuilder; @@ -958,7 +958,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } else if (context.getParseFieldMatcher().match(currentFieldName, SIZE_FIELD)) { size = parser.intValue(); } else if (context.getParseFieldMatcher().match(currentFieldName, TIMEOUT_FIELD)) { - timeoutInMillis = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()).millis(); + timeout = TimeValue.parseTimeValue(parser.text(), null, TIMEOUT_FIELD.getPreferredName()); } else if (context.getParseFieldMatcher().match(currentFieldName, TERMINATE_AFTER_FIELD)) { terminateAfter = parser.intValue(); } else if (context.getParseFieldMatcher().match(currentFieldName, MIN_SCORE_FIELD)) { @@ -1095,8 +1095,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.field(SIZE_FIELD.getPreferredName(), size); } - if (timeoutInMillis != -1) { - builder.field(TIMEOUT_FIELD.getPreferredName(), TimeValue.timeValueMillis(timeoutInMillis).toString()); + if (timeout != null && !timeout.equals(TimeValue.MINUS_ONE)) { + builder.field(TIMEOUT_FIELD.getPreferredName(), timeout.getStringRep()); } if (terminateAfter != SearchContext.DEFAULT_TERMINATE_AFTER) { @@ -1341,7 +1341,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from, highlightBuilder, indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, - size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile); + size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, profile); } @Override @@ -1373,7 +1373,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ && Objects.equals(stats, other.stats) && Objects.equals(suggestBuilder, other.suggestBuilder) && Objects.equals(terminateAfter, other.terminateAfter) - && Objects.equals(timeoutInMillis, other.timeoutInMillis) + && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) && Objects.equals(version, other.version) && Objects.equals(profile, other.profile); diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 06df04db8a0..e10cafdaaf6 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -24,8 +24,8 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; import org.apache.lucene.search.FieldDoc; +import 
org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; @@ -99,8 +99,7 @@ public class DefaultSearchContext extends SearchContext { private final QuerySearchResult queryResult; private final FetchSearchResult fetchResult; private float queryBoost = 1.0f; - // timeout in millis - private long timeoutInMillis; + private TimeValue timeout; // terminate after count private int terminateAfter = DEFAULT_TERMINATE_AFTER; private List groupStats; @@ -174,7 +173,7 @@ public class DefaultSearchContext extends SearchContext { this.indexService = indexService; this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; - this.timeoutInMillis = timeout.millis(); + this.timeout = timeout; queryShardContext = indexService.newQueryShardContext(searcher.getIndexReader()); queryShardContext.setTypes(request.types()); } @@ -512,13 +511,13 @@ public class DefaultSearchContext extends SearchContext { } @Override - public long timeoutInMillis() { - return timeoutInMillis; + public TimeValue timeout() { + return timeout; } @Override - public void timeoutInMillis(long timeoutInMillis) { - this.timeoutInMillis = timeoutInMillis; + public void timeout(TimeValue timeout) { + this.timeout = timeout; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 822ac6f5422..6c646a62b6b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; @@ -264,13 +265,13 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public long timeoutInMillis() { - return in.timeoutInMillis(); + public TimeValue timeout() { + return in.timeout(); } @Override - public void timeoutInMillis(long timeoutInMillis) { - in.timeoutInMillis(timeoutInMillis); + public void timeout(TimeValue timeout) { + in.timeout(timeout); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 5135f42ab56..7b29cabdf16 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalysisService; @@ -226,9 +227,9 @@ public abstract class SearchContext implements Releasable { public abstract IndexFieldDataService fieldData(); - public abstract long 
timeoutInMillis(); + public abstract TimeValue timeout(); - public abstract void timeoutInMillis(long timeoutInMillis); + public abstract void timeout(TimeValue timeout); public abstract int terminateAfter(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 37fb608fd0c..363b6ad076a 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.internal; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -155,7 +156,7 @@ public class SubSearchContext extends FilteredSearchContext { } @Override - public void timeoutInMillis(long timeoutInMillis) { + public void timeout(TimeValue timeout) { throw new UnsupportedOperationException("Not supported"); } diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 4e3a642f694..22e6c40d338 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -349,12 +349,12 @@ public class QueryPhase implements SearchPhase { } } - final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis(); + final boolean timeoutSet = searchContext.timeout() != null && !searchContext.timeout().equals(SearchService.NO_TIMEOUT); if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed final Collector child = collector; // TODO: change to use our own counter that uses the scheduler in ThreadPool // throws TimeLimitingCollector.TimeExceededException when timeout has reached - collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis()); + collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeout().millis()); if (doProfile) { collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT, Collections.singletonList((InternalProfileCollector) child)); diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 1c25659d2cf..7f423d1bb9e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -263,7 +263,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { .get(); fail("bogus value"); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); } assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L)); diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java 
b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index cc36625e68f..9b73f2f99af 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -85,9 +85,6 @@ public class TimeValueTests extends ESTestCase { assertEquals(new TimeValue(10, TimeUnit.SECONDS), TimeValue.parseTimeValue("10S", null, "test")); - assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS), - TimeValue.parseTimeValue("0.1s", null, "test")); - assertEquals(new TimeValue(10, TimeUnit.MINUTES), TimeValue.parseTimeValue("10 m", null, "test")); assertEquals(new TimeValue(10, TimeUnit.MINUTES), @@ -115,14 +112,17 @@ public class TimeValueTests extends ESTestCase { assertEquals(new TimeValue(10, TimeUnit.DAYS), TimeValue.parseTimeValue("10D", null, "test")); - assertEquals(new TimeValue(70, TimeUnit.DAYS), - TimeValue.parseTimeValue("10 w", null, "test")); - assertEquals(new TimeValue(70, TimeUnit.DAYS), - TimeValue.parseTimeValue("10w", null, "test")); - assertEquals(new TimeValue(70, TimeUnit.DAYS), - TimeValue.parseTimeValue("10 W", null, "test")); - assertEquals(new TimeValue(70, TimeUnit.DAYS), - TimeValue.parseTimeValue("10W", null, "test")); + final int length = randomIntBetween(0, 8); + final String zeros = new String(new char[length]).replace('\0', '0'); + assertTrue(TimeValue.parseTimeValue("-" + zeros + "1", null, "test") == TimeValue.MINUS_ONE); + assertTrue(TimeValue.parseTimeValue(zeros + "0", null, "test") == TimeValue.ZERO); + } + + public void testRoundTrip() { + final String s = randomTimeValue(); + assertThat(TimeValue.parseTimeValue(s, null, "test").getStringRep(), equalTo(s)); + final TimeValue t = new TimeValue(randomIntBetween(1, 128), randomFrom(TimeUnit.values())); + assertThat(TimeValue.parseTimeValue(t.getStringRep(), null, "test"), equalTo(t)); } private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException { @@ -134,13 +134,20 @@ public class TimeValueTests extends ESTestCase { TimeValue inValue = new TimeValue(in); assertThat(inValue, equalTo(value)); + assertThat(inValue.duration(), equalTo(value.duration())); + assertThat(inValue.timeUnit(), equalTo(value.timeUnit())); } public void testSerialize() throws Exception { - assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 8); - assertEqualityAfterSerialize(timeValueNanos(-1), 1); - assertEqualityAfterSerialize(timeValueNanos(1), 1); - assertEqualityAfterSerialize(timeValueSeconds(30), 6); + assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3); + assertEqualityAfterSerialize(timeValueNanos(-1), 2); + assertEqualityAfterSerialize(timeValueNanos(1), 2); + assertEqualityAfterSerialize(timeValueSeconds(30), 2); + + final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values())); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeZLong(timeValue.duration()); + assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length()); } public void testFailOnUnknownUnits() { @@ -148,7 +155,7 @@ public class TimeValueTests extends ESTestCase { TimeValue.parseTimeValue("23tw", null, "test"); fail("Expected ElasticsearchParseException"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("Failed to parse")); + assertThat(e.getMessage(), containsString("failed to parse")); } } @@ -157,7 +164,7 @@ public class TimeValueTests extends ESTestCase { TimeValue.parseTimeValue("42", null, "test"); fail("Expected 
ElasticsearchParseException"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("Failed to parse")); + assertThat(e.getMessage(), containsString("failed to parse")); } } @@ -166,7 +173,7 @@ public class TimeValueTests extends ESTestCase { TimeValue.parseTimeValue("42ms.", null, "test"); fail("Expected ElasticsearchParseException"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("Failed to parse")); + assertThat(e.getMessage(), containsString("failed to parse")); } } diff --git a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index a076224eae5..8951123d0da 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -176,28 +176,28 @@ public class IndexingSlowLogTests extends ESTestCase { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.indexing.slowlog.threshold.index.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.indexing.slowlog.threshold.index.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } } diff --git a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java 
index 04291d957a8..d2bffb0f749 100644 --- a/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -249,28 +249,28 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.query.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.query.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } } @@ -320,28 +320,28 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.trace] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } 
catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.debug] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.info] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } try { settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Failed to parse setting [index.search.slowlog.threshold.fetch.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); + assertEquals(ex.getMessage(), "failed to parse setting [index.search.slowlog.threshold.fetch.warn] with value [NOT A TIME VALUE] as a time value: unit is missing or unrecognized"); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index ad9e03327b2..620968ddbe8 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -139,7 +139,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_ttl") - .field("default", "1w") + .field("default", "7d") .endObject() .startObject("properties").field("field").startObject().field("type", "text").endObject().endObject() .endObject().endObject().string(); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index d001a829443..8bf6f77a026 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -140,7 +140,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { if (metaData == IngestDocument.MetaData.TTL) { - ingestDocument.setFieldValue(IngestDocument.MetaData.TTL.getFieldName(), "5w"); + ingestDocument.setFieldValue(IngestDocument.MetaData.TTL.getFieldName(), "35d"); } else { ingestDocument.setFieldValue(metaData.getFieldName(), "update" + metaData.getFieldName()); } diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 9d66669d07c..f3a78b65d78 100644 --- 
a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -86,7 +86,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; @@ -208,7 +207,7 @@ public class SearchSourceBuilderTests extends ESTestCase { builder.minScore(randomFloat() * 1000); } if (randomBoolean()) { - builder.timeout(new TimeValue(randomIntBetween(1, 100), randomFrom(TimeUnit.values()))); + builder.timeout(TimeValue.parseTimeValue(randomTimeValue(), null, "timeout")); } if (randomBoolean()) { builder.terminateAfter(randomIntBetween(1, 100000)); @@ -456,7 +455,7 @@ public class SearchSourceBuilderTests extends ESTestCase { public void testEqualsAndHashcode() throws IOException { SearchSourceBuilder firstBuilder = createSearchSourceBuilder(); - assertFalse("source builder is equal to null", firstBuilder.equals(null)); + assertNotNull("source builder is equal to null", firstBuilder); assertFalse("source builder is equal to incompatible type", firstBuilder.equals("")); assertTrue("source builder is not equal to self", firstBuilder.equals(firstBuilder)); assertThat("same source builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(), @@ -601,7 +600,7 @@ public class SearchSourceBuilderTests extends ESTestCase { final String query = "{ \"query\": { \"match_all\": {}}, \"timeout\": \"" + timeout + "\"}"; try (XContentParser parser = XContentFactory.xContent(query).createParser(query)) { final SearchSourceBuilder builder = SearchSourceBuilder.fromXContent(createParseContext(parser), aggParsers, suggesters); - assertThat(builder.timeoutInMillis(), equalTo(TimeValue.parseTimeValue(timeout, null, "timeout").millis())); + assertThat(builder.timeout(), equalTo(TimeValue.parseTimeValue(timeout, null, "timeout"))); } } diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 1418f476e00..155363f72a7 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -594,9 +594,9 @@ public class DecayFunctionScoreIT extends ESIntegTestCase { searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source( searchSource().query( functionScoreQuery(QueryBuilders.matchAllQuery(), new FilterFunctionBuilder[]{ - new FilterFunctionBuilder(linearDecayFunction("num1", null, "1000w")), + new FilterFunctionBuilder(linearDecayFunction("num1", null, "7000d")), new FilterFunctionBuilder(gaussDecayFunction("num1", null, "1d")), - new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "1000w")) + new FilterFunctionBuilder(exponentialDecayFunction("num1", null, "7000d")) }).scoreMode(FiltersFunctionScoreQuery.ScoreMode.MULTIPLY)))); SearchResponse sr = response.actionGet(); diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 2356395219f..74b910bf2b4 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ 
b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -220,7 +220,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { fail("did not hit expected exception"); } catch (IllegalArgumentException iae) { // expected - assertTrue(iae.getMessage().contains("Failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized")); + assertTrue(iae.getMessage().contains("failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized")); } } diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc index be578b00c3b..8a7c386acdb 100644 --- a/docs/reference/migration/migrate_5_0/settings.asciidoc +++ b/docs/reference/migration/migrate_5_0/settings.asciidoc @@ -286,3 +286,11 @@ The setting `bootstrap.mlockall` has been renamed to The default setting `include_global_state` for restoring snapshots has been changed from `true` to `false`. It has not been changed for taking snapshots and still defaults to `true` in that case. + +==== Time value parsing + +The unit 'w' representing weeks is no longer supported. + +Fractional time values (e.g., 0.5s) are no longer supported. For +example, this means when setting timeouts "0.5s" will be rejected and +should instead be input as "500ms". diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 860f133a453..d04d12304de 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -413,7 +413,7 @@ public abstract class ESTestCase extends LuceneTestCase { return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true); } - private static String[] TIME_SUFFIXES = new String[]{"d", "H", "ms", "s", "S", "w"}; + private static String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"}; private static String randomTimeValue(int lower, int upper) { return randomIntBetween(lower, upper) + randomFrom(TIME_SUFFIXES); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 025740dbe36..9a00fbc4557 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -410,12 +410,12 @@ public final class InternalTestCluster extends TestCluster { builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values())); } if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval - builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(10 + random.nextInt(2000))); + builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(10 + random.nextInt(2000)).getStringRep()); } else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything - builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60))); + builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)).getStringRep()); } if (random.nextBoolean()) { // sometimes set a - builder.put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), 
TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60))); + builder.put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)).getStringRep()); } builder.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1 + random.nextInt(3)); @@ -469,7 +469,7 @@ public final class InternalTestCluster extends TestCluster { builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), RandomInts.randomIntBetween(random, 0, 2000)); } if (random.nextBoolean()) { - builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000))); + builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)).getStringRep()); } return builder.build(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 8122abe483d..97240bd9e5e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; @@ -311,12 +312,12 @@ public class TestSearchContext extends SearchContext { } @Override - public long timeoutInMillis() { - return 0; + public TimeValue timeout() { + return TimeValue.ZERO; } @Override - public void timeoutInMillis(long timeoutInMillis) { + public void timeout(TimeValue timeout) { } @Override From 0ec07833a8f7fc9b8a2928444f2809514d8330ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Honza=20Kr=C3=A1l?= Date: Tue, 28 Jun 2016 01:54:49 +0200 Subject: [PATCH 06/43] [TEST] refactor search yaml tests (#19109) --- .../test/search/10_source_filtering.yaml | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index e0ac2aea2df..84bf44f7392 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -1,6 +1,5 @@ --- -"Source filtering": - +setup: - do: index: index: test_1 @@ -10,40 +9,54 @@ - do: indices.refresh: {} +--- +"_source: true": - do: search: - # stringified for boolean value body: { _source: true, query: { match_all: {} } } - length: { hits.hits: 1 } - match: { hits.hits.0._source.count: 1 } +--- +"_source: false": - do: { search: { body: { _source: false, query: { match_all: {} } } } } - length: { hits.hits: 1 } - is_false: hits.hits.0._source +--- +"no filtering": - do: { search: { body: { query: { match_all: {} } } } } - length: { hits.hits: 1 } - match: { hits.hits.0._source.count: 1 } +--- +"_source in body": - do: { search: { body: { _source: include.field1, query: { match_all: {} } } } } - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 +--- +"_source include and _source in body": - do: { search: { _source_include: include.field1, 
body: { _source: include.field2, query: { match_all: {} } } } } - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 +--- +"_source_include": - do: { search: { _source_include: include.field1, body: { query: { match_all: {} } } } } - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 +--- +"_source_exclude": - do: { search: { _source_exclude: count, body: { query: { match_all: {} } } } } - match: { hits.hits.0._source.include: { field1 : v1 , field2: v2 }} - is_false: hits.hits.0._source.count - +--- +"_source field1 field2": - do: search: body: @@ -53,6 +66,8 @@ - match: { hits.hits.0._source.include.field2: v2 } - is_false: hits.hits.0._source.count +--- +"_source.include field1 field2": - do: search: body: @@ -63,6 +78,8 @@ - match: { hits.hits.0._source.include.field2: v2 } - is_false: hits.hits.0._source.count +--- +"_source includes and excludes": - do: search: body: @@ -73,7 +90,8 @@ - match: { hits.hits.0._source.include.field1: v1 } - is_false: hits.hits.0._source.include.field2 - +--- +"fields in body": - do: search: body: @@ -81,6 +99,8 @@ query: { match_all: {} } - is_false: hits.hits.0._source +--- +"fields in body with source": - do: search: body: @@ -89,7 +109,8 @@ - match: { hits.hits.0._source.include.field2: v2 } - is_true: hits.hits.0._source - +--- +"fielddata_fields": - do: search: fielddata_fields: [ "count" ] From f79851e23a32c31efbb9342a84ef3e50feb061bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Honza=20Kr=C3=A1l?= Date: Tue, 28 Jun 2016 02:07:46 +0200 Subject: [PATCH 07/43] [API] separate tasks.list and tasks.get APIs in the json definition (#19107) --- .../rest-api-spec/api/{task.get.json => tasks.get.json} | 2 +- .../src/main/resources/rest-api-spec/api/tasks.list.json | 2 +- .../rest-api-spec/test/{task.get => tasks.get}/10_basic.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename rest-api-spec/src/main/resources/rest-api-spec/api/{task.get.json => tasks.get.json} (97%) rename rest-api-spec/src/main/resources/rest-api-spec/test/{task.get => tasks.get}/10_basic.yaml (92%) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/task.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json similarity index 97% rename from rest-api-spec/src/main/resources/rest-api-spec/api/task.get.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json index 8024f015e96..f97206cd16f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/task.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json @@ -1,5 +1,5 @@ { - "task.get": { + "tasks.get": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", "methods": ["GET"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index a1913fbfc17..a966cb0e507 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -4,7 +4,7 @@ "methods": ["GET"], "url": { "path": "/_tasks", - "paths": ["/_tasks", "/_tasks/{task_id}"], + "paths": ["/_tasks"], "parts": {}, "params": { "node_id": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/task.get/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yaml similarity index 92% rename from 
rest-api-spec/src/main/resources/rest-api-spec/test/task.get/10_basic.yaml rename to rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yaml index ba90e1541fe..48d9f46ac73 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/task.get/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yaml @@ -6,5 +6,5 @@ - do: catch: missing - task.get: + tasks.get: task_id: foo:1 From af989c0780e9b0c289fa7b96e55ffbc4b22a5524 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 28 Jun 2016 08:50:50 +0200 Subject: [PATCH 08/43] Support new Asia Pacific (Mumbai) ap-south-1 AWS region AWS [announced](http://www.allthingsdistributed.com/2016/06/introducing-aws-asia-pacific-mumbai-region.html) a new region: Asia Pacific (Mumbai) `ap-south-1`. We need to support it for: * repository-s3: s3.ap-south-1.amazonaws.com or s3-ap-south-1.amazonaws.com * discovery-ec2: ec2.ap-south-1.amazonaws.com For reference: http://docs.aws.amazon.com/general/latest/gr/rande.html Closes #19110. --- docs/plugins/discovery-ec2.asciidoc | 25 +++++++++---------- docs/plugins/repository-s3.asciidoc | 25 +++++++++---------- .../cloud/aws/AwsEc2ServiceImpl.java | 2 ++ .../cloud/aws/InternalAwsS3Service.java | 2 ++ 4 files changed, 28 insertions(+), 26 deletions(-) diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 17659d496b8..224080c522c 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -105,19 +105,18 @@ cloud: The `cloud.aws.region` can be set to a region and will automatically use the relevant settings for both `ec2` and `s3`. The available values are: -* `us-east` (`us-east-1`) -* `us-west` (`us-west-1`) -* `us-west-1` -* `us-west-2` -* `ap-southeast` (`ap-southeast-1`) -* `ap-southeast-1` -* `ap-southeast-2` -* `ap-northeast` (`ap-northeast-1`) -* `ap-northeast-2` (`ap-northeast-2`) -* `eu-west` (`eu-west-1`) -* `eu-central` (`eu-central-1`) -* `sa-east` (`sa-east-1`) -* `cn-north` (`cn-north-1`) +* `us-east` (`us-east-1`) for US East (N. Virginia) +* `us-west` (`us-west-1`) for US West (N. California) +* `us-west-2` for US West (Oregon) +* `ap-south-1` for Asia Pacific (Mumbai) +* `ap-southeast` (`ap-southeast-1`) for Asia Pacific (Singapore) +* `ap-southeast-2` for Asia Pacific (Sydney) +* `ap-northeast` (`ap-northeast-1`) for Asia Pacific (Tokyo) +* `ap-northeast-2` (`ap-northeast-2`) for Asia Pacific (Seoul) +* `eu-west` (`eu-west-1`) for EU (Ireland) +* `eu-central` (`eu-central-1`) for EU (Frankfurt) +* `sa-east` (`sa-east-1`) for South America (São Paulo) +* `cn-north` (`cn-north-1`) for China (Beijing) [[discovery-ec2-usage-signer]] ===== EC2 Signer API diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 63e3ad311b2..e1f07f6c8e2 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -110,19 +110,18 @@ The `cloud.aws.region` can be set to a region and will automatically use the rel You can specifically set it for s3 only using `cloud.aws.s3.region`. The available values are: -* `us-east` (`us-east-1`) -* `us-west` (`us-west-1`) -* `us-west-1` -* `us-west-2` -* `ap-southeast` (`ap-southeast-1`) -* `ap-southeast-1` -* `ap-southeast-2` -* `ap-northeast` (`ap-northeast-1`) -* `ap-northeast-2` (`ap-northeast-2`) -* `eu-west` (`eu-west-1`) -* `eu-central` (`eu-central-1`) -* `sa-east` (`sa-east-1`) -* `cn-north` (`cn-north-1`) +* `us-east` (`us-east-1`) for US East (N. 
Virginia) +* `us-west` (`us-west-1`) for US West (N. California) +* `us-west-2` for US West (Oregon) +* `ap-south-1` for Asia Pacific (Mumbai) +* `ap-southeast` (`ap-southeast-1`) for Asia Pacific (Singapore) +* `ap-southeast-2` for Asia Pacific (Sydney) +* `ap-northeast` (`ap-northeast-1`) for Asia Pacific (Tokyo) +* `ap-northeast-2` (`ap-northeast-2`) for Asia Pacific (Seoul) +* `eu-west` (`eu-west-1`) for EU (Ireland) +* `eu-central` (`eu-central-1`) for EU (Frankfurt) +* `sa-east` (`sa-east-1`) for South America (São Paulo) +* `cn-north` (`cn-north-1`) for China (Beijing) [[repository-s3-usage-signer]] ===== S3 Signer API diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index 2aec30d6ddb..5ce110487c4 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -148,6 +148,8 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent endpoint = "ec2.ap-southeast-1.amazonaws.com"; } else if (region.equals("us-gov-west") || region.equals("us-gov-west-1")) { endpoint = "ec2.us-gov-west-1.amazonaws.com"; + } else if (region.equals("ap-south-1")) { + endpoint = "ec2.ap-south-1.amazonaws.com"; } else if (region.equals("ap-southeast-2")) { endpoint = "ec2.ap-southeast-2.amazonaws.com"; } else if (region.equals("ap-northeast") || region.equals("ap-northeast-1")) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index 352aa196b06..287973a3a90 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -155,6 +155,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent Date: Tue, 28 Jun 2016 09:13:19 +0200 Subject: [PATCH 09/43] Tests: Rename task.get to tasks.get The task.get action got renamed to tasks.get, some tests did not change this. 
Relates #19107 --- .../rest-api-spec/test/delete_by_query/10_basic.yaml | 2 +- .../rest-api-spec/test/delete_by_query/70_throttle.yaml | 4 ++-- .../test/resources/rest-api-spec/test/reindex/10_basic.yaml | 2 +- .../resources/rest-api-spec/test/reindex/70_throttle.yaml | 4 ++-- .../rest-api-spec/test/update_by_query/10_basic.yaml | 2 +- .../rest-api-spec/test/update_by_query/60_throttle.yaml | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml index 85cd6143d69..041aa127cd4 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yaml @@ -69,7 +69,7 @@ - is_false: deleted - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task - is_false: node_failures diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml index 65a22781550..96cfaa42b5a 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/70_throttle.yaml @@ -134,7 +134,7 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task @@ -197,6 +197,6 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml index addbebd44a7..a567ca67bfa 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml @@ -93,7 +93,7 @@ - is_false: deleted - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task - is_false: node_failures diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml index 73e1a3a3a94..05d7668ed2e 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/70_throttle.yaml @@ -156,7 +156,7 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task @@ -214,6 +214,6 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml index 62c8677921d..17b2dc77816 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yaml @@ -53,7 +53,7 @@ - is_false: deleted - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task - is_false: node_failures diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml index e13e29bc3f5..7ecf7000bfd 100644 --- 
a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/60_throttle.yaml @@ -122,7 +122,7 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task @@ -172,6 +172,6 @@ task_id: $task - do: - task.get: + tasks.get: wait_for_completion: true task_id: $task From c557663b90dc3ac3f0ae7391cb6eac543812a946 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 24 Jun 2016 16:51:49 +0200 Subject: [PATCH 10/43] Make discovery-azure work again The discovery-plugin has been broken since 2.x because the code was not compliant with the security manager and because plugins have been refactored. closes #18637, #15630 --- docs/plugins/discovery-azure.asciidoc | 4 +- .../management/AzureComputeServiceImpl.java | 46 +++++++++++-------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/docs/plugins/discovery-azure.asciidoc b/docs/plugins/discovery-azure.asciidoc index e3bdc83e470..825c258133b 100644 --- a/docs/plugins/discovery-azure.asciidoc +++ b/docs/plugins/discovery-azure.asciidoc @@ -56,9 +56,11 @@ discovery: .Binding the network host ============================================== +WARNING: The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory. + It's important to define `network.host` as by default it's bound to `localhost`. -You can use {ref}/modules-network.html[core network host settings]. For example `_non_loopback_` or `_en0_`. +You can use {ref}/modules-network.html[core network host settings]. For example `_en0_`. ============================================== diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java index 0764ec99c12..076ce52ff80 100644 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/cloud/azure/management/AzureComputeServiceImpl.java @@ -20,19 +20,21 @@ package org.elasticsearch.cloud.azure.management; import com.microsoft.windowsazure.Configuration; +import com.microsoft.windowsazure.core.Builder; +import com.microsoft.windowsazure.core.DefaultBuilder; import com.microsoft.windowsazure.core.utils.KeyStoreType; import com.microsoft.windowsazure.management.compute.ComputeManagementClient; import com.microsoft.windowsazure.management.compute.ComputeManagementService; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cloud.azure.AzureServiceDisableException; import org.elasticsearch.cloud.azure.AzureServiceRemoteException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import java.io.IOException; +import java.util.ServiceLoader; /** * @@ -40,7 +42,7 @@ import java.io.IOException; public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService { - private final ComputeManagementClient computeManagementClient; + private final ComputeManagementClient client; private final String serviceName; @Inject @@ -54,28 +56,36 @@ public 
class AzureComputeServiceImpl extends AbstractLifecycleComponent Date: Tue, 28 Jun 2016 10:13:08 +0200 Subject: [PATCH 11/43] Update Java API doc for cluster health In 995e4eda08be99f72ef56052b3f78ceef9100885 we changed the cluster health Java API. We need to also change the documentation. Backport of #19093 in master branch --- docs/java-api/admin/cluster/health.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/admin/cluster/health.asciidoc b/docs/java-api/admin/cluster/health.asciidoc index 7d20fdde6a3..615a011cf72 100644 --- a/docs/java-api/admin/cluster/health.asciidoc +++ b/docs/java-api/admin/cluster/health.asciidoc @@ -14,7 +14,7 @@ String clusterName = healths.getClusterName(); <2> int numberOfDataNodes = healths.getNumberOfDataNodes(); <3> int numberOfNodes = healths.getNumberOfNodes(); <4> -for (ClusterIndexHealth health : healths) { <5> +for (ClusterIndexHealth health : healths.getIndices().values()) { <5> String index = health.getIndex(); <6> int numberOfShards = health.getNumberOfShards(); <7> int numberOfReplicas = health.getNumberOfReplicas(); <8> From 26e6a522c7fedac27ebf0c6a5d5e8007faf0bd1a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 28 Jun 2016 09:37:34 +0100 Subject: [PATCH 12/43] [TEST] Fixed bounds calculation for extended bounds in histogram agg empty buckets test --- .../java/org/elasticsearch/messy/tests/HistogramTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java index 45274f44401..a6349db0d07 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java @@ -857,7 +857,7 @@ public class HistogramTests extends ESIntegTestCase { boolean invalidBoundsError = boundsMin > boundsMax; // constructing the newly expected bucket list - int bucketsCount = numValueBuckets + addedBucketsLeft + addedBucketsRight; + int bucketsCount = (int) ((boundsMaxKey - boundsMinKey) / interval) + 1; long[] extendedValueCounts = new long[bucketsCount]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); @@ -893,7 +893,7 @@ public class HistogramTests extends ESIntegTestCase { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(bucketsCount)); - long key = Math.min(boundsMinKey, 0); + long key = boundsMinKey; for (int i = 0; i < bucketsCount; i++) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); From fc9fa3afaf28f7e362eca2a9ab28b6f0dcd0e3cb Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 28 Jun 2016 12:26:03 +0200 Subject: [PATCH 13/43] Added release notes for 5.0.0-alpha4 --- .../release-notes/5.0.0-alpha4.asciidoc | 353 ++++++++++++++++++ 1 file changed, 353 insertions(+) create mode 100644 docs/reference/release-notes/5.0.0-alpha4.asciidoc diff --git a/docs/reference/release-notes/5.0.0-alpha4.asciidoc b/docs/reference/release-notes/5.0.0-alpha4.asciidoc new file mode 100644 index 00000000000..b85d4ae32a5 --- /dev/null +++ b/docs/reference/release-notes/5.0.0-alpha4.asciidoc @@ -0,0 +1,353 @@ +[[release-notes-5.0.0-alpha4]] +== 5.0.0-alpha4 Release Notes + +Also see <>. + +IMPORTANT: This is an alpha release and is intended for _testing purposes only_. 
Indices created in this version will *not be compatible with Elasticsearch 5.0.0 GA*. Upgrading 5.0.0-alpha4 to any other version is not supported. + +[[breaking-5.0.0-alpha4]] +[float] +=== Breaking changes + +Aggregations:: +* Remove size 0 options in aggregations {pull}18854[#18854] (issue: {issue}18838[#18838]) + +CRUD:: +* Wait for changes to be visible by search {pull}17986[#17986] (issue: {issue}1063[#1063]) + +Core:: +* Register thread pool settings {pull}18674[#18674] (issues: {issue}18613[#18613], {issue}9216[#9216]) +* Remove cluster name from data path {pull}18554[#18554] (issue: {issue}17810[#17810]) + +Highlighting:: +* Register Highlighter instances instead of classes {pull}18859[#18859] + +Inner Hits:: +* Also do not serialize `_index` key in search response for parent/child inner hits {pull}19011[#19011] +* Don't include `_id`, `_type` and `_index` keys in search response for inner hits {pull}18995[#18995] (issue: {issue}18091[#18091]) +* Nested inner hits shouldn't use relative paths {pull}18567[#18567] (issue: {issue}16653[#16653]) + +Internal:: +* Cleanup ClusterService dependencies and detached from Guice {pull}18941[#18941] +* Simplify SubFetchPhase interface {pull}18881[#18881] +* Simplify FetchSubPhase registration and detach it from Guice {pull}18862[#18862] + +Java API:: +* Remove setRefresh {pull}18752[#18752] (issue: {issue}1063[#1063]) + +Mapping:: +* Remove `_timestamp` and `_ttl` on 5.x indices. {pull}18980[#18980] (issue: {issue}18280[#18280]) + +Packaging:: +* Remove allow running as root {pull}18694[#18694] (issue: {issue}18688[#18688]) + +Plugins:: +* Fail to start if plugin tries broken onModule {pull}19025[#19025] +* Simplify ScriptModule and script registration {pull}18903[#18903] +* Cut over settings registration to a pull model {pull}18890[#18890] +* Plugins cleanup {pull}18594[#18594] (issue: {issue}18588[#18588]) + +Scripting:: +* Move search template to lang-mustache module {pull}18765[#18765] (issue: {issue}17906[#17906]) + +Search:: +* Remove only node preference {pull}18875[#18875] (issue: {issue}18822[#18822]) +* Add search preference to prefer multiple nodes {pull}18872[#18872] (issue: {issue}18822[#18822]) + +Settings:: +* Rename boostrap.mlockall to bootstrap.memory_lock {pull}18669[#18669] + +Snapshot/Restore:: +* Change the default of `include_global_state` from true to false for snapshot restores {pull}18773[#18773] (issue: {issue}18569[#18569]) + + + +[[feature-5.0.0-alpha4]] +[float] +=== New features + +Aggregations:: +* Adds aggregation profiling to the profile API {pull}18414[#18414] (issue: {issue}10538[#10538]) +* New Matrix Stats Aggregation module {pull}18300[#18300] (issue: {issue}16826[#16826]) + +Index APIs:: +* Add rollover API to switch index aliases given some predicates {pull}18732[#18732] (issue: {issue}18647[#18647]) + +Mapping:: +* Expose half-floats. {pull}18887[#18887] + +REST:: +* Low level Rest Client {pull}18735[#18735] (issue: {issue}7743[#7743]) + +Scroll:: +* Add the ability to partition a scroll in multiple slices. {pull}18237[#18237] (issue: {issue}13494[#13494]) + +Store:: +* Expose MMapDirectory.preLoad(). {pull}18880[#18880] +* Add primitive to shrink an index into a single shard {pull}18270[#18270] + + + +[[enhancement-5.0.0-alpha4]] +[float] +=== Enhancements + +Aggregations:: +* Automatically set the collection mode to breadth_first in the terms aggregation when the cardinality of the field is unknown or smaller than the requested size. 
{pull}18779[#18779] (issue: {issue}9825[#9825]) +* Rename PipelineAggregatorBuilder to PipelineAggregationBuilder. {pull}18677[#18677] (issue: {issue}18377[#18377]) +* AggregatorBuilder and PipelineAggregatorBuilder do not need generics. {pull}18368[#18368] (issue: {issue}18133[#18133]) + +Allocation:: +* Allow `_shrink` to N shards if source shards is a multiple of N {pull}18699[#18699] +* Only filter intial recovery (post API) when shrinking an index {pull}18661[#18661] +* Estimate shard size for shrinked indices {pull}18659[#18659] +* Only fail relocation target shard if failing source shard is a primary {pull}18574[#18574] (issue: {issue}16144[#16144]) +* Simplify delayed shard allocation {pull}18351[#18351] (issue: {issue}18293[#18293]) + +Analysis:: +* Add a MultiTermAwareComponent marker interface to analysis factories. {pull}19028[#19028] (issues: {issue}18064[#18064], {issue}9978[#9978]) +* Add Flags Parameter for Char Filter {pull}18363[#18363] (issue: {issue}18362[#18362]) + +Cache:: +* Cache FieldStats in the request cache {pull}18768[#18768] (issue: {issue}18717[#18717]) + +Cluster:: +* Index creation does not cause the cluster health to go RED {pull}18737[#18737] (issues: {issue}9106[#9106], {issue}9126[#9126]) +* Cluster Health class improvements {pull}18673[#18673] + +Core:: +* Read Elasticsearch manifest via URL {pull}18999[#18999] (issue: {issue}18996[#18996]) +* Throw if the local node is not set {pull}18963[#18963] (issue: {issue}18962[#18962]) +* Improve performance of applyDeletedShards {pull}18788[#18788] (issue: {issue}18776[#18776]) +* Bootstrap check for OnOutOfMemoryError and seccomp {pull}18756[#18756] (issue: {issue}18736[#18736]) + +Dates:: +* Improve TimeZoneRoundingTests error messages {pull}18895[#18895] +* Improve TimeUnitRounding for edge cases and DST transitions {pull}18589[#18589] + +Expressions:: +* improve date api for expressions/painless fields {pull}18658[#18658] + +Index APIs:: +* Add Shrink request source parser to parse create index request body {pull}18802[#18802] + +Index Templates:: +* Parse and validate mappings on index template creation {pull}8802[#8802] (issue: {issue}2415[#2415]) + +Ingest:: +* Add `ignore_failure` option to all ingest processors {pull}18650[#18650] (issue: {issue}18493[#18493]) +* new ScriptProcessor for Ingest {pull}18193[#18193] + +Internal:: +* Hot methods redux {pull}19016[#19016] (issue: {issue}16725[#16725]) +* Remove forked joda time BaseDateTime class {pull}18953[#18953] +* Support optional ctor args in ConstructingObjectParser {pull}18725[#18725] +* Remove thread pool from page cache recycler {pull}18664[#18664] (issue: {issue}18613[#18613]) + +Java API:: +* Switch QueryBuilders to new MatchPhraseQueryBuilder {pull}18753[#18753] + +Logging:: +* Throw IllegalStateException when handshake fails due to version or cluster mismatch {pull}18676[#18676] + +Mapping:: +* Upgrade `string` fields to `text`/`keyword` even if `include_in_all` is set. 
{pull}19004[#19004] (issue: {issue}18974[#18974]) + +Network:: +* Exclude admin / diagnostic requests from HTTP request limiting {pull}18833[#18833] (issues: {issue}17951[#17951], {issue}18145[#18145]) +* Do not start scheduled pings until transport start {pull}18702[#18702] + +Packaging:: +* Remove explicit parallel new GC flag {pull}18767[#18767] +* Use JAVA_HOME or java.exe in PATH like the Linux scripts do {pull}18685[#18685] (issue: {issue}4913[#4913]) + +Percolator:: +* Add percolator query extraction support for dismax query {pull}18845[#18845] +* Improve percolate query performance by not verifying certain candidate matches {pull}18696[#18696] +* Improve percolator query term extraction {pull}18610[#18610] + +Plugin Lang Painless:: +* Painless Initializers {pull}19012[#19012] +* Add augmentation {pull}19003[#19003] +* Infer lambda arguments/return type {pull}18983[#18983] +* Fix explicit casts and improve tests. {pull}18958[#18958] +* Add lambda captures {pull}18954[#18954] +* improve Debugger to print code even if it hits exception {pull}18932[#18932] (issue: {issue}1[#1]) +* Move semicolon hack into lexer {pull}18931[#18931] +* Add flag support to regexes {pull}18927[#18927] +* improve lambda syntax (allow single expression) {pull}18924[#18924] +* Remove useless dropArguments in megamorphic cache {pull}18913[#18913] +* non-capturing lambda support {pull}18911[#18911] (issue: {issue}18824[#18824]) +* fix bugs in operators and more improvements for the dynamic case {pull}18899[#18899] +* improve unary operators and cleanup tests {pull}18867[#18867] (issue: {issue}18849[#18849]) +* Add support for the find operator (=~) and the match operator (==~) {pull}18858[#18858] +* Remove casts and boxing for dynamic math {pull}18849[#18849] (issue: {issue}18847[#18847]) +* Refactor def math {pull}18847[#18847] +* Add support for /regex/ {pull}18842[#18842] +* Array constructor references {pull}18831[#18831] +* Method references to user functions {pull}18828[#18828] +* Add } as a delimiter. {pull}18827[#18827] (issue: {issue}18821[#18821]) +* Add Lambda Stub Node {pull}18824[#18824] +* Add capturing method references {pull}18818[#18818] (issue: {issue}18748[#18748]) +* Add Functions to Painless {pull}18810[#18810] +* Add Method to Get New MethodWriters {pull}18771[#18771] +* Static For Each {pull}18757[#18757] +* Method reference support {pull}18748[#18748] (issue: {issue}18578[#18578]) +* Add support for the new Java 9 MethodHandles#arrayLength() factory {pull}18734[#18734] +* Improve painless compile-time exceptions {pull}18711[#18711] (issue: {issue}18600[#18600]) +* add java.time packages to painless whitelist {pull}18621[#18621] +* Add Function Reference Stub to Painless {pull}18578[#18578] + +Plugins:: +* Add did-you-mean for plugin cli {pull}18942[#18942] (issue: {issue}18896[#18896]) +* Plugins: Remove name() and description() from api {pull}18906[#18906] +* Emit nicer error message when trying to install unknown plugin {pull}18876[#18876] (issue: {issue}17226[#17226]) + +Query DSL:: +* Treat zero token in `common` terms query as MatchNoDocsQuery {pull}18656[#18656] +* Handle empty query bodies at parse time and remove EmptyQueryBuilder {pull}17624[#17624] (issues: {issue}17540[#17540], {issue}17541[#17541]) + +REST:: +* Adding status field in _msearch error request bodies {pull}18586[#18586] (issue: {issue}18013[#18013]) + +Recovery:: +* index shard should be able to cancel check index on close. 
{pull}18839[#18839] (issue: {issue}12011[#12011]) + +Reindex API:: +* Implement ctx.op = "delete" on _update_by_query and _reindex {pull}18614[#18614] (issue: {issue}18043[#18043]) + +Scripting:: +* Compile each Groovy script in its own classloader {pull}18918[#18918] (issue: {issue}18572[#18572]) +* Include script field even if it value is null {pull}18384[#18384] (issue: {issue}16408[#16408]) + +Scroll:: +* Add an index setting to limit the maximum number of slices allowed in a scroll request. {pull}18782[#18782] + +Search:: +* Change default similarity to BM25 {pull}18948[#18948] (issue: {issue}18944[#18944]) +* Add a parameter to cap the number of searches the msearch api will concurrently execute {pull}18721[#18721] + +Sequence IDs:: +* Persist sequence number checkpoints {pull}18949[#18949] (issue: {issue}10708[#10708]) +* Add sequence numbers to cat shards API {pull}18772[#18772] + +Settings:: +* Improve error message if a setting is not found {pull}18920[#18920] (issue: {issue}18663[#18663]) +* Cleanup placeholder replacement {pull}17335[#17335] + +Snapshot/Restore:: +* Adds UUIDs to snapshots {pull}18228[#18228] (issue: {issue}18156[#18156]) +* Clarify the semantics of the BlobContainer interface {pull}18157[#18157] (issue: {issue}15580[#15580]) + +Stats:: +* Add total_indexing_buffer/_in_bytes to nodes info API {pull}18914[#18914] (issue: {issue}18651[#18651]) +* Allow FieldStatsRequest to disable cache {pull}18900[#18900] +* Remove index_writer_max_memory stat from segment stats {pull}18651[#18651] (issues: {issue}14121[#14121], {issue}7440[#7440]) +* Move DocStats under Engine to get more accurate numbers {pull}18587[#18587] + +Task Manager:: +* Fetch result when wait_for_completion {pull}18905[#18905] +* Create get task API that falls back to the .tasks index {pull}18682[#18682] +* Add ability to store results for long running tasks {pull}17928[#17928] + +Translog:: +* Beef up Translog testing with random channel exceptions {pull}18997[#18997] +* Do not replay into translog on local recovery {pull}18547[#18547] + + + +[[bug-5.0.0-alpha4]] +[float] +=== Bug fixes + +Allocation:: +* Fix recovery throttling to properly handle relocating non-primary shards {pull}18701[#18701] (issue: {issue}18640[#18640]) + +CAT API:: +* Fix merge stats rendering in RestIndicesAction {pull}18720[#18720] + +CRUD:: +* Squash a race condition in RefreshListeners {pull}18806[#18806] + +Circuit Breakers:: +* Never trip circuit breaker in liveness request {pull}18627[#18627] (issue: {issue}17951[#17951]) + +Cluster:: +* Fix block checks when no indices are specified {pull}19047[#19047] (issue: {issue}8105[#8105]) +* Acknowledge index deletion requests based on standard cluster state acknowledgment {pull}18602[#18602] (issues: {issue}16442[#16442], {issue}18558[#18558]) + +Core:: +* Throw exception if using a closed transport client {pull}18722[#18722] (issue: {issue}18708[#18708]) + +Dates:: +* Fix invalid rounding value for TimeIntervalRounding close to DST transitions {pull}18800[#18800] +* Fix problem with TimeIntervalRounding on DST end {pull}18780[#18780] + +Expressions:: +* replace ScriptException with a better one {pull}18600[#18600] + +Ingest:: +* Fix ignore_failure behavior in _simulate?verbose and more cleanup {pull}18987[#18987] + +Internal:: +* Fix filtering of node ids for TransportNodesAction {pull}18634[#18634] (issue: {issue}18618[#18618]) + +Mapping:: +* Better error message when mapping configures null {pull}18809[#18809] (issue: {issue}18803[#18803]) +* Process dynamic templates in 
order. {pull}18638[#18638] (issues: {issue}18625[#18625], {issue}2401[#2401]) + +Packaging:: +* Remove extra bin/ directory in bin folder {pull}18630[#18630] + +Plugin Lang Painless:: +* Fix compound assignment with string concats {pull}18933[#18933] (issue: {issue}18929[#18929]) +* Fix horrible capture {pull}18907[#18907] (issue: {issue}18899[#18899]) +* Fix Casting Bug {pull}18871[#18871] + +Query DSL:: +* Make parsing of bool queries stricter {pull}19052[#19052] (issue: {issue}19034[#19034]) + +REST:: +* Get XContent params from request in Nodes rest actions {pull}18860[#18860] (issue: {issue}18794[#18794]) + +Reindex API:: +* Fix a race condition in reindex's rethrottle {pull}18731[#18731] (issue: {issue}18744[#18744]) + +Search:: +* Require timeout units when parsing query body {pull}19077[#19077] (issue: {issue}19075[#19075]) +* Close SearchContext if query rewrite failed {pull}18727[#18727] + +Settings:: +* Register "cloud.node.auto_attributes" setting in EC2 discovery plugin {pull}18678[#18678] + +Snapshot/Restore:: +* Better handling of an empty shard's segments_N file {pull}18784[#18784] (issue: {issue}18707[#18707]) + +Stats:: +* Fix sync flush total shards statistics {pull}18766[#18766] + +Translog:: +* Fix translog replay multiple operations same doc {pull}18611[#18611] (issues: {issue}18547[#18547], {issue}18623[#18623]) + + + +[[upgrade-5.0.0-alpha4]] +[float] +=== Upgrades + +Core:: +* Upgrade to Lucene 6.1.0. {pull}18926[#18926] +* Upgrade to lucene-6.1.0-snapshot-3a57bea. {pull}18786[#18786] +* Upgrade to Lucene 6.0.1. {pull}18648[#18648] (issues: {issue}17535[#17535], {issue}28[#28]) + +Dates:: +* Upgrade joda-time to 2.9.4 {pull}18609[#18609] (issues: {issue}14524[#14524], {issue}18017[#18017]) + +Packaging:: +* Upgrade JNA to 4.2.2 and remove optionality {pull}19045[#19045] (issue: {issue}13245[#13245]) + +Plugin Discovery EC2:: +* Update aws sdk to 1.10.69 and add use_throttle_retries repository setting {pull}17784[#17784] (issues: {issue}538[#538], {issue}586[#586], {issue}589[#589]) + + + From fa4844c3f47ebc56478ef1bb7913d73a962fe945 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 27 Jun 2016 17:33:01 -0400 Subject: [PATCH 14/43] Pull actions from plugins Instead of implementing onModule(ActionModule) to register actions, this has plugins implement ActionPlugin to declare actions. This is yet another step in cleaning up the plugin infrastructure. While I was in there I switched AutoCreateIndex and DestructiveOperations to be eagerly constructed which makes them easier to use when de-guice-ing the code base. 
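To make the new extension point concrete, here is a minimal sketch of how a plugin would declare an action under this model instead of calling registerAction() from onModule(ActionModule). The MyAction, TransportMyAction names (and their request/response types) are hypothetical placeholders, and the exact generic bounds on getActions() may differ slightly from the final interface; only ActionPlugin, ActionHandler, and getActionFilters() are taken from this patch.

[source,java]
--------------------------------------------------
import java.util.Collections;
import java.util.List;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import org.elasticsearch.plugins.Plugin;

// Hypothetical plugin showing the declarative registration style.
// MyAction and TransportMyAction are placeholders, not classes added by this patch.
public class MyPlugin extends Plugin implements ActionPlugin {

    @Override
    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
        // ActionModule collects these handlers from every installed plugin and
        // binds each action to its transport implementation itself.
        return Collections.singletonList(
                new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }

    @Override
    public List<Class<? extends ActionFilter>> getActionFilters() {
        // Filters are declared the same way; most plugins return an empty list.
        return Collections.emptyList();
    }
}
--------------------------------------------------

Because ActionModule now receives these declarations in its constructor, the transport client can skip the node-only bindings (AutoCreateIndex, TransportLivenessAction) outright rather than relying on the old `proxy` flag.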
--- .../elasticsearch/action/ActionModule.java | 316 +++++++++--------- .../action/support/AutoCreateIndex.java | 1 - .../action/support/DestructiveOperations.java | 2 - .../client/transport/TransportClient.java | 21 +- .../elasticsearch/cluster/ClusterModule.java | 9 +- .../elasticsearch/common/NamedRegistry.java | 59 ++++ .../indices/analysis/AnalysisModule.java | 47 +-- .../java/org/elasticsearch/node/Node.java | 10 +- .../elasticsearch/plugins/ActionPlugin.java | 87 +++++ .../org/elasticsearch/plugins/Plugin.java | 9 + .../cluster/node/tasks/TestTaskPlugin.java | 14 +- .../cluster/ClusterInfoServiceIT.java | 13 +- .../ContextAndHeaderTransportIT.java | 10 +- .../migration/migrate_5_0/plugins.asciidoc | 3 + .../script/mustache/MustachePlugin.java | 18 +- .../percolator/PercolatorPlugin.java | 23 +- .../index/reindex/ReindexPlugin.java | 20 +- 17 files changed, 419 insertions(+), 243 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/NamedRegistry.java create mode 100644 core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index a3a1fbca153..f5c83e272ce 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -64,6 +64,12 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction; import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; @@ -151,18 +157,12 @@ import org.elasticsearch.action.get.TransportMultiGetAction; import org.elasticsearch.action.get.TransportShardMultiGetAction; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.TransportIndexAction; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; -import org.elasticsearch.action.ingest.IngestActionFilter; -import org.elasticsearch.action.ingest.IngestProxyActionFilter; import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import 
org.elasticsearch.action.ingest.GetPipelineAction; import org.elasticsearch.action.ingest.GetPipelineTransportAction; +import org.elasticsearch.action.ingest.IngestActionFilter; +import org.elasticsearch.action.ingest.IngestProxyActionFilter; import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; @@ -189,186 +189,204 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction import org.elasticsearch.action.termvectors.TransportTermVectorsAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.Multibinder; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.ActionPlugin.ActionHandler; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + /** - * + * Builds and binds the generic action map, all {@link TransportAction}s, and {@link ActionFilters}. */ public class ActionModule extends AbstractModule { - private final Map actions = new HashMap<>(); - private final List> actionFilters = new ArrayList<>(); + private final boolean transportClient; + private final Map> actions; + private final List> actionFilters; + private final AutoCreateIndex autoCreateIndex; + private final DestructiveOperations destructiveOperations; - static class ActionEntry, Response extends ActionResponse> { - public final GenericAction action; - public final Class> transportAction; - public final Class[] supportTransportActions; + public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver, + ClusterSettings clusterSettings, List actionPlugins) { + this.transportClient = transportClient; + actions = setupActions(actionPlugins); + actionFilters = setupActionFilters(actionPlugins, ingestEnabled); + autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver); + destructiveOperations = new DestructiveOperations(settings, clusterSettings); + } - ActionEntry(GenericAction action, Class> transportAction, Class... supportTransportActions) { - this.action = action; - this.transportAction = transportAction; - this.supportTransportActions = supportTransportActions; + private Map> setupActions(List actionPlugins) { + // Subclass NamedRegistry for easy registration + class ActionRegistry extends NamedRegistry> { + public ActionRegistry() { + super("action"); + } + + public void register(ActionHandler handler) { + register(handler.getAction().name(), handler); + } + + public , Response extends ActionResponse> void register( + GenericAction action, Class> transportAction, + Class... 
supportTransportActions) { + register(new ActionHandler<>(action, transportAction, supportTransportActions)); + } } + ActionRegistry actions = new ActionRegistry(); + + actions.register(MainAction.INSTANCE, TransportMainAction.class); + actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); + actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); + actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); + actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class); + actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class); + actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); + + actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); + actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); + actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); + actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); + actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); + actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); + actions.register(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); + actions.register(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class); + actions.register(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class); + actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); + actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); + actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); + actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); + actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); + actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); + actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); + actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); + + actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); + actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); + actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); + actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); + actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); + actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); + actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); + actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class); + actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); + actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); + actions.register(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class); + actions.register(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class); + actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); + actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, + TransportGetFieldMappingsIndexAction.class); + actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class); + 
actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); + actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); + actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); + actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); + actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); + actions.register(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class); + actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); + actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class); + actions.register(FlushAction.INSTANCE, TransportFlushAction.class); + actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); + actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); + actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class); + actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); + actions.register(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); + actions.register(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); + actions.register(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); + actions.register(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); + actions.register(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); + + actions.register(IndexAction.INSTANCE, TransportIndexAction.class); + actions.register(GetAction.INSTANCE, TransportGetAction.class); + actions.register(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class); + actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, + TransportShardMultiTermsVectorAction.class); + actions.register(DeleteAction.INSTANCE, TransportDeleteAction.class); + actions.register(UpdateAction.INSTANCE, TransportUpdateAction.class); + actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class, + TransportShardMultiGetAction.class); + actions.register(BulkAction.INSTANCE, TransportBulkAction.class, + TransportShardBulkAction.class); + actions.register(SearchAction.INSTANCE, TransportSearchAction.class); + actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); + actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); + actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); + actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); + actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); + + //Indexed scripts + actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); + actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); + actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); + + actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class); + + actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); + actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); + actions.register(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class); + actions.register(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class); + + actionPlugins.stream().flatMap(p -> p.getActions().stream()).forEach(actions::register); + + return 
unmodifiableMap(actions.getRegistry()); } - private final boolean ingestEnabled; - private final boolean proxy; + private List> setupActionFilters(List actionPlugins, boolean ingestEnabled) { + List> filters = new ArrayList<>(); + if (transportClient == false) { + if (ingestEnabled) { + filters.add(IngestActionFilter.class); + } else { + filters.add(IngestProxyActionFilter.class); + } + } - public ActionModule(boolean ingestEnabled, boolean proxy) { - this.ingestEnabled = ingestEnabled; - this.proxy = proxy; - } - - /** - * Registers an action. - * - * @param action The action type. - * @param transportAction The transport action implementing the actual action. - * @param supportTransportActions Any support actions that are needed by the transport action. - * @param The request type. - * @param The response type. - */ - public , Response extends ActionResponse> void registerAction(GenericAction action, Class> transportAction, Class... supportTransportActions) { - actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions)); - } - - public ActionModule registerFilter(Class actionFilter) { - actionFilters.add(actionFilter); - return this; + for (ActionPlugin plugin : actionPlugins) { + filters.addAll(plugin.getActionFilters()); + } + return unmodifiableList(filters); } @Override protected void configure() { - if (proxy == false) { - if (ingestEnabled) { - registerFilter(IngestActionFilter.class); - } else { - registerFilter(IngestProxyActionFilter.class); - } - } - Multibinder actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class); for (Class actionFilter : actionFilters) { actionFilterMultibinder.addBinding().to(actionFilter); } bind(ActionFilters.class).asEagerSingleton(); - bind(AutoCreateIndex.class).asEagerSingleton(); - bind(DestructiveOperations.class).asEagerSingleton(); - registerAction(MainAction.INSTANCE, TransportMainAction.class); - registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); - registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); - registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); - registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class); - registerAction(GetTaskAction.INSTANCE, TransportGetTaskAction.class); - registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); - - registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); - registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); - registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); - registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); - registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); - registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); - registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); - registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class); - registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class); - registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); - registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); - registerAction(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); - registerAction(GetSnapshotsAction.INSTANCE, 
TransportGetSnapshotsAction.class); - registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); - registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); - registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); - registerAction(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); - - registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); - registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); - registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); - registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); - registerAction(ShrinkAction.INSTANCE, TransportShrinkAction.class); - registerAction(RolloverAction.INSTANCE, TransportRolloverAction.class); - registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); - registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class); - registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); - registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); - registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class); - registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class); - registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); - registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, TransportGetFieldMappingsIndexAction.class); - registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class); - registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); - registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); - registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); - registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); - registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); - registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class); - registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); - registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class); - registerAction(FlushAction.INSTANCE, TransportFlushAction.class); - registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); - registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); - registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); - registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); - registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); - registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); - registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); - registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); - registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); - - registerAction(IndexAction.INSTANCE, TransportIndexAction.class); - registerAction(GetAction.INSTANCE, TransportGetAction.class); - registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class); - registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, - TransportShardMultiTermsVectorAction.class); - registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class); - 
registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class); - registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class, - TransportShardMultiGetAction.class); - registerAction(BulkAction.INSTANCE, TransportBulkAction.class, - TransportShardBulkAction.class); - registerAction(SearchAction.INSTANCE, TransportSearchAction.class); - registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); - registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); - registerAction(ExplainAction.INSTANCE, TransportExplainAction.class); - registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); - registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class); - - //Indexed scripts - registerAction(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); - registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); - registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); - - registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class); - - registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); - registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); - registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class); - registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class); + bind(DestructiveOperations.class).toInstance(destructiveOperations); // register Name -> GenericAction Map that can be injected to instances. + @SuppressWarnings("rawtypes") MapBinder actionsBinder = MapBinder.newMapBinder(binder(), String.class, GenericAction.class); - for (Map.Entry entry : actions.entrySet()) { - actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action); + for (Map.Entry> entry : actions.entrySet()) { + actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().getAction()); } // register GenericAction -> transportAction Map that can be injected to instances. 
// also register any supporting classes - if (!proxy) { + if (false == transportClient) { + bind(AutoCreateIndex.class).toInstance(autoCreateIndex); bind(TransportLivenessAction.class).asEagerSingleton(); + @SuppressWarnings("rawtypes") MapBinder transportActionsBinder = MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class); - for (Map.Entry entry : actions.entrySet()) { + for (ActionHandler action : actions.values()) { // bind the action as eager singleton, so the map binder one will reuse it - bind(entry.getValue().transportAction).asEagerSingleton(); - transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton(); - for (Class supportAction : entry.getValue().supportTransportActions) { + bind(action.getTransportAction()).asEagerSingleton(); + transportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton(); + for (Class supportAction : action.getSupportTransportActions()) { bind(supportAction).asEagerSingleton(); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index 339abcb22bc..d4ddae78225 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -47,7 +47,6 @@ public final class AutoCreateIndex { private final IndexNameExpressionResolver resolver; private final AutoCreate autoCreate; - @Inject public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) { this.resolver = resolver; dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings); diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 31fc1d06175..56d5bf206f3 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -38,7 +37,6 @@ public final class DestructiveOperations extends AbstractComponent { Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope); private volatile boolean destructiveRequiresName; - @Inject public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) { super(settings); destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings); diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index f36fd199292..5786f6b1cfb 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.ActionPlugin; import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.SearchModule; @@ -125,6 +126,15 @@ public class TransportClient extends AbstractClient { final NetworkService networkService = new NetworkService(settings); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(); try { + final List> additionalSettings = new ArrayList<>(); + final List additionalSettingsFilter = new ArrayList<>(); + additionalSettings.addAll(pluginsService.getPluginSettings()); + additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter()); + for (final ExecutorBuilder builder : threadPool.builders()) { + additionalSettings.addAll(builder.getRegisteredSettings()); + } + SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); + ModulesBuilder modules = new ModulesBuilder(); // plugin modules must be added here, before others or we can get crazy injection errors... for (Module pluginModule : pluginsService.nodeModules()) { @@ -138,17 +148,10 @@ public class TransportClient extends AbstractClient { // noop } }); - modules.add(new ActionModule(false, true)); + modules.add(new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(), + pluginsService.filterPlugins(ActionPlugin.class))); pluginsService.processModules(modules); - final List> additionalSettings = new ArrayList<>(); - final List additionalSettingsFilter = new ArrayList<>(); - additionalSettings.addAll(pluginsService.getPluginSettings()); - additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter()); - for (final ExecutorBuilder builder : threadPool.builders()) { - additionalSettings.addAll(builder.getRegisteredSettings()); - } - SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(), settingsModule.getClusterSettings()); resourcesToClose.add(circuitBreakerService); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index cc11b58727d..47dcb6170bc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.DelayedAllocationService; -import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -101,6 +100,7 @@ public class ClusterModule extends AbstractModule { private final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); private final ExtensionPoint.ClassSet indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class); private final ClusterService clusterService; + private final IndexNameExpressionResolver indexNameExpressionResolver; // pkg private so tests can mock Class clusterInfoServiceImpl = InternalClusterInfoService.class; @@ -113,6 
+113,7 @@ public class ClusterModule extends AbstractModule { registerShardsAllocator(ClusterModule.BALANCED_ALLOCATOR, BalancedShardsAllocator.class); registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class); this.clusterService = clusterService; + indexNameExpressionResolver = new IndexNameExpressionResolver(settings); } public void registerAllocationDecider(Class allocationDecider) { @@ -127,6 +128,10 @@ public class ClusterModule extends AbstractModule { indexTemplateFilters.registerExtension(indexTemplateFilter); } + public IndexNameExpressionResolver getIndexNameExpressionResolver() { + return indexNameExpressionResolver; + } + @Override protected void configure() { // bind ShardsAllocator @@ -151,7 +156,7 @@ public class ClusterModule extends AbstractModule { bind(MetaDataIndexAliasesService.class).asEagerSingleton(); bind(MetaDataUpdateSettingsService.class).asEagerSingleton(); bind(MetaDataIndexTemplateService.class).asEagerSingleton(); - bind(IndexNameExpressionResolver.class).asEagerSingleton(); + bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); bind(RoutingService.class).asEagerSingleton(); bind(DelayedAllocationService.class).asEagerSingleton(); bind(ShardStateAction.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/common/NamedRegistry.java b/core/src/main/java/org/elasticsearch/common/NamedRegistry.java new file mode 100644 index 00000000000..7573b5268fd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/NamedRegistry.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Objects.requireNonNull; + +/** + * A registry from String to some class implementation. Used to ensure implementations are registered only once. + */ +public class NamedRegistry { + private final Map registry = new HashMap<>(); + private final String targetName; + + public NamedRegistry(String targetName) { + this.targetName = targetName; + } + + public Map getRegistry() { + return registry; + } + + public void register(String name, T t) { + requireNonNull(name, "name is required"); + requireNonNull(t, targetName + " is required"); + if (registry.putIfAbsent(name, t) != null) { + throw new IllegalArgumentException(targetName + " for name " + name + " already registered"); + } + } + + public

<P> void extractAndRegister(List<P>
plugins, Function> lookup) { + for (P plugin : plugins) { + for (Map.Entry entry : lookup.apply(plugin).entrySet()) { + register(entry.getKey(), entry.getValue()); + } + } + } +} diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 12d0b8bbb6e..52647c8b8ff 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.analysis; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -143,12 +144,7 @@ import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFil import org.elasticsearch.plugins.AnalysisPlugin; import java.io.IOException; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.function.Function; - -import static java.util.Objects.requireNonNull; /** * Sets up {@link AnalysisRegistry}. @@ -170,12 +166,12 @@ public final class AnalysisModule { public AnalysisModule(Environment environment, List plugins) throws IOException { NamedRegistry> charFilters = setupCharFilters(plugins); NamedRegistry hunspellDictionaries = setupHunspellDictionaries(plugins); - hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.registry); + hunspellService = new HunspellService(environment.settings(), environment, hunspellDictionaries.getRegistry()); NamedRegistry> tokenFilters = setupTokenFilters(plugins, hunspellService); NamedRegistry> tokenizers = setupTokenizers(plugins); NamedRegistry>> analyzers = setupAnalyzers(plugins); - analysisRegistry = new AnalysisRegistry(environment, charFilters.registry, tokenFilters.registry, - tokenizers.registry, analyzers.registry); + analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), + tokenizers.getRegistry(), analyzers.getRegistry()); } HunspellService getHunspellService() { @@ -191,13 +187,13 @@ public final class AnalysisModule { charFilters.register("html_strip", HtmlStripCharFilterFactory::new); charFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new)); charFilters.register("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new)); - charFilters.registerPlugins(plugins, AnalysisPlugin::getCharFilters); + charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters); return charFilters; } public NamedRegistry setupHunspellDictionaries(List plugins) { NamedRegistry hunspellDictionaries = new NamedRegistry<>("dictionary"); - hunspellDictionaries.registerPlugins(plugins, AnalysisPlugin::getHunspellDictionaries); + hunspellDictionaries.extractAndRegister(plugins, AnalysisPlugin::getHunspellDictionaries); return hunspellDictionaries; } @@ -262,7 +258,7 @@ public final class AnalysisModule { tokenFilters.register("classic", ClassicFilterFactory::new); tokenFilters.register("decimal_digit", DecimalDigitFilterFactory::new); tokenFilters.register("fingerprint", FingerprintTokenFilterFactory::new); - tokenFilters.registerPlugins(plugins, AnalysisPlugin::getTokenFilters); + tokenFilters.extractAndRegister(plugins, AnalysisPlugin::getTokenFilters); return 
tokenFilters; } @@ -283,7 +279,7 @@ public final class AnalysisModule { tokenizers.register("pattern", PatternTokenizerFactory::new); tokenizers.register("classic", ClassicTokenizerFactory::new); tokenizers.register("thai", ThaiTokenizerFactory::new); - tokenizers.registerPlugins(plugins, AnalysisPlugin::getTokenizers); + tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers); return tokenizers; } @@ -333,7 +329,7 @@ public final class AnalysisModule { analyzers.register("turkish", TurkishAnalyzerProvider::new); analyzers.register("thai", ThaiAnalyzerProvider::new); analyzers.register("fingerprint", FingerprintAnalyzerProvider::new); - analyzers.registerPlugins(plugins, AnalysisPlugin::getAnalyzers); + analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers); return analyzers; } @@ -392,29 +388,4 @@ public final class AnalysisModule { return false; } } - - private static class NamedRegistry { - private final Map registry = new HashMap<>(); - private final String targetName; - - public NamedRegistry(String targetName) { - this.targetName = targetName; - } - - private void register(String name, T t) { - requireNonNull(name, "name is required"); - requireNonNull(t, targetName + " is required"); - if (registry.putIfAbsent(name, t) != null) { - throw new IllegalArgumentException(targetName + " for name " + name + " already registered"); - } - } - - private

<P> void registerPlugins(List<P>
plugins, Function> lookup) { - for (P plugin : plugins) { - for (Map.Entry entry : lookup.apply(plugin).entrySet()) { - register(entry.getKey(), entry.getValue()); - } - } - } - } } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f30fc9005b7..319cb14b110 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -86,8 +86,9 @@ import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ScriptPlugin; @@ -258,10 +259,13 @@ public class Node implements Closeable { modules.add(new NodeModule(this, monitorService)); modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry)); modules.add(new DiscoveryModule(this.settings)); - modules.add(new ClusterModule(this.settings, clusterService)); + ClusterModule clusterModule = new ClusterModule(settings, clusterService); + modules.add(clusterModule); modules.add(new IndicesModule(namedWriteableRegistry, pluginsService.filterPlugins(MapperPlugin.class))); modules.add(new SearchModule(settings, namedWriteableRegistry)); - modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false)); + modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings, + clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(), + pluginsService.filterPlugins(ActionPlugin.class))); modules.add(new GatewayModule()); modules.add(new RepositoriesModule()); pluginsService.processModules(modules); diff --git a/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java new file mode 100644 index 00000000000..0d4ee16ab89 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.TransportActions; + +import java.util.List; + +import static java.util.Collections.emptyList; + +/** + * An additional extension point for {@link Plugin}s that extends Elasticsearch's scripting functionality. Implement it like this: + *

<pre>{@code
+ *   {@literal @}Override
+ *   public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+ *       return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class),
+ *               new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class),
+ *               new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class),
+ *               new ActionHandler<>(RethrottleAction.INSTANCE, TransportRethrottleAction.class));
+ *   }
+ * }</pre>
+ */ +public interface ActionPlugin { + /** + * Actions added by this plugin. + */ + default List, ? extends ActionResponse>> getActions() { + return emptyList(); + } + /** + * Action filters added by this plugin. + */ + default List> getActionFilters() { + return emptyList(); + } + + public static final class ActionHandler, Response extends ActionResponse> { + private final GenericAction action; + private final Class> transportAction; + private final Class[] supportTransportActions; + + /** + * Create a record of an action, the {@linkplain TransportAction} that handles it, and any supporting {@linkplain TransportActions} + * that are needed by that {@linkplain TransportAction}. + */ + public ActionHandler(GenericAction action, Class> transportAction, + Class... supportTransportActions) { + this.action = action; + this.transportAction = transportAction; + this.supportTransportActions = supportTransportActions; + } + + public GenericAction getAction() { + return action; + } + + public Class> getTransportAction() { + return transportAction; + } + + public Class[] getSupportTransportActions() { + return supportTransportActions; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/plugins/Plugin.java b/core/src/main/java/org/elasticsearch/plugins/Plugin.java index 08a8ce7124c..2b8f59d15f3 100644 --- a/core/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.plugins; +import org.elasticsearch.action.ActionModule; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.settings.Setting; @@ -112,6 +113,14 @@ public abstract class Plugin { @Deprecated public final void onModule(AnalysisModule module) {} + /** + * Old-style action extension point. + * + * @deprecated implement {@link ActionPlugin} instead + */ + @Deprecated + public final void onModule(ActionModule module) {} + /** * Provides the list of this plugin's custom thread pools, empty if * none. 
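For orientation, here is a rough sketch (not part of the patch, and not the actual `ActionModule` implementation) of the consuming side: a module handed `pluginsService.filterPlugins(ActionPlugin.class)`, as `Node` and `TransportClient` now are in the hunks above, can flatten the per-plugin contributions into plain lists. The variable names are invented and imports are elided.

[source,java]
--------------------------------------------------
// Illustrative sketch only; not the real ActionModule code.
// Collect the handlers and filters contributed by every ActionPlugin.
List<ActionPlugin> actionPlugins = pluginsService.filterPlugins(ActionPlugin.class);

List<ActionPlugin.ActionHandler<?, ?>> handlers = new ArrayList<>();
List<Class<? extends ActionFilter>> filters = new ArrayList<>();
for (ActionPlugin plugin : actionPlugins) {
    handlers.addAll(plugin.getActions());       // default implementation returns emptyList(),
    filters.addAll(plugin.getActionFilters());  // so plugins only override what they need
}
--------------------------------------------------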
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 812691f064b..122ae910e7f 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -20,8 +20,9 @@ package org.elasticsearch.action.admin.cluster.node.tasks; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; @@ -46,6 +47,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -55,6 +57,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -64,11 +67,12 @@ import static org.elasticsearch.test.ESTestCase.awaitBusy; /** * A plugin that adds a cancellable blocking test task of integration testing of the task manager. */ -public class TestTaskPlugin extends Plugin { +public class TestTaskPlugin extends Plugin implements ActionPlugin { - public void onModule(ActionModule module) { - module.registerAction(TestTaskAction.INSTANCE, TransportTestTaskAction.class); - module.registerAction(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class); + @Override + public List, ? 
extends ActionResponse>> getActions() { + return Arrays.asList(new ActionHandler<>(TestTaskAction.INSTANCE, TransportTestTaskAction.class), + new ActionHandler<>(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class)); } static class TestTask extends CancellableTask { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 27b9192006d..f0e45ff37bb 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; @@ -31,7 +30,6 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; @@ -44,6 +42,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -56,11 +55,13 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collection; +import java.util.List; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptySet; +import static java.util.Collections.singletonList; import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -74,10 +75,10 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class ClusterInfoServiceIT extends ESIntegTestCase { - public static class TestPlugin extends Plugin { - - public void onModule(ActionModule module) { - module.registerFilter(BlockingActionFilter.class); + public static class TestPlugin extends Plugin implements ActionPlugin { + @Override + public List> getActionFilters() { + return singletonList(BlockingActionFilter.class); } } diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index a5e2efb9f38..e9ec3787afc 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.transport; import org.apache.http.message.BasicHeader; import 
org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -45,6 +44,7 @@ import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.test.ESIntegTestCase; @@ -61,6 +61,7 @@ import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; +import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; @@ -284,15 +285,16 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase { return internalCluster().transportClient().filterWithHeader(Collections.singletonMap(randomHeaderKey, randomHeaderValue)); } - public static class ActionLoggingPlugin extends Plugin { + public static class ActionLoggingPlugin extends Plugin implements ActionPlugin { @Override public Collection nodeModules() { return Collections.singletonList(new ActionLoggingModule()); } - public void onModule(ActionModule module) { - module.registerFilter(LoggingFilter.class); + @Override + public List> getActionFilters() { + return singletonList(LoggingFilter.class); } } diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc index 8d35098deb9..e1e8e6c614b 100644 --- a/docs/reference/migration/migrate_5_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_5_0/plugins.asciidoc @@ -133,4 +133,7 @@ Plugins that register custom analysis components should implement Plugins that register custom mappers should implement `MapperPlugin` and remove their `onModule(IndicesModule)` implementation. +==== ActionPlugin +Plugins that register custom actions should implement `ActionPlugin` and +remove their `onModule(ActionModule)` implementation. 
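As a minimal before/after sketch of that migration (not taken from the patch; `MyAction`, `TransportMyAction` and `MyActionFilter` are placeholder names, and imports are elided):

[source,java]
--------------------------------------------------
// Before: registration through the deprecated module callback.
public class MyPlugin extends Plugin {
    public void onModule(ActionModule module) {
        module.registerAction(MyAction.INSTANCE, TransportMyAction.class);
        module.registerFilter(MyActionFilter.class);
    }
}

// After: the same registrations declared through the ActionPlugin interface.
public class MyPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        return Collections.singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }

    @Override
    public List<Class<? extends ActionFilter>> getActionFilters() {
        return Collections.singletonList(MyActionFilter.class);
    }
}
--------------------------------------------------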
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java index 42f0da3c109..50bae57887c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java @@ -19,13 +19,15 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.template.MultiSearchTemplateAction; import org.elasticsearch.action.search.template.SearchTemplateAction; import org.elasticsearch.action.search.template.TransportMultiSearchTemplateAction; import org.elasticsearch.action.search.template.TransportSearchTemplateAction; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.rest.action.search.template.RestDeleteSearchTemplateAction; @@ -34,20 +36,22 @@ import org.elasticsearch.rest.action.search.template.RestMultiSearchTemplateActi import org.elasticsearch.rest.action.search.template.RestPutSearchTemplateAction; import org.elasticsearch.rest.action.search.template.RestRenderSearchTemplateAction; import org.elasticsearch.rest.action.search.template.RestSearchTemplateAction; -import org.elasticsearch.script.ScriptEngineRegistry; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptModule; -public class MustachePlugin extends Plugin implements ScriptPlugin { +import java.util.Arrays; +import java.util.List; + +public class MustachePlugin extends Plugin implements ScriptPlugin, ActionPlugin { @Override public ScriptEngineService getScriptEngineService(Settings settings) { return new MustacheScriptEngineService(settings); } - public void onModule(ActionModule module) { - module.registerAction(SearchTemplateAction.INSTANCE, TransportSearchTemplateAction.class); - module.registerAction(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class); + @Override + public List, ? 
extends ActionResponse>> getActions() { + return Arrays.asList(new ActionHandler<>(SearchTemplateAction.INSTANCE, TransportSearchTemplateAction.class), + new ActionHandler<>(MultiSearchTemplateAction.INSTANCE, TransportMultiSearchTemplateAction.class)); } public void onModule(NetworkModule module) { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index 7cbf07e1981..87fc6726c56 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -19,23 +19,25 @@ package org.elasticsearch.percolator; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchModule; -public class PercolatorPlugin extends Plugin implements MapperPlugin { +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class PercolatorPlugin extends Plugin implements MapperPlugin, ActionPlugin { public static final String NAME = "percolator"; @@ -47,9 +49,10 @@ public class PercolatorPlugin extends Plugin implements MapperPlugin { this.settings = settings; } - public void onModule(ActionModule module) { - module.registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class); - module.registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class); + @Override + public List, ? 
extends ActionResponse>> getActions() { + return Arrays.asList(new ActionHandler<>(PercolateAction.INSTANCE, TransportPercolateAction.class), + new ActionHandler<>(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class)); } public void onModule(NetworkModule module) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index d4c81177571..89e6c1cc753 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -19,18 +19,24 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -public class ReindexPlugin extends Plugin { +import java.util.Arrays; +import java.util.List; + +public class ReindexPlugin extends Plugin implements ActionPlugin { public static final String NAME = "reindex"; - public void onModule(ActionModule actionModule) { - actionModule.registerAction(ReindexAction.INSTANCE, TransportReindexAction.class); - actionModule.registerAction(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class); - actionModule.registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class); - actionModule.registerAction(RethrottleAction.INSTANCE, TransportRethrottleAction.class); + @Override + public List, ? extends ActionResponse>> getActions() { + return Arrays.asList(new ActionHandler<>(ReindexAction.INSTANCE, TransportReindexAction.class), + new ActionHandler<>(UpdateByQueryAction.INSTANCE, TransportUpdateByQueryAction.class), + new ActionHandler<>(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class), + new ActionHandler<>(RethrottleAction.INSTANCE, TransportRethrottleAction.class)); } public void onModule(NetworkModule networkModule) { From ef0e3db0de6928ddca70d18e929223980f283fe1 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 28 Jun 2016 12:16:17 +0200 Subject: [PATCH 15/43] Validates new dynamic settings from the current state Thanks to https://github.com/elastic/elasticsearch/pull/19088 the settings are now validated against dynamic updaters on the master. Though only the new settings are applied to the IndexService created for the validation. Because of this we cannot check the transition from one value to another in a dynamic updaters. This change creates the IndexService from the current settings and validates that the new dynamic settings can replace the current settings. This change also removes the validation of dynamic settings when an index is opened. The validation should have occurred when the settings have been updated. 
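Roughly, the validation flow described above looks like the following sketch. It is a simplification, not the actual `IndicesService` code: `createTemporaryIndexService` and `closeQuietly` are invented helper names, while `updateMetaData` is the real call added in the diff below.

[source,java]
--------------------------------------------------
// Sketch: the throw-away IndexService is built from the *current* metadata,
// then the *updated* metadata is applied so dynamic updaters see the real transition.
void verifyIndexMetadata(IndexMetaData current, IndexMetaData updated) throws IOException {
    IndexService service = createTemporaryIndexService(current); // fails if the current settings are broken
    try {
        if (current.equals(updated) == false) {
            service.updateMetaData(updated);                      // fails if the transition is invalid
        }
    } finally {
        closeQuietly(service);                                    // never registered, closed immediately
    }
}
--------------------------------------------------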
--- .../cluster/metadata/MetaDataIndexStateService.java | 2 +- .../metadata/MetaDataUpdateSettingsService.java | 10 +++++++--- .../main/java/org/elasticsearch/gateway/Gateway.java | 2 +- .../org/elasticsearch/indices/IndicesService.java | 12 ++++++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index e6e7084e4d9..53a0ede809a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -172,7 +172,7 @@ public class MetaDataIndexStateService extends AbstractComponent { // We need to check that this index can be upgraded to the current version indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); try { - indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData); + indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData, indexMetaData); } catch (Exception e) { throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 54f6ad0705a..8bf7b5edd94 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -275,13 +275,17 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); try { for (Index index : openIndices) { - indicesService.verifyIndexMetadata(nodeServiceProvider, updatedState.getMetaData().getIndexSafe(index)); + final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); + final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); + indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); } for (Index index : closeIndices) { - indicesService.verifyIndexMetadata(nodeServiceProvider, updatedState.getMetaData().getIndexSafe(index)); + final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); + final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); + indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); } } catch (IOException ex) { - ExceptionsHelper.convertToElastic(ex); + throw ExceptionsHelper.convertToElastic(ex); } return updatedState; } diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index af86633e235..d83a738aedf 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -141,7 +141,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { try { if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) { // verify that we can actually create this index - if not we recover it as closed with lots of warn logs - indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData); + indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData, 
electedIndexMetaData); } } catch (Exception e) { logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex()); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 89cfad98c4f..b8b9b827e4b 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -412,10 +412,12 @@ public class IndicesService extends AbstractLifecycleComponent } /** - * This method verifies that the given {@link IndexMetaData} holds sane values to create an {@link IndexService}. This method will throw an - * exception if the creation fails. The created {@link IndexService} will not be registered and will be closed immediately. + * This method verifies that the given {@code metaData} holds sane values to create an {@link IndexService}. + * This method tries to update the meta data of the created {@link IndexService} if the given {@code metaDataUpdate} is different from the given {@code metaData}. + * This method will throw an exception if the creation or the update fails. + * The created {@link IndexService} will not be registered and will be closed immediately. */ - public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData) throws IOException { + public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData, IndexMetaData metaDataUpdate) throws IOException { final List closeables = new ArrayList<>(); try { IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {}); @@ -431,7 +433,9 @@ public class IndicesService extends AbstractLifecycleComponent service.mapperService().merge(typeMapping.value.type(), typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true); } - service.getIndexSettings().getScopedSettings().validateUpdate(metaData.getSettings()); + if (metaData.equals(metaDataUpdate) == false) { + service.updateMetaData(metaDataUpdate); + } } finally { IOUtils.close(closeables); } From 6d069078d34cbc854a24c913e340485dc652d347 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Tue, 28 Jun 2016 16:14:57 +0200 Subject: [PATCH 16/43] Fixed tests that assumed that broken settings can be updated --- .../elasticsearch/gateway/GatewayIndexStateIT.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index d5dee3c1bdc..04810e6ebfd 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -446,13 +446,6 @@ public class GatewayIndexStateIT extends ESIntegTestCase { assertNotNull(ex.getCause()); assertEquals(IllegalArgumentException.class, ex.getCause().getClass()); assertEquals(ex.getCause().getMessage(), "Unknown tokenfilter type [icu_collation] for [myCollator]"); - - client().admin().indices().prepareUpdateSettings() - .setSettings(Settings.builder().putNull("index.analysis.filter.myCollator.type")).get(); - client().admin().indices().prepareOpen("test").get(); - ensureYellow(); - logger.info("--> verify 1 doc in the index"); - assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } /** @@ -510,13 +503,6 @@ public class 
GatewayIndexStateIT extends ESIntegTestCase { assertNotNull(ex.getCause()); assertEquals(MapperParsingException.class, ex.getCause().getClass()); assertEquals(ex.getCause().getMessage(), "analyzer [test] not found for field [field1]"); - - client().admin().indices().prepareUpdateSettings() - .setSettings(Settings.builder().put("index.analysis.analyzer.test.tokenizer", "keyword")).get(); - client().admin().indices().prepareOpen("test").get(); - ensureYellow(); - logger.info("--> verify 1 doc in the index"); - assertHitCount(client().prepareSearch().setQuery(matchQuery("field1", "value one")).get(), 1L); } public void testArchiveBrokenClusterSettings() throws Exception { From 2512594d9ec6a979b525038ba60ffdeb8af0eae8 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 28 Jun 2016 16:38:56 +0200 Subject: [PATCH 17/43] Testing infra - stablize data folder usage and clean up (#19111) The plan for persistent node ids ( #17811 ) is to tie the node identity to a file stored in it's data folders. As such it becomes important that nodes in our testing infra have better affinity with their data folders and that their data folders are not cleaned underneath them. The first is important because we fix the random seed used for node id generation (for reproducibility) and allowing the same node to use two different data folders causes two separate nodes to have the same id, which prevents the cluster from forming. The second is important, for example, where a full cluster restart / single node restart need to maintain node identity and wiping the data folders at the wrong moment prevents this. Concretely this commit does the following: 1) Remove previous attempts to have data folder per role using a prefix. This wasn't effective as it was using the data paths settings which are only used for part of the runs. An attempt to completely separate the paths via the home dir failed due to assumptions made by index custom path about node data folder ordinal uniqueness (see #19076) 2) Change full cluster restarts to start up nodes in the same order their were first created in, only randomly swapping nodes with the same roles. 3) Change test cluster reset methods to first shutdown the unneeded nodes and then re-start the shared nodes that were shut down, so they'll reclaim their data folders. 4) Improve data folder wiping logic and make sure it wipes only folders of "offline" nodes. 
5) Add some very basic tests --- .../org/elasticsearch/gateway/Gateway.java | 11 - .../java/org/elasticsearch/node/Node.java | 10 +- .../discovery/zen/ZenDiscoveryIT.java | 2 +- .../gateway/GatewayIndexStateIT.java | 9 +- .../test/InternalTestCluster.java | 274 ++++++++++-------- .../test/test/InternalTestClusterTests.java | 152 ++++++++++ 6 files changed, 314 insertions(+), 144 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index d83a738aedf..919db87d386 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -21,7 +21,6 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -38,7 +37,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.indices.IndicesService; -import java.nio.file.Path; import java.util.Arrays; import java.util.function.Supplier; @@ -159,15 +157,6 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { builder.metaData(metaDataBuilder); listener.onSuccess(builder.build()); } - public void reset() throws Exception { - try { - Path[] dataPaths = nodeEnv.nodeDataPaths(); - logger.trace("removing node data paths: [{}]", (Object)dataPaths); - IOUtils.rm(dataPaths); - } catch (Exception ex) { - logger.debug("failed to delete shard locations", ex); - } - } @Override public void clusterChanged(final ClusterChangedEvent event) { diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 319cb14b110..675d29e9991 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -163,6 +163,7 @@ public class Node implements Closeable { private final Injector injector; private final Settings settings; private final Environment environment; + private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; private final Client client; @@ -236,7 +237,6 @@ public class Node implements Closeable { // this is as early as we can validate settings at this point. we already pass them to ScriptModule as well as ThreadPool // so we might be late here already final SettingsModule settingsModule = new SettingsModule(this.settings, additionalSettings, additionalSettingsFilter); - final NodeEnvironment nodeEnvironment; try { nodeEnvironment = new NodeEnvironment(this.settings, this.environment); resourcesToClose.add(nodeEnvironment); @@ -324,6 +324,14 @@ public class Node implements Closeable { return environment; } + /** + * Returns the {@link NodeEnvironment} instance of this node + */ + public NodeEnvironment getNodeEnvironment() { + return nodeEnvironment; + } + + /** * Start the node. If the node is already started, this method is no-op. 
*/ diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index c5f92e62de0..fd0b11eae01 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -297,7 +297,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { Settings nodeSettings = Settings.builder() .put("discovery.type", "zen") // <-- To override the local setting if set externally .build(); - String nodeName = internalCluster().startNode(nodeSettings, Version.CURRENT); + String nodeName = internalCluster().startNode(nodeSettings); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 04810e6ebfd..049fd90cad6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -298,15 +298,10 @@ public class GatewayIndexStateIT extends ESIntegTestCase { assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); logger.info("--> restarting the nodes"); - final Gateway gateway1 = internalCluster().getInstance(GatewayService.class, node_1).getGateway(); internalCluster().fullRestart(new RestartCallback() { @Override - public Settings onNodeStopped(String nodeName) throws Exception { - if (node_1.equals(nodeName)) { - logger.info("--> deleting the data for the first node"); - gateway1.reset(); - } - return null; + public boolean clearData(String nodeName) { + return node_1.equals(nodeName); } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 9a00fbc4557..d2014ebbd6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -38,6 +39,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; @@ -367,15 +369,6 @@ public final class InternalTestCluster extends TestCluster { private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) { Builder builder = 
Settings.builder().put(defaultSettings) .put(getRandomNodeSettings(nodeSeed)); - Settings interimSettings = builder.build(); - final String dataSuffix = getRoleSuffix(interimSettings); - if (dataSuffix.isEmpty() == false) { - // to make sure that a master node will not pick up on the data folder of a data only node - // once restarted we append the role suffix to each path. - String[] dataPath = Environment.PATH_DATA_SETTING.get(interimSettings).stream() - .map(path -> path + dataSuffix).toArray(String[]::new); - builder.putArray(Environment.PATH_DATA_SETTING.getKey(), dataPath); - } Settings settings = nodeConfigurationSource.nodeSettings(nodeOrdinal); if (settings != null) { if (settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey()) != null) { @@ -498,7 +491,7 @@ public final class InternalTestCluster extends TestCluster { return randomNodeAndClient; } NodeAndClient buildNode = buildNode(); - buildNode.node().start(); + buildNode.startNode(); publishNode(buildNode); return buildNode; } @@ -569,7 +562,7 @@ public final class InternalTestCluster extends TestCluster { n == 0 ? nodes.values().stream() : nodes.values().stream().filter(new DataNodePredicate().and(new MasterNodePredicate(getMasterName()).negate())); final Iterator values = collection.iterator(); - logger.info("changing cluster size from {} to {}, {} data nodes", size(), n + numSharedCoordOnlyNodes, n); + logger.info("changing cluster size from {} data nodes to {}", size, n); Set nodesToRemove = new HashSet<>(); int numNodesAndClients = 0; while (values.hasNext() && numNodesAndClients++ < size - n) { @@ -586,17 +579,17 @@ public final class InternalTestCluster extends TestCluster { } } - private NodeAndClient buildNode(Settings settings, Version version) { + private NodeAndClient buildNode(Settings settings) { int ord = nextNodeId.getAndIncrement(); - return buildNode(ord, random.nextLong(), settings, version, false); + return buildNode(ord, random.nextLong(), settings, false); } private NodeAndClient buildNode() { int ord = nextNodeId.getAndIncrement(); - return buildNode(ord, random.nextLong(), null, Version.CURRENT, false); + return buildNode(ord, random.nextLong(), null, false); } - private NodeAndClient buildNode(int nodeId, long seed, Settings settings, Version version, boolean reuseExisting) { + private NodeAndClient buildNode(int nodeId, long seed, Settings settings, boolean reuseExisting) { assert Thread.holdsLock(this); ensureOpen(); settings = getSettings(nodeId, seed, settings); @@ -615,7 +608,7 @@ public final class InternalTestCluster extends TestCluster { .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), seed) .build(); MockNode node = new MockNode(finalSettings, plugins); - return new NodeAndClient(name, node); + return new NodeAndClient(name, node, nodeId); } private String buildNodeName(int id, Settings settings) { @@ -630,10 +623,10 @@ public final class InternalTestCluster extends TestCluster { private String getRoleSuffix(Settings settings) { String suffix = ""; if (Node.NODE_MASTER_SETTING.exists(settings) && Node.NODE_MASTER_SETTING.get(settings)) { - suffix = suffix + DiscoveryNode.Role.MASTER.getAbbreviation(); + suffix = suffix + Role.MASTER.getAbbreviation(); } if (Node.NODE_DATA_SETTING.exists(settings) && Node.NODE_DATA_SETTING.get(settings)) { - suffix = suffix + DiscoveryNode.Role.DATA.getAbbreviation(); + suffix = suffix + Role.DATA.getAbbreviation(); } if (Node.NODE_MASTER_SETTING.exists(settings) && Node.NODE_MASTER_SETTING.get(settings) == false && 
Node.NODE_DATA_SETTING.exists(settings) && Node.NODE_DATA_SETTING.get(settings) == false @@ -709,7 +702,7 @@ public final class InternalTestCluster extends TestCluster { return getRandomNodeAndClient(new NoDataNoMasterNodePredicate()).client(random); } - public synchronized Client startCoordinatingOnlyNode(Settings settings) { + public synchronized String startCoordinatingOnlyNode(Settings settings) { ensureOpen(); // currently unused Builder builder = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); @@ -717,8 +710,7 @@ public final class InternalTestCluster extends TestCluster { // if we are the first node - don't wait for a state builder.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); } - String name = startNode(builder); - return nodes.get(name).nodeClient(); + return startNode(builder); } /** @@ -771,7 +763,7 @@ public final class InternalTestCluster extends TestCluster { } @Override - public void close() { + public synchronized void close() { if (this.open.compareAndSet(true, false)) { if (activeDisruptionScheme != null) { activeDisruptionScheme.testClusterClosed(); @@ -793,10 +785,13 @@ public final class InternalTestCluster extends TestCluster { private Client transportClient; private final AtomicBoolean closed = new AtomicBoolean(false); private final String name; + private final int nodeAndClientId; - NodeAndClient(String name, MockNode node) { + NodeAndClient(String name, MockNode node, int nodeAndClientId) { this.node = node; this.name = name; + this.nodeAndClientId = nodeAndClientId; + markNodeDataDirsAsNotEligableForWipe(node); } Node node() { @@ -806,6 +801,10 @@ public final class InternalTestCluster extends TestCluster { return node; } + public int nodeAndClientId() { + return nodeAndClientId; + } + Client client(Random random) { if (closed.get()) { throw new RuntimeException("already closed"); @@ -860,12 +859,16 @@ public final class InternalTestCluster extends TestCluster { } } + void startNode() { + node.start(); + } + void closeNode() throws IOException { - registerDataPath(); + markNodeDataDirsAsPendingForWipe(node); node.close(); } - void restart(RestartCallback callback) throws Exception { + void restart(RestartCallback callback, boolean clearDataIfNeeded) throws Exception { assert callback != null; resetClient(); if (!node.isClosed()) { @@ -875,31 +878,32 @@ public final class InternalTestCluster extends TestCluster { if (newSettings == null) { newSettings = Settings.EMPTY; } - if (callback.clearData(name)) { - NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node); - if (nodeEnv.hasNodeFile()) { - IOUtils.rm(nodeEnv.nodeDataPaths()); - } + if (clearDataIfNeeded) { + clearDataIfNeeded(callback); } - startNewNode(newSettings); + createNewNode(newSettings); + startNode(); } - private void startNewNode(final Settings newSettings) { + private void clearDataIfNeeded(RestartCallback callback) throws IOException { + if (callback.clearData(name)) { + NodeEnvironment nodeEnv = node.getNodeEnvironment(); + if (nodeEnv.hasNodeFile()) { + final Path[] locations = nodeEnv.nodeDataPaths(); + logger.debug("removing node data paths: [{}]", (Object[]) locations); + IOUtils.rm(locations); + } + } + } + + private void createNewNode(final Settings newSettings) { final long newIdSeed = DiscoveryNodeService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id Settings finalSettings = 
Settings.builder().put(node.settings()).put(newSettings).put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); Collection> plugins = node.getPlugins(); node = new MockNode(finalSettings, plugins); - node.start(); + markNodeDataDirsAsNotEligableForWipe(node); } - void registerDataPath() { - NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node); - if (nodeEnv.hasNodeFile()) { - dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); - } - } - - @Override public void close() throws IOException { try { @@ -972,25 +976,42 @@ public final class InternalTestCluster extends TestCluster { } } randomlyResetClients(); - if (wipeData) { - wipeDataDirectories(); - } - if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) { - logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length); + final int newSize = sharedNodesSeeds.length; + if (nextNodeId.get() == newSize && nodes.size() == newSize) { + if (wipeData) { + wipePendingDataDirectories(); + } + logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); return; } - logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length); + logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); + + // trash all nodes with id >= sharedNodesSeeds.length - they are non shared - Set sharedNodes = new HashSet<>(); - assert sharedNodesSeeds.length == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; + for (Iterator iterator = nodes.values().iterator(); iterator.hasNext();) { + NodeAndClient nodeAndClient = iterator.next(); + if (nodeAndClient.nodeAndClientId() >= sharedNodesSeeds.length) { + logger.debug("Close Node [{}] not shared", nodeAndClient.name); + nodeAndClient.close(); + iterator.remove(); + } + } + + // clean up what the nodes left that is unused + if (wipeData) { + wipePendingDataDirectories(); + } + + // start any missing node + assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; for (int i = 0; i < numSharedDedicatedMasterNodes; i++) { final Settings.Builder settings = Settings.builder(); settings.put(Node.NODE_MASTER_SETTING.getKey(), true).build(); settings.put(Node.NODE_DATA_SETTING.getKey(), false).build(); - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), Version.CURRENT, true); - nodeAndClient.node().start(); - sharedNodes.add(nodeAndClient); + NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true); + nodeAndClient.startNode(); + publishNode(nodeAndClient); } for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) { final Settings.Builder settings = Settings.builder(); @@ -999,44 +1020,35 @@ public final class InternalTestCluster extends TestCluster { settings.put(Node.NODE_MASTER_SETTING.getKey(), false).build(); settings.put(Node.NODE_DATA_SETTING.getKey(), true).build(); } - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), Version.CURRENT, true); - nodeAndClient.node().start(); - sharedNodes.add(nodeAndClient); + NodeAndClient 
nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true); + nodeAndClient.startNode(); + publishNode(nodeAndClient); } for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { final Builder settings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); - NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), Version.CURRENT, true); - nodeAndClient.node().start(); - sharedNodes.add(nodeAndClient); - } - - for (NodeAndClient nodeAndClient : sharedNodes) { - nodes.remove(nodeAndClient.name); - } - - // trash the remaining nodes - final Collection toShutDown = nodes.values(); - for (NodeAndClient nodeAndClient : toShutDown) { - logger.debug("Close Node [{}] not shared", nodeAndClient.name); - nodeAndClient.close(); - } - nodes.clear(); - for (NodeAndClient nodeAndClient : sharedNodes) { + NodeAndClient nodeAndClient = buildNode(i, sharedNodesSeeds[i], settings.build(), true); + nodeAndClient.startNode(); publishNode(nodeAndClient); } - nextNodeId.set(sharedNodesSeeds.length); - assert size() == sharedNodesSeeds.length; - if (size() > 0) { - client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get(); + + nextNodeId.set(newSize); + assert size() == newSize; + if (newSize > 0) { + ClusterHealthResponse response = client().admin().cluster().prepareHealth() + .setWaitForNodes(Integer.toString(newSize)).get(); + if (response.isTimedOut()) { + logger.warn("failed to wait for a cluster of size [{}], got", newSize, response); + throw new IllegalStateException("cluster failed to reach the expected size of [" + newSize + "]"); + } } - logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length); + logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), newSize); } @Override public synchronized void afterTest() throws IOException { - wipeDataDirectories(); + wipePendingDataDirectories(); randomlyResetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */ } @@ -1098,7 +1110,8 @@ public final class InternalTestCluster extends TestCluster { } } - private void wipeDataDirectories() { + private void wipePendingDataDirectories() { + assert Thread.holdsLock(this); if (!dataDirToClean.isEmpty()) { try { for (Path path : dataDirToClean) { @@ -1115,6 +1128,22 @@ public final class InternalTestCluster extends TestCluster { } } + private void markNodeDataDirsAsPendingForWipe(Node node) { + assert Thread.holdsLock(this); + NodeEnvironment nodeEnv = node.getNodeEnvironment(); + if (nodeEnv.hasNodeFile()) { + dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); + } + } + + private void markNodeDataDirsAsNotEligableForWipe(Node node) { + assert Thread.holdsLock(this); + NodeEnvironment nodeEnv = node.getNodeEnvironment(); + if (nodeEnv.hasNodeFile()) { + dataDirToClean.removeAll(Arrays.asList(nodeEnv.nodeDataPaths())); + } + } + /** * Returns a reference to a random node's {@link ClusterService} */ @@ -1254,7 +1283,7 @@ public final class InternalTestCluster extends TestCluster { /** * Stops any of the current nodes but not the master node. 
*/ - public void stopRandomNonMasterNode() throws IOException { + public synchronized void stopRandomNonMasterNode() throws IOException { NodeAndClient nodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()).negate()); if (nodeAndClient != null) { logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName()); @@ -1295,28 +1324,28 @@ public final class InternalTestCluster extends TestCluster { /** * Restarts a random node in the cluster and calls the callback during restart. */ - private void restartRandomNode(Predicate predicate, RestartCallback callback) throws Exception { + private synchronized void restartRandomNode(Predicate predicate, RestartCallback callback) throws Exception { ensureOpen(); NodeAndClient nodeAndClient = getRandomNodeAndClient(predicate); if (nodeAndClient != null) { logger.info("Restarting random node [{}] ", nodeAndClient.name); - nodeAndClient.restart(callback); + nodeAndClient.restart(callback, true); } } /** * Restarts a node and calls the callback during restart. */ - public void restartNode(String nodeName, RestartCallback callback) throws Exception { + synchronized public void restartNode(String nodeName, RestartCallback callback) throws Exception { ensureOpen(); NodeAndClient nodeAndClient = nodes.get(nodeName); if (nodeAndClient != null) { logger.info("Restarting node [{}] ", nodeAndClient.name); - nodeAndClient.restart(callback); + nodeAndClient.restart(callback, true); } } - private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception { + synchronized private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception { ensureOpen(); List toRemove = new ArrayList<>(); try { @@ -1344,13 +1373,15 @@ public final class InternalTestCluster extends TestCluster { if (activeDisruptionScheme != null) { activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); } - nodeAndClient.restart(callback); + nodeAndClient.restart(callback, true); if (activeDisruptionScheme != null) { activeDisruptionScheme.applyToNode(nodeAndClient.name, this); } } } else { int numNodesRestarted = 0; + Set[] nodesRoleOrder = new Set[nextNodeId.get()]; + Map, List> nodesByRoles = new HashMap<>(); for (NodeAndClient nodeAndClient : nodes.values()) { callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient()); logger.info("Stopping node [{}] ", nodeAndClient.name); @@ -1358,25 +1389,37 @@ public final class InternalTestCluster extends TestCluster { activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); } nodeAndClient.closeNode(); + // delete data folders now, before we start other nodes that may claim it + nodeAndClient.clearDataIfNeeded(callback); + + + DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode(); + nodesRoleOrder[nodeAndClient.nodeAndClientId()] = discoveryNode.getRoles(); + nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); } - // starting master nodes first, for now so restart will be quick. 
If we'll start - // the data nodes first, they will wait for 30s for a master - List discoveryNodes = new ArrayList<>(); - for (ClusterService clusterService : getInstances(ClusterService.class)) { - discoveryNodes.add(clusterService.localNode()); + assert nodesByRoles.values().stream().collect(Collectors.summingInt(List::size)) == nodes.size(); + + // randomize start up order, but making sure that: + // 1) A data folder that was assigned to a data node will stay so + // 2) Data nodes will get the same node lock ordinal range, so custom index paths (where the ordinal is used) + // will still belong to data nodes + for (List sameRoleNodes : nodesByRoles.values()) { + Collections.shuffle(sameRoleNodes, random); } - discoveryNodes.sort((n1, n2) -> Boolean.compare(n1.isMasterNode() == false, n2.isMasterNode() == false)); - - - for (DiscoveryNode node : discoveryNodes) { - NodeAndClient nodeAndClient = nodes.get(node.getName()); + for (Set roles : nodesRoleOrder) { + if (roles == null) { + // if some nodes were stopped, we want have a role for them + continue; + } + NodeAndClient nodeAndClient = nodesByRoles.get(roles).remove(0); logger.info("Starting node [{}] ", nodeAndClient.name); if (activeDisruptionScheme != null) { activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); } - nodeAndClient.restart(callback); + // we already cleared data folders, before starting nodes up + nodeAndClient.restart(callback, false); if (activeDisruptionScheme != null) { activeDisruptionScheme.applyToNode(nodeAndClient.name, this); } @@ -1487,36 +1530,22 @@ public final class InternalTestCluster extends TestCluster { * Starts a node with default settings and returns it's name. */ public synchronized String startNode() { - return startNode(Settings.EMPTY, Version.CURRENT); - } - - /** - * Starts a node with default settings ad the specified version and returns it's name. - */ - public synchronized String startNode(Version version) { - return startNode(Settings.EMPTY, version); + return startNode(Settings.EMPTY); } /** * Starts a node with the given settings builder and returns it's name. */ public synchronized String startNode(Settings.Builder settings) { - return startNode(settings.build(), Version.CURRENT); + return startNode(settings.build()); } /** * Starts a node with the given settings and returns it's name. */ public synchronized String startNode(Settings settings) { - return startNode(settings, Version.CURRENT); - } - - /** - * Starts a node with the given settings and version and returns it's name. 
- */ - public synchronized String startNode(Settings settings, Version version) { - NodeAndClient buildNode = buildNode(settings, version); - buildNode.node().start(); + NodeAndClient buildNode = buildNode(settings); + buildNode.startNode(); publishNode(buildNode); return buildNode.name; } @@ -1550,7 +1579,7 @@ public final class InternalTestCluster extends TestCluster { public synchronized String startMasterOnlyNode(Settings settings) { Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), true).put(Node.NODE_DATA_SETTING.getKey(), false).build(); - return startNode(settings1, Version.CURRENT); + return startNode(settings1); } public synchronized Async startDataOnlyNodeAsync() { @@ -1564,7 +1593,7 @@ public final class InternalTestCluster extends TestCluster { public synchronized String startDataOnlyNode(Settings settings) { Settings settings1 = Settings.builder().put(settings).put(Node.NODE_MASTER_SETTING.getKey(), false).put(Node.NODE_DATA_SETTING.getKey(), true).build(); - return startNode(settings1, Version.CURRENT); + return startNode(settings1); } /** @@ -1585,9 +1614,9 @@ public final class InternalTestCluster extends TestCluster { * Starts a node in an async manner with the given settings and version and returns future with its name. */ public synchronized Async startNodeAsync(final Settings settings, final Version version) { - final NodeAndClient buildNode = buildNode(settings, version); + final NodeAndClient buildNode = buildNode(settings); final Future submit = executor.submit(() -> { - buildNode.node().start(); + buildNode.startNode(); publishNode(buildNode); return buildNode.name; }); @@ -1646,10 +1675,6 @@ public final class InternalTestCluster extends TestCluster { private synchronized void publishNode(NodeAndClient nodeAndClient) { assert !nodeAndClient.node().isClosed(); - NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node); - if (nodeEnv.hasNodeFile()) { - dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); - } nodes.put(nodeAndClient.name, nodeAndClient); applyDisruptionSchemeToNode(nodeAndClient); } @@ -1927,7 +1952,8 @@ public final class InternalTestCluster extends TestCluster { public void assertAfterTest() throws IOException { super.assertAfterTest(); assertRequestsFinished(); - for (NodeEnvironment env : this.getInstances(NodeEnvironment.class)) { + for (NodeAndClient nodeAndClient : nodes.values()) { + NodeEnvironment env = nodeAndClient.node().getNodeEnvironment(); Set shardIds = env.lockedShards(); for (ShardId id : shardIds) { try { diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index ae774220ccf..63c5eb01fb0 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -22,23 +22,39 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.transport.TransportSettings; +import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; +import static org.elasticsearch.cluster.node.DiscoveryNode.Role.DATA; +import static org.elasticsearch.cluster.node.DiscoveryNode.Role.INGEST; +import static org.elasticsearch.cluster.node.DiscoveryNode.Role.MASTER; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.not; /** * Basic test that ensure that the internal cluster reproduces the same @@ -153,4 +169,140 @@ public class InternalTestClusterTests extends ESTestCase { IOUtils.close(cluster0, cluster1); } } + + public void testDataFolderAssignmentAndCleaning() throws IOException, InterruptedException { + long clusterSeed = randomLong(); + boolean masterNodes = randomBoolean(); + // we need one stable node + int minNumDataNodes = 2; + int maxNumDataNodes = 2; + final String clusterName1 = "shared1"; + NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; + int numClientNodes = 0; + boolean enableHttpPipelining = randomBoolean(); + String nodePrefix = "test"; + Path baseDir = createTempDir(); + InternalTestCluster cluster = new InternalTestCluster("local", clusterSeed, baseDir, masterNodes, + minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, + enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + try { + cluster.beforeTest(random(), 0.0); + final Map shardNodePaths = new HashMap<>(); + for (String name: cluster.getNodeNames()) { + shardNodePaths.put(name, getNodePaths(cluster, name)); + } + String poorNode = randomFrom(cluster.getNodeNames()); + Path dataPath = getNodePaths(cluster, poorNode)[0]; + final Path testMarker = dataPath.resolve("testMarker"); + Files.createDirectories(testMarker); + cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode)); + assertFileExists(testMarker); // stopping a node half way shouldn't clean data + + final String stableNode = randomFrom(cluster.getNodeNames()); + final Path stableDataPath = getNodePaths(cluster, stableNode)[0]; + final Path stableTestMarker = stableDataPath.resolve("stableTestMarker"); + assertThat(stableDataPath, not(dataPath)); + Files.createDirectories(stableTestMarker); + + final String newNode1 = cluster.startNode(); + assertThat(getNodePaths(cluster, newNode1)[0], equalTo(dataPath)); + assertFileExists(testMarker); // starting a node should re-use data folders and not clean it + + final String newNode2 = cluster.startNode(); + final Path newDataPath = getNodePaths(cluster, newNode2)[0]; + final Path newTestMarker = newDataPath.resolve("newTestMarker"); + assertThat(newDataPath, not(dataPath)); + Files.createDirectories(newTestMarker); + cluster.beforeTest(random(), 0.0); + assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made + 
assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned + assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes + for (String name: cluster.getNodeNames()) { + assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), + equalTo(shardNodePaths.get(name))); + } + + cluster.beforeTest(random(), 0.0); + assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes + for (String name: cluster.getNodeNames()) { + assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), + equalTo(shardNodePaths.get(name))); + } + + } finally { + cluster.close(); + } + } + + private Path[] getNodePaths(InternalTestCluster cluster, String name) { + final NodeEnvironment nodeEnvironment = cluster.getInstance(NodeEnvironment.class, name); + if (nodeEnvironment.hasNodeFile()) { + return nodeEnvironment.nodeDataPaths(); + } else { + return new Path[0]; + } + } + + public void testDifferentRolesMaintainPathOnRestart() throws Exception { + final Path baseDir = createTempDir(); + InternalTestCluster cluster = new InternalTestCluster("local", randomLong(), baseDir, true, 0, 0, "test", + new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0).build(); + } + + @Override + public Settings transportClientSettings() { + return Settings.EMPTY; + } + }, 0, randomBoolean(), "", Collections.emptyList(), Function.identity()); + cluster.beforeTest(random(), 0.0); + try { + Map> pathsPerRole = new HashMap<>(); + for (int i = 0; i < 5; i++) { + final DiscoveryNode.Role role = randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST); + final String node; + switch (role) { + case MASTER: + node = cluster.startMasterOnlyNode(Settings.EMPTY); + break; + case DATA: + node = cluster.startDataOnlyNode(Settings.EMPTY); + break; + case INGEST: + node = cluster.startCoordinatingOnlyNode(Settings.EMPTY); + break; + default: + throw new IllegalStateException("get your story straight"); + } + Set rolePaths = pathsPerRole.computeIfAbsent(role, k -> new HashSet<>()); + for (Path path : getNodePaths(cluster, node)) { + assertTrue(rolePaths.add(path.toString())); + } + } + cluster.fullRestart(); + + Map> result = new HashMap<>(); + for (String name : cluster.getNodeNames()) { + DiscoveryNode node = cluster.getInstance(ClusterService.class, name).localNode(); + List paths = Arrays.stream(getNodePaths(cluster, name)).map(Path::toString).collect(Collectors.toList()); + if (node.isMasterNode()) { + result.computeIfAbsent(MASTER, k -> new HashSet<>()).addAll(paths); + } else if (node.isDataNode()) { + result.computeIfAbsent(DATA, k -> new HashSet<>()).addAll(paths); + } else { + result.computeIfAbsent(INGEST, k -> new HashSet<>()).addAll(paths); + } + } + + assertThat(result.size(), equalTo(pathsPerRole.size())); + for (DiscoveryNode.Role role : result.keySet()) { + assertThat("path are not the same for " + role, result.get(role), equalTo(pathsPerRole.get(role))); + } + } finally { + cluster.close(); + } + + } } From 98276111e1fdd63ce00821118fea9a1c4edef595 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 Jun 2016 15:39:59 +0200 Subject: [PATCH 18/43] Re-enable logger usage checks It was inadvertently disabled after applying code review comments. 
This commit reenables the logger usage checker and makes it less naggy when encountering logging usages of the form logger.info(someStringBuilder). Previously it would fail with the error message "First argument must be a string constant so that we can statically ensure proper place holder usage". Now it will only fail in case any arguments are provided as well, for example logger.info(someStringBuilder, 42). --- .../loggerusage/ESLoggerUsageChecker.java | 33 +++++++++++-------- .../test/loggerusage/ESLoggerUsageTests.java | 8 +++-- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java index 25d4052c162..041d21cc762 100644 --- a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java @@ -82,7 +82,7 @@ public class ESLoggerUsageChecker { Files.walkFileTree(root, new SimpleFileVisitor() { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { - if (Files.isRegularFile(file) && file.endsWith(".class")) { + if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { try (InputStream in = Files.newInputStream(file)) { ESLoggerUsageChecker.check(wrongUsageCallback, in); } @@ -248,19 +248,6 @@ public class ESLoggerUsageChecker { if (LOGGER_METHODS.contains(methodInsn.name) == false) { continue; } - Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc); - BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument - if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) { - wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, - "First argument must be a string constant so that we can statically ensure proper place holder usage")); - continue; - } - PlaceHolderStringBasicValue logMessageLength = (PlaceHolderStringBasicValue) logMessageLengthObject; - if (logMessageLength.minValue != logMessageLength.maxValue) { - wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, - "Multiple log messages with conflicting number of place holders")); - continue; - } BasicValue varArgsSizeObject = getStackValue(arraySizeFrames[i], 0); // last argument if (varArgsSizeObject instanceof ArraySizeBasicValue == false) { wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, @@ -268,6 +255,24 @@ public class ESLoggerUsageChecker { continue; } ArraySizeBasicValue varArgsSize = (ArraySizeBasicValue) varArgsSizeObject; + Type[] argumentTypes = Type.getArgumentTypes(methodInsn.desc); + BasicValue logMessageLengthObject = getStackValue(stringFrames[i], argumentTypes.length - 1); // first argument + if (logMessageLengthObject instanceof PlaceHolderStringBasicValue == false) { + if (varArgsSize.minValue > 0) { + wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, + "First argument must be a string constant so that we can statically ensure proper place holder usage")); + continue; + } else { + // don't check logger usage for logger.warn(someObject) as someObject will be fully logged + continue; + } + } + PlaceHolderStringBasicValue logMessageLength = 
(PlaceHolderStringBasicValue) logMessageLengthObject; + if (logMessageLength.minValue != logMessageLength.maxValue) { + wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, + "Multiple log messages with conflicting number of place holders")); + continue; + } if (varArgsSize.minValue != varArgsSize.maxValue) { wrongUsageCallback.accept(new WrongLoggerUsage(className, methodNode.name, methodInsn.name, lineNumber, "Multiple parameter arrays with conflicting sizes")); diff --git a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java index ab07ecbf45e..73449f4351c 100644 --- a/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java +++ b/test/logger-usage/src/test/java/org/elasticsearch/test/loggerusage/ESLoggerUsageTests.java @@ -129,8 +129,12 @@ public class ESLoggerUsageTests extends ESTestCase { logger.info("Hello {}, {}", "world", 42, new Exception()); } - public void checkFailNonConstantMessage(boolean b) { - logger.info(Boolean.toString(b)); + public void checkNonConstantMessageWithZeroArguments(boolean b) { + logger.info(Boolean.toString(b), new Exception()); + } + + public void checkFailNonConstantMessageWithArguments(boolean b) { + logger.info(Boolean.toString(b), new Exception(), 42); } public void checkComplexUsage(boolean b) { From 051579184633030a07b1df08bedb16f6c7ade30c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 Jun 2016 15:49:00 +0200 Subject: [PATCH 19/43] Fix logger usages --- .../search/AbstractSearchAsyncAction.java | 23 +++++-------------- .../SearchScrollQueryAndFetchAsyncAction.java | 4 ++-- ...SearchScrollQueryThenFetchAsyncAction.java | 2 +- .../cluster/NodeConnectionsService.java | 2 +- .../common/logging/ESLogger.java | 2 ++ .../zen/elect/ElectMasterService.java | 9 ++++---- .../elasticsearch/threadpool/ThreadPool.java | 2 +- .../bootstrap/MaxMapCountCheckTests.java | 2 ++ .../cluster/routing/PrimaryTermsTests.java | 4 ++-- .../AbstractLifecycleRunnableTests.java | 2 ++ .../gateway/GatewayIndexStateIT.java | 2 +- .../store/IndicesStoreIntegrationIT.java | 2 +- .../elasticsearch/recovery/RelocationIT.java | 2 +- .../AbstractAsyncBulkByScrollAction.java | 2 +- 14 files changed, 27 insertions(+), 33 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 805d09a0830..d843d8c0630 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -190,7 +190,7 @@ abstract class AbstractSearchAsyncAction innerMoveToSecondPhase(); } catch (Throwable e) { if (logger.isDebugEnabled()) { - logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e); + logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request); } raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures())); } @@ -210,11 +210,7 @@ abstract class AbstractSearchAsyncAction if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { if (t != null && !TransportActions.isShardNotAvailableException(t)) { - if (shard != null) { - logger.debug(shard.shortSummary() + ": Failed to execute 
[" + request + "]", t); - } else { - logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t); - } + logger.debug("{}: Failed to execute [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(), request); } else if (logger.isTraceEnabled()) { logger.trace("{}: Failed to execute [{}]", t, shard, request); } @@ -239,7 +235,8 @@ abstract class AbstractSearchAsyncAction final boolean lastShard = nextShard == null; // trace log this exception if (logger.isTraceEnabled()) { - logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t); + logger.trace("{}: Failed to execute [{}] lastShard [{}]", t, shard != null ? shard.shortSummary() : shardIt.shardId(), + request, lastShard); } if (!lastShard) { try { @@ -251,22 +248,14 @@ abstract class AbstractSearchAsyncAction // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception if (t != null && !TransportActions.isShardNotAvailableException(t)) { - logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t); + logger.debug("{}: Failed to execute [{}] lastShard [{}]", t, + shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard); } } } } } - private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, - boolean lastShard) { - if (shard != null) { - return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]"; - } else { - return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]"; - } - } - protected final ShardSearchFailure[] buildShardFailures() { AtomicArray shardFailures = this.shardFailures; if (shardFailures == null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 4e0ee3ff5e5..90bb0937327 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -103,7 +103,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { executePhase(i, node, target.getScrollId()); } else { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { @@ -116,7 +116,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { DiscoveryNode node = nodes.get(target.getNode()); if (node == null) { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 8e822302d2f..694ee16beac 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -107,7 +107,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { executeQueryPhase(i, counter, node, target.getScrollId()); } else { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 698f9d1090c..404ae57d5f7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -91,7 +91,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent runCallable = mock(Callable.class); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 049fd90cad6..129495ea15e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -343,7 +343,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { } else { // test with a shadow replica index final Path dataPath = createTempDir(); - logger.info("--> created temp data path for shadow replicas [" + dataPath + "]"); + logger.info("--> created temp data path for shadow replicas [{}]", dataPath); logger.info("--> starting a cluster with " + numNodes + " nodes"); final Settings nodeSettings = Settings.builder() .put("node.add_id_to_custom_path", false) diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index b500ffb7dd2..b4f66c2e17b 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -263,7 +263,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { assertThat(waitForShardDeletion(node_3, index, 0), equalTo(false)); Path server2Shard = shardDirectory(node_2, index, 0); - logger.info("--> stopping node " + node_2); + logger.info("--> stopping node {}", node_2); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2)); logger.info("--> running cluster_health"); diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index bc9909b21c1..620dfeb94c2 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -479,7 +479,7 @@ public class RelocationIT extends ESIntegTestCase { indexRandom(true, docs); numDocs *= 2; - logger.info(" --> waiting for relocation to complete", numDocs); + logger.info(" --> waiting for relocation to complete"); ensureGreen("test");// move all shards to the new node (it waits on relocation) final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java 
b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index ceb00b2f81f..3403a8077b9 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -404,7 +404,7 @@ public abstract class AbstractAsyncBulkByScrollAction Date: Tue, 28 Jun 2016 17:38:56 +0200 Subject: [PATCH 20/43] Fix number of arguments provided to logger calls --- .../main/java/org/elasticsearch/test/InternalTestCluster.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index d2014ebbd6e..20af5cf50b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -890,7 +890,7 @@ public final class InternalTestCluster extends TestCluster { NodeEnvironment nodeEnv = node.getNodeEnvironment(); if (nodeEnv.hasNodeFile()) { final Path[] locations = nodeEnv.nodeDataPaths(); - logger.debug("removing node data paths: [{}]", (Object[]) locations); + logger.debug("removing node data paths: [{}]", Arrays.toString(locations)); IOUtils.rm(locations); } } @@ -1039,7 +1039,7 @@ public final class InternalTestCluster extends TestCluster { ClusterHealthResponse response = client().admin().cluster().prepareHealth() .setWaitForNodes(Integer.toString(newSize)).get(); if (response.isTimedOut()) { - logger.warn("failed to wait for a cluster of size [{}], got", newSize, response); + logger.warn("failed to wait for a cluster of size [{}], got [{}]", newSize, response); throw new IllegalStateException("cluster failed to reach the expected size of [" + newSize + "]"); } } From 9b9e17abf7ab34707e8a9264cbddc3afd196b7a3 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 28 Jun 2016 17:51:33 +0200 Subject: [PATCH 21/43] Cleanup Compressor interface (#19125) Today we have several deprecated methods, leaking netty interfaces, support for multiple compressors on the compressor interface. The netty interface can simply be replaced by BytesReference which we already have an implementation for, all the others are not used and are removed in this commit. 
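Callers now go through the single static CompressorFactory.COMPRESSOR constant instead of the removed
defaultCompressor()/setDefaultCompressor() pair. As a minimal sketch of the new write path (illustrative
only, mirroring the call sites updated below; the `payload` variable is hypothetical and stands in for
any object with a writeTo(StreamOutput) method, such as a ClusterState or Diff):

    // compress a payload with the only remaining (deflate) compressor
    BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) {
        payload.writeTo(stream);
    }
    BytesReference compressed = bStream.bytes();
    assert CompressorFactory.isCompressed(compressed);

Keeping a single constant also removes the mutable global default that tests previously had to save and
restore around each run.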
--- .../common/compress/CompressedIndexInput.java | 215 ------------------ .../compress/CompressedStreamInput.java | 174 -------------- .../common/compress/CompressedXContent.java | 4 +- .../common/compress/Compressor.java | 14 -- .../common/compress/CompressorFactory.java | 58 +---- .../{deflate => }/DeflateCompressor.java | 42 +--- .../publish/PublishClusterStateAction.java | 4 +- .../blobstore/ChecksumBlobStoreFormat.java | 2 +- .../netty/MessageChannelHandler.java | 3 +- .../transport/netty/NettyTransport.java | 2 +- .../netty/NettyTransportChannel.java | 2 +- ...estCase.java => DeflateCompressTests.java} | 8 +- ...va => DeflateCompressedXContentTests.java} | 52 ++--- .../deflate/DeflateCompressedStreamTests.java | 30 --- .../deflate/DeflateXContentTests.java | 30 --- .../mapper/binary/BinaryMappingTests.java | 2 +- .../snapshots/BlobStoreFormatIT.java | 2 +- 17 files changed, 40 insertions(+), 604 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java delete mode 100644 core/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java rename core/src/main/java/org/elasticsearch/common/compress/{deflate => }/DeflateCompressor.java (80%) rename core/src/test/java/org/elasticsearch/common/compress/{AbstractCompressedStreamTestCase.java => DeflateCompressTests.java} (98%) rename core/src/test/java/org/elasticsearch/common/compress/{AbstractCompressedXContentTestCase.java => DeflateCompressedXContentTests.java} (63%) delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java delete mode 100644 core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java b/core/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java deleted file mode 100644 index 599eaeae337..00000000000 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressedIndexInput.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.compress; - -import org.apache.lucene.store.IndexInput; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.LongArray; - -import java.io.EOFException; -import java.io.IOException; - -/** - * @deprecated Used only for backward comp. 
to read old compressed files, since we now use codec based compression - */ -@Deprecated -public abstract class CompressedIndexInput extends IndexInput { - - private IndexInput in; - - private int version; - private long totalUncompressedLength; - private LongArray offsets; - - private boolean closed; - - protected byte[] uncompressed; - protected int uncompressedLength; - private int position = 0; - private int valid = 0; - private int currentOffsetIdx; - private long currentUncompressedChunkPointer; - - public CompressedIndexInput(IndexInput in) throws IOException { - super("compressed(" + in.toString() + ")"); - this.in = in; - readHeader(in); - this.version = in.readInt(); - long metaDataPosition = in.readLong(); - long headerLength = in.getFilePointer(); - in.seek(metaDataPosition); - this.totalUncompressedLength = in.readVLong(); - int size = in.readVInt(); - offsets = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(size); - for (int i = 0; i < size; i++) { - offsets.set(i, in.readVLong()); - } - this.currentOffsetIdx = -1; - this.currentUncompressedChunkPointer = 0; - in.seek(headerLength); - } - - /** - * Method is overridden to report number of bytes that can now be read - * from decoded data buffer, without reading bytes from the underlying - * stream. - * Never throws an exception; returns number of bytes available without - * further reads from underlying source; -1 if stream has been closed, or - * 0 if an actual read (and possible blocking) is needed to find out. - */ - public int available() throws IOException { - // if closed, return -1; - if (closed) { - return -1; - } - int left = (valid - position); - return (left <= 0) ? 0 : left; - } - - @Override - public byte readByte() throws IOException { - if (!readyBuffer()) { - throw new EOFException(); - } - return uncompressed[position++]; - } - - public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException { - if (length < 1) { - return 0; - } - if (!readyBuffer()) { - return -1; - } - // First let's read however much data we happen to have... - int chunkLength = Math.min(valid - position, length); - System.arraycopy(uncompressed, position, buffer, offset, chunkLength); - position += chunkLength; - - if (chunkLength == length || !fullRead) { - return chunkLength; - } - // Need more data, then - int totalRead = chunkLength; - do { - offset += chunkLength; - if (!readyBuffer()) { - break; - } - chunkLength = Math.min(valid - position, (length - totalRead)); - System.arraycopy(uncompressed, position, buffer, offset, chunkLength); - position += chunkLength; - totalRead += chunkLength; - } while (totalRead < length); - - return totalRead; - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - int result = read(b, offset, len, true /* we want to have full reads, that's the contract... 
*/); - if (result < len) { - throw new EOFException(); - } - } - - @Override - public long getFilePointer() { - return currentUncompressedChunkPointer + position; - } - - @Override - public void seek(long pos) throws IOException { - int idx = (int) (pos / uncompressedLength); - if (idx >= offsets.size()) { - // set the next "readyBuffer" to EOF - currentOffsetIdx = idx; - position = 0; - valid = 0; - return; - } - - // TODO: optimize so we won't have to readyBuffer on seek, can keep the position around, and set it on readyBuffer in this case - if (idx != currentOffsetIdx) { - long pointer = offsets.get(idx); - in.seek(pointer); - position = 0; - valid = 0; - currentOffsetIdx = idx - 1; // we are going to increase it in readyBuffer... - readyBuffer(); - } - position = (int) (pos % uncompressedLength); - } - - @Override - public long length() { - return totalUncompressedLength; - } - - @Override - public void close() throws IOException { - position = valid = 0; - if (!closed) { - closed = true; - doClose(); - in.close(); - } - } - - protected abstract void doClose() throws IOException; - - protected boolean readyBuffer() throws IOException { - if (position < valid) { - return true; - } - if (closed) { - return false; - } - // we reached the end... - if (currentOffsetIdx + 1 >= offsets.size()) { - return false; - } - valid = uncompress(in, uncompressed); - if (valid < 0) { - return false; - } - currentOffsetIdx++; - currentUncompressedChunkPointer = ((long) currentOffsetIdx) * uncompressedLength; - position = 0; - return (position < valid); - } - - protected abstract void readHeader(IndexInput in) throws IOException; - - /** - * Uncompress the data into the out array, returning the size uncompressed - */ - protected abstract int uncompress(IndexInput in, byte[] out) throws IOException; - - @Override - public IndexInput clone() { - // we clone and we need to make sure we keep the same positions! - CompressedIndexInput cloned = (CompressedIndexInput) super.clone(); - cloned.uncompressed = new byte[uncompressedLength]; - System.arraycopy(uncompressed, 0, cloned.uncompressed, 0, uncompressedLength); - cloned.in = (IndexInput) cloned.in.clone(); - return cloned; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java b/core/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java deleted file mode 100644 index bdacbd4727b..00000000000 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressedStreamInput.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.compress; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.EOFException; -import java.io.IOException; - -/** - */ -public abstract class CompressedStreamInput extends StreamInput { - - private final StreamInput in; - - private boolean closed; - - protected byte[] uncompressed; - private int position = 0; - private int valid = 0; - - public CompressedStreamInput(StreamInput in) throws IOException { - this.in = in; - super.setVersion(in.getVersion()); - readHeader(in); - } - - @Override - public void setVersion(Version version) { - in.setVersion(version); - super.setVersion(version); - } - - /** - * Method is overridden to report number of bytes that can now be read - * from decoded data buffer, without reading bytes from the underlying - * stream. - * Never throws an exception; returns number of bytes available without - * further reads from underlying source; -1 if stream has been closed, or - * 0 if an actual read (and possible blocking) is needed to find out. - */ - @Override - public int available() throws IOException { - // if closed, return -1; - if (closed) { - return -1; - } - int left = (valid - position); - return (left <= 0) ? 0 : left; - } - - @Override - public int read() throws IOException { - if (!readyBuffer()) { - return -1; - } - return uncompressed[position++] & 255; - } - - @Override - public byte readByte() throws IOException { - if (!readyBuffer()) { - throw new EOFException(); - } - return uncompressed[position++]; - } - - @Override - public int read(byte[] buffer, int offset, int length) throws IOException { - return read(buffer, offset, length, false); - } - - public int read(byte[] buffer, int offset, int length, boolean fullRead) throws IOException { - if (length < 1) { - return 0; - } - if (!readyBuffer()) { - return -1; - } - // First let's read however much data we happen to have... - int chunkLength = Math.min(valid - position, length); - System.arraycopy(uncompressed, position, buffer, offset, chunkLength); - position += chunkLength; - - if (chunkLength == length || !fullRead) { - return chunkLength; - } - // Need more data, then - int totalRead = chunkLength; - do { - offset += chunkLength; - if (!readyBuffer()) { - break; - } - chunkLength = Math.min(valid - position, (length - totalRead)); - System.arraycopy(uncompressed, position, buffer, offset, chunkLength); - position += chunkLength; - totalRead += chunkLength; - } while (totalRead < length); - - return totalRead; - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */); - if (result < len) { - throw new EOFException(); - } - } - - @Override - public void reset() throws IOException { - this.position = 0; - this.valid = 0; - in.reset(); - } - - @Override - public void close() throws IOException { - position = valid = 0; - if (!closed) { - closed = true; - doClose(); - in.close(); - } - } - - protected abstract void doClose() throws IOException; - - /** - * Fill the uncompressed bytes buffer by reading the underlying inputStream. 
- */ - protected boolean readyBuffer() throws IOException { - if (position < valid) { - return true; - } - if (closed) { - return false; - } - valid = uncompress(in, uncompressed); - if (valid < 0) { - return false; - } - position = 0; - return (position < valid); - } - - protected abstract void readHeader(StreamInput in) throws IOException; - - /** - * Uncompress the data into the out array, returning the size uncompressed - */ - protected abstract int uncompress(StreamInput in, byte[] out) throws IOException; - -} diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/core/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index 462b91aeef0..3864befcc04 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/core/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -80,7 +80,7 @@ public final class CompressedXContent { */ public CompressedXContent(ToXContent xcontent, XContentType type, ToXContent.Params params) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - OutputStream compressedStream = CompressorFactory.defaultCompressor().streamOutput(bStream); + OutputStream compressedStream = CompressorFactory.COMPRESSOR.streamOutput(bStream); CRC32 crc32 = new CRC32(); OutputStream checkedStream = new CheckedOutputStream(compressedStream, crc32); try (XContentBuilder builder = XContentFactory.contentBuilder(type, checkedStream)) { @@ -105,7 +105,7 @@ public final class CompressedXContent { this.crc32 = crc32(new BytesArray(uncompressed())); } else { BytesStreamOutput out = new BytesStreamOutput(); - try (OutputStream compressedOutput = CompressorFactory.defaultCompressor().streamOutput(out)) { + try (OutputStream compressedOutput = CompressorFactory.COMPRESSOR.streamOutput(out)) { data.writeTo(compressedOutput); } this.bytes = out.bytes().toBytes(); diff --git a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java index 252fad09807..62ed1cb6edb 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java +++ b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java @@ -33,21 +33,7 @@ public interface Compressor { boolean isCompressed(BytesReference bytes); - boolean isCompressed(ChannelBuffer buffer); - StreamInput streamInput(StreamInput in) throws IOException; StreamOutput streamOutput(StreamOutput out) throws IOException; - - /** - * @deprecated Used for backward comp. since we now use Lucene compressed codec. - */ - @Deprecated - boolean isCompressed(IndexInput in) throws IOException; - - /** - * @deprecated Used for backward comp. since we now use Lucene compressed codec. 
- */ - @Deprecated - CompressedIndexInput indexInput(IndexInput in) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java index e6c43a524ca..82e049704cc 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java +++ b/core/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java @@ -19,16 +19,13 @@ package org.elasticsearch.common.compress; -import org.apache.lucene.store.IndexInput; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.deflate.DeflateCompressor; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.jboss.netty.buffer.ChannelBuffer; import java.io.IOException; @@ -36,47 +33,21 @@ import java.io.IOException; */ public class CompressorFactory { - private static final Compressor[] compressors; - private static volatile Compressor defaultCompressor; - - static { - compressors = new Compressor[] { - new DeflateCompressor() - }; - defaultCompressor = new DeflateCompressor(); - } - - public static void setDefaultCompressor(Compressor defaultCompressor) { - CompressorFactory.defaultCompressor = defaultCompressor; - } - - public static Compressor defaultCompressor() { - return defaultCompressor; - } + public static final Compressor COMPRESSOR = new DeflateCompressor(); public static boolean isCompressed(BytesReference bytes) { return compressor(bytes) != null; } - /** - * @deprecated we don't compress lucene indexes anymore and rely on lucene codecs - */ - @Deprecated - public static boolean isCompressed(IndexInput in) throws IOException { - return compressor(in) != null; - } - @Nullable public static Compressor compressor(BytesReference bytes) { - for (Compressor compressor : compressors) { - if (compressor.isCompressed(bytes)) { + if (COMPRESSOR.isCompressed(bytes)) { // bytes should be either detected as compressed or as xcontent, // if we have bytes that can be either detected as compressed or // as a xcontent, we have a problem assert XContentFactory.xContentType(bytes) == null; - return compressor; + return COMPRESSOR; } - } XContentType contentType = XContentFactory.xContentType(bytes); if (contentType == null) { @@ -97,29 +68,6 @@ public class CompressorFactory { (bytes.get(2) == 0 || bytes.get(2) == 1); } - public static Compressor compressor(ChannelBuffer buffer) { - for (Compressor compressor : compressors) { - if (compressor.isCompressed(buffer)) { - return compressor; - } - } - throw new NotCompressedException(); - } - - /** - * @deprecated we don't compress lucene indexes anymore and rely on lucene codecs - */ - @Deprecated - @Nullable - public static Compressor compressor(IndexInput in) throws IOException { - for (Compressor compressor : compressors) { - if (compressor.isCompressed(in)) { - return compressor; - } - } - return null; - } - /** * Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(BytesReference)}. 
*/ diff --git a/core/src/main/java/org/elasticsearch/common/compress/deflate/DeflateCompressor.java b/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java similarity index 80% rename from core/src/main/java/org/elasticsearch/common/compress/deflate/DeflateCompressor.java rename to core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java index be396324788..42e2efa358c 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/deflate/DeflateCompressor.java +++ b/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java @@ -17,17 +17,14 @@ * under the License. */ -package org.elasticsearch.common.compress.deflate; +package org.elasticsearch.common.compress; -import org.apache.lucene.store.IndexInput; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressedIndexInput; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.jboss.netty.buffer.ChannelBuffer; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; @@ -35,6 +32,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.Deflater; import java.util.zip.DeflaterOutputStream; import java.util.zip.Inflater; @@ -69,20 +67,6 @@ public class DeflateCompressor implements Compressor { return true; } - @Override - public boolean isCompressed(ChannelBuffer buffer) { - if (buffer.readableBytes() < HEADER.length) { - return false; - } - final int offset = buffer.readerIndex(); - for (int i = 0; i < HEADER.length; ++i) { - if (buffer.getByte(offset + i) != HEADER[i]) { - return false; - } - } - return true; - } - @Override public StreamInput streamInput(StreamInput in) throws IOException { final byte[] headerBytes = new byte[HEADER.length]; @@ -103,16 +87,14 @@ public class DeflateCompressor implements Compressor { InputStream decompressedIn = new InflaterInputStream(in, inflater, BUFFER_SIZE); decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE); return new InputStreamStreamInput(decompressedIn) { - private boolean closed = false; - + final AtomicBoolean closed = new AtomicBoolean(false); public void close() throws IOException { try { super.close(); } finally { - if (closed == false) { + if (closed.compareAndSet(false, true)) { // important to release native memory inflater.end(); - closed = true; } } } @@ -128,29 +110,17 @@ public class DeflateCompressor implements Compressor { OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush); compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE); return new OutputStreamStreamOutput(compressedOut) { - private boolean closed = false; - + final AtomicBoolean closed = new AtomicBoolean(false); public void close() throws IOException { try { super.close(); } finally { - if (closed == false) { + if (closed.compareAndSet(false, true)) { // important to release native memory deflater.end(); - closed = true; } } } }; } - - @Override - public boolean isCompressed(IndexInput in) throws IOException { - return false; - } - - @Override - public CompressedIndexInput indexInput(IndexInput in) throws IOException { - throw new 
UnsupportedOperationException(); - } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index c2333ff177d..48be9f64d5c 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -335,7 +335,7 @@ public class PublishClusterStateAction extends AbstractComponent { public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) { + try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) { stream.setVersion(nodeVersion); stream.writeBoolean(true); clusterState.writeTo(stream); @@ -345,7 +345,7 @@ public class PublishClusterStateAction extends AbstractComponent { public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - try (StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream)) { + try (StreamOutput stream = CompressorFactory.COMPRESSOR.streamOutput(bStream)) { stream.setVersion(nodeVersion); stream.writeBoolean(false); diff.writeTo(stream); diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 6cea34cf679..37df2ddfb90 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -195,7 +195,7 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm protected BytesReference write(T obj) throws IOException { try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { write(obj, compressedStreamOutput); } } else { diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index e45635e3349..55793384b1b 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport.netty; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.ChannelBufferBytesReference; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; @@ -110,7 +111,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) { Compressor compressor; try { - compressor = CompressorFactory.compressor(buffer); + compressor = CompressorFactory.compressor(new ChannelBufferBytesReference(buffer)); } catch (NotCompressedException ex) 
{ int maxToRead = Math.min(buffer.readableBytes(), 10); int offset = buffer.readerIndex(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index c9f02066836..53eb63c86c5 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -886,7 +886,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem // the header part is compressed, and the "body" can't be extracted as compressed if (options.compress() && (!(request instanceof BytesTransportRequest))) { status = TransportStatus.setCompress(status); - stream = CompressorFactory.defaultCompressor().streamOutput(stream); + stream = CompressorFactory.COMPRESSOR.streamOutput(stream); } // we pick the smallest of the 2, to support both backward and forward compatibility diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java index 03856017c36..65ea00d75e5 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java @@ -99,7 +99,7 @@ public class NettyTransportChannel implements TransportChannel { StreamOutput stream = bStream; if (options.compress()) { status = TransportStatus.setCompress(status); - stream = CompressorFactory.defaultCompressor().streamOutput(stream); + stream = CompressorFactory.COMPRESSOR.streamOutput(stream); } stream.setVersion(version); response.writeTo(stream); diff --git a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTestCase.java b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressTests.java similarity index 98% rename from core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTestCase.java rename to core/src/test/java/org/elasticsearch/common/compress/DeflateCompressTests.java index 0e94f6eaf80..33d11aa23d8 100644 --- a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressTests.java @@ -37,13 +37,9 @@ import java.util.concurrent.CountDownLatch; /** * Test streaming compression (e.g. 
used for recovery) */ -public abstract class AbstractCompressedStreamTestCase extends ESTestCase { +public class DeflateCompressTests extends ESTestCase { - private final Compressor compressor; - - protected AbstractCompressedStreamTestCase(Compressor compressor) { - this.compressor = compressor; - } + private final Compressor compressor = new DeflateCompressor(); public void testRandom() throws IOException { Random r = random(); diff --git a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTestCase.java b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java similarity index 63% rename from core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTestCase.java rename to core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java index d1c862f8a69..72866d082ae 100644 --- a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java @@ -35,13 +35,9 @@ import static org.hamcrest.Matchers.not; /** * */ -public abstract class AbstractCompressedXContentTestCase extends ESTestCase { +public class DeflateCompressedXContentTests extends ESTestCase { - private final Compressor compressor; - - protected AbstractCompressedXContentTestCase(Compressor compressor) { - this.compressor = compressor; - } + private final Compressor compressor = new DeflateCompressor(); private void assertEquals(CompressedXContent s1, CompressedXContent s2) { Assert.assertEquals(s1, s2); @@ -50,38 +46,26 @@ public abstract class AbstractCompressedXContentTestCase extends ESTestCase { } public void simpleTests() throws IOException { - Compressor defaultCompressor = CompressorFactory.defaultCompressor(); - try { - CompressorFactory.setDefaultCompressor(compressor); - String str = "---\nf:this is a simple string"; - CompressedXContent cstr = new CompressedXContent(str); - assertThat(cstr.string(), equalTo(str)); - assertThat(new CompressedXContent(str), equalTo(cstr)); + String str = "---\nf:this is a simple string"; + CompressedXContent cstr = new CompressedXContent(str); + assertThat(cstr.string(), equalTo(str)); + assertThat(new CompressedXContent(str), equalTo(cstr)); - String str2 = "---\nf:this is a simple string 2"; - CompressedXContent cstr2 = new CompressedXContent(str2); - assertThat(cstr2.string(), not(equalTo(str))); - assertThat(new CompressedXContent(str2), not(equalTo(cstr))); - assertEquals(new CompressedXContent(str2), cstr2); - } finally { - CompressorFactory.setDefaultCompressor(defaultCompressor); - } + String str2 = "---\nf:this is a simple string 2"; + CompressedXContent cstr2 = new CompressedXContent(str2); + assertThat(cstr2.string(), not(equalTo(str))); + assertThat(new CompressedXContent(str2), not(equalTo(cstr))); + assertEquals(new CompressedXContent(str2), cstr2); } public void testRandom() throws IOException { - Compressor defaultCompressor = CompressorFactory.defaultCompressor(); - try { - CompressorFactory.setDefaultCompressor(compressor); - Random r = random(); - for (int i = 0; i < 1000; i++) { - String string = TestUtil.randomUnicodeString(r, 10000); - // hack to make it detected as YAML - string = "---\n" + string; - CompressedXContent compressedXContent = new CompressedXContent(string); - assertThat(compressedXContent.string(), equalTo(string)); - } - } finally { - CompressorFactory.setDefaultCompressor(defaultCompressor); + Random r = random(); + for 
(int i = 0; i < 1000; i++) { + String string = TestUtil.randomUnicodeString(r, 10000); + // hack to make it detected as YAML + string = "---\n" + string; + CompressedXContent compressedXContent = new CompressedXContent(string); + assertThat(compressedXContent.string(), equalTo(string)); } } diff --git a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java deleted file mode 100644 index a6d33585dbc..00000000000 --- a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.compress.deflate; - -import org.elasticsearch.common.compress.AbstractCompressedStreamTestCase; - -public class DeflateCompressedStreamTests extends AbstractCompressedStreamTestCase { - - public DeflateCompressedStreamTests() { - super(new DeflateCompressor()); - } - -} diff --git a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java deleted file mode 100644 index 359a582e169..00000000000 --- a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.compress.deflate; - -import org.elasticsearch.common.compress.AbstractCompressedXContentTestCase; - -public class DeflateXContentTests extends AbstractCompressedXContentTestCase { - - public DeflateXContentTests() { - super(new DeflateCompressor()); - } - -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java index 7be0cc8031b..fc8e2ba1872 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java @@ -76,7 +76,7 @@ public class BinaryMappingTests extends ESSingleNodeTestCase { // case 2: a value that looks compressed: this used to fail in 1.x BytesStreamOutput out = new BytesStreamOutput(); - try (StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out)) { + try (StreamOutput compressed = CompressorFactory.COMPRESSOR.streamOutput(out)) { new BytesArray(binaryValue1).writeTo(compressed); } final byte[] binaryValue2 = out.bytes().toBytes(); diff --git a/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index b2b9e780205..e1589b4cd2f 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -138,7 +138,7 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase { private BytesReference write(T obj) throws IOException { try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.defaultCompressor().streamOutput(bytesStreamOutput)) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { write(obj, compressedStreamOutput); } } else { From 44ee56c07367fe97585f4b933b97e464938fd168 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 28 Jun 2016 19:05:52 +0100 Subject: [PATCH 22/43] Added documentation for aggregation profiling --- docs/reference/search/profile.asciidoc | 216 +++++++++++++++++++------ 1 file changed, 166 insertions(+), 50 deletions(-) diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 25820d04800..62e04e669a1 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -4,25 +4,12 @@ experimental[] The Profile API provides detailed timing information about the execution of individual components -in a query. It gives the user insight into how queries are executed at a low level so that -the user can understand why certain queries are slow, and take steps to improve their slow queries. +in a search request. It gives the user insight into how search requests are executed at a low level so that +the user can understand why certain requests are slow, and take steps to improve them. -The output from the Profile API is *very* verbose, especially for complicated queries executed across +The output from the Profile API is *very* verbose, especially for complicated requests executed across many shards. 
Pretty-printing the response is recommended to help understand the output -[NOTE] -======================================= -The details provided by the Profile API directly expose Lucene class names and concepts, which means -that complete interpretation of the results require fairly advanced knowledge of Lucene. This -page attempts to give a crash-course in how Lucene executes queries so that you can use the Profile API to successfully -diagnose and debug queries, but it is only an overview. For complete understanding, please refer -to Lucene's documentation and, in places, the code. - -With that said, a complete understanding is often not required to fix a slow query. It is usually -sufficient to see that a particular component of a query is slow, and not necessarily understand why -the `advance` phase of that query is the cause, for example. -======================================= - [float] === Usage @@ -35,7 +22,7 @@ curl -XGET 'localhost:9200/_search' -d '{ "query" : { "match" : { "message" : "search test" } } -} +}' -------------------------------------------------- <1> Setting the top-level `profile` parameter to `true` will enable profiling for the search @@ -141,7 +128,8 @@ First, the overall structure of the profile response is as follows: "rewrite_time": 185002, <3> "collector": [...] <4> } - ] + ], + "aggregations": [...] <5> } ] } @@ -152,6 +140,7 @@ by a unique ID <2> Each profile contains a section which holds details about the query execution <3> Each profile has a single time representing the cumulative rewrite time <4> Each profile also contains a section about the Lucene Collectors which run the search +<5> Each profile contains a section which holds the details about the aggregation execution Because a search request may be executed against one or more shards in an index, and a search may cover one or more indices, the top level element in the profile response is an array of `shard` objects. @@ -164,12 +153,26 @@ But occasionally multiple searches will be executed, such as including a global a secondary "match_all" query for the global context). Inside each `search` object there will be two arrays of profiled information: -a `query` array and a `collector` array. In the future, more sections may be added, such as `suggest`, `highlight`, -`aggregations`, etc +a `query` array and a `collector` array. Alongside the `search` object is an `aggregations` object that contains the profile information for the aggregations. In the future, more sections may be added, such as `suggest`, `highlight`, etc There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds). -=== `query` Section +=== Profiling Queries + +[NOTE] +======================================= +The details provided by the Profile API directly expose Lucene class names and concepts, which means +that complete interpretation of the results require fairly advanced knowledge of Lucene. This +page attempts to give a crash-course in how Lucene executes queries so that you can use the Profile API to successfully +diagnose and debug queries, but it is only an overview. For complete understanding, please refer +to Lucene's documentation and, in places, the code. + +With that said, a complete understanding is often not required to fix a slow query. It is usually +sufficient to see that a particular component of a query is slow, and not necessarily understand why +the `advance` phase of that query is the cause, for example. 
+======================================= + +==== `query` Section The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard. The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly @@ -217,9 +220,9 @@ that in a moment. Finally, the `"children"` array lists any sub-queries that ma values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (type, time, breakdown, etc). Children are allowed to have their own children. -==== Timing Breakdown +===== Timing Breakdown -The `breakdown` component lists detailed timing statistics about low-level Lucene execution: +The `"breakdown"` component lists detailed timing statistics about low-level Lucene execution: [source,js] -------------------------------------------------- @@ -235,14 +238,14 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen -------------------------------------------------- Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall -`time` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is +`"time"` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time, the breakdown is inclusive of all children times. The meaning of the stats are as follows: [float] -=== All parameters: +==== All parameters: [horizontal] `create_weight`:: @@ -303,7 +306,7 @@ The meaning of the stats are as follows: This records the time taken to score a particular document via it's Scorer -=== `collectors` Section +==== `collectors` Section The Collectors portion of the response shows high-level execution details. Lucene works by defining a "Collector" which is responsible for coordinating the traversal, scoring and collection of matching documents. Collectors @@ -379,7 +382,7 @@ For reference, the various collector reason's are: -=== `rewrite` Section +==== `rewrite` Section All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or more times, and the process continues until the query stops changing. This process allows Lucene to perform @@ -391,7 +394,7 @@ The rewriting process is complex and difficult to display, since queries can cha showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This value is cumulative and contains the total time for all queries being rewritten. -=== A more complex example +==== A more complex example To demonstrate a slightly more complex query and the associated results, we can profile the following query: @@ -563,28 +566,7 @@ The Collector tree is fairly straightforward, showing how a single MultiCollecto to execute the post_filter (and in turn wraps the normal scoring SimpleCollector), a BucketCollector to run all scoped aggregations. In the MatchAll search, there is a single GlobalAggregator to run the global aggregation. -=== Performance Notes - -Like any profiler, the Profile API introduce a non-negligible overhead to query execution. The act of instrumenting -low-level method calls such as `advance` and `next_doc` can be fairly expensive, since these methods are called -in tight loops. 
Therefore, profiling should not be enabled in production settings by default, and should not -be compared against non-profiled query times. Profiling is just a diagnostic tool. - -There are also cases where special Lucene optimizations are disabled, since they are not amenable to profiling. This -could cause some queries to report larger relative times than their non-profiled counterparts, but in general should -not have a drastic effect compared to other components in the profiled query. - -=== Limitations - -- Profiling statistics are currently not available for suggestions, highlighting, `dfs_query_then_fetch` -- Detailed breakdown for aggregations is not currently available past the high-level overview provided -from the Collectors -- The Profiler is still highly experimental. The Profiler is instrumenting parts of Lucene that were -never designed to be exposed in this manner, and so all results should be viewed as a best effort to provide detailed -diagnostics. We hope to improve this over time. If you find obviously wrong numbers, strange query structures or -other bugs, please report them! - -=== Understanding MultiTermQuery output +==== Understanding MultiTermQuery output A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex and fuzzy queries. These queries emit very verbose responses, and are not overly structured. @@ -602,3 +584,137 @@ just not the physical layout in the response, so it is sufficient to just analyz ignore it's children if you find the details too tricky to interpret. Hopefully this will be fixed in future iterations, but it is a tricky problem to solve and still in-progress :) + +=== Profiling Aggregations + +==== `aggregations` Section + +The `aggregations` section contains detailed timing of the aggregation tree executed by a particular shard. +The overall structure of this aggregation tree will resemble your original Elasticsearch request. Let's consider +the following example aggregations request: + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/house-prices/_search" -d' +{ + "profile": true, + "size": 0, + "aggs": { + "property_type": { + "terms": { + "field": "propertyType" + }, + "aggs": { + "avg_price": { + "avg": { + "field": "price" + } + } + } + } + } +}' +-------------------------------------------------- + +Which yields the following aggregation profile output + +[source,js] +-------------------------------------------------- +"aggregations": [ + { + "type": "org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator", + "description": "property_type", + "time": "4280.456978ms", + "breakdown": { + "reduce": 0, + "build_aggregation": 49765, + "initialise": 52785, + "collect": 3155490036 + }, + "children": [ + { + "type": "org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator", + "description": "avg_price", + "time": "1124.864392ms", + "breakdown": { + "reduce": 0, + "build_aggregation": 1394, + "initialise": 2883, + "collect": 1124860115 + } + } + ] + } +] +-------------------------------------------------- + +From the profile structure we can see our `property_type` terms aggregation which is internally represented by the +`GlobalOrdinalsStringTermsAggregator` class and the sub aggregator `avg_price` which is internally represented by the `AvgAggregator` class. The `type` field displays the class used internally to represent the aggregation. The `description` field displays the name of the aggregation. 
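To make the shape of the tree concrete, the fragment below is a purely illustrative sketch of how a second, hypothetical `max_price` max aggregation under `property_type` would show up: it simply becomes another entry in the `children` array, mirroring the nesting of the request. The aggregator class name and all timings in this sketch are assumptions for illustration and were not captured from a real response.

[source,js]
--------------------------------------------------
"children": [
  {
    "type": "org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator",
    "description": "avg_price",
    "time": "1124.864392ms",
    "breakdown": { "reduce": 0, "build_aggregation": 1394, "initialise": 2883, "collect": 1124860115 }
  },
  {
    "type": "org.elasticsearch.search.aggregations.metrics.max.MaxAggregator", <1>
    "description": "max_price",
    "time": "987.654321ms",
    "breakdown": { "reduce": 0, "build_aggregation": 1100, "initialise": 2100, "collect": 987650000 }
  }
]
--------------------------------------------------
<1> Assumed class name for a hypothetical `max` sub-aggregation; treat it, and every number in this sketch, as illustrative only.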
+ +The `"time"` field shows that it took ~4 seconds for the entire aggregation to execute. The recorded time is inclusive +of all children. + +The `"breakdown"` field will give detailed stats about how the time was spent; we'll look at +that in a moment. Finally, the `"children"` array lists any sub-aggregations that may be present. Because we have an `avg_price` aggregation as a sub-aggregation to the `property_type` aggregation, we see it listed as a child of the `property_type` aggregation. The two aggregation outputs have identical information (type, time, +breakdown, etc). Children are allowed to have their own children. + +===== Timing Breakdown + +The `"breakdown"` component lists detailed timing statistics about low-level Lucene execution: + +[source,js] +-------------------------------------------------- +"breakdown": { + "reduce": 0, + "build_aggregation": 49765, + "initialise": 52785, + "collect": 3155490036 +} +-------------------------------------------------- + +Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall +`time` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Elasticsearch is +actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time, +the breakdown is inclusive of all children times. + +The meaning of the stats is as follows: + +[float] +==== All parameters: + +[horizontal] +`initialise`:: + + This times how long it takes to create and initialise the aggregation before starting to collect documents. + +`collect`:: + + This represents the cumulative time spent in the collect phase of the aggregation. This is where matching documents are passed to the aggregation and the state of the aggregator is updated based on the information contained in the documents. + +`build_aggregation`:: + + This represents the time spent creating the shard level results of the aggregation ready to pass back to the reducing node after the collection of documents is finished. + +`reduce`:: + + This is not currently used and will always report `0`. Currently aggregation profiling only times the shard level parts of the aggregation execution. Timing of the reduce phase will be added later. + +=== Performance Notes + +Like any profiler, the Profile API introduces a non-negligible overhead to search execution. The act of instrumenting +low-level method calls such as `collect`, `advance` and `next_doc` can be fairly expensive, since these methods are called +in tight loops. Therefore, profiling should not be enabled in production settings by default, and should not +be compared against non-profiled query times. Profiling is just a diagnostic tool. + +There are also cases where special Lucene optimizations are disabled, since they are not amenable to profiling. This +could cause some queries to report larger relative times than their non-profiled counterparts, but in general should +not have a drastic effect compared to other components in the profiled query. + +=== Limitations + +- Profiling statistics are currently not available for suggestions, highlighting, `dfs_query_then_fetch` +- Profiling of the reduce phase of aggregation is currently not available +- The Profiler is still highly experimental. The Profiler is instrumenting parts of Lucene that were +never designed to be exposed in this manner, and so all results should be viewed as a best effort to provide detailed +diagnostics. We hope to improve this over time.
If you find obviously wrong numbers, strange query structures or +other bugs, please report them! From 67bfecc070392a222c53627438f290a3b3c78a3e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 24 Jun 2016 13:26:46 -0400 Subject: [PATCH 23/43] Painless: add "".replaceAll and "".replaceFirst These are useful methods in groovy that give you control over the replacements used: ``` 'the quick brown fox'.replaceAll(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT)) ``` --- .../modules/scripting/painless.asciidoc | 81 +++++++++++++++---- .../elasticsearch/painless/Augmentation.java | 44 ++++++++++ .../org/elasticsearch/painless/java.lang.txt | 2 + .../elasticsearch/painless/RegexTests.java | 56 +++++++++++++ 4 files changed, 169 insertions(+), 14 deletions(-) diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index db68db2c7a3..93fb136913b 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -237,8 +237,8 @@ POST hockey/player/_update_by_query ---------------------------------------------------------------- // CONSOLE -Or you can use the `Pattern.matcher` directory to get a `Matcher` instance and -remove all of the vowels in all of their names: +You can use the `Pattern.matcher` directly to get a `Matcher` instance and +remove all of the vowels in all of their last names: [source,js] ---------------------------------------------------------------- @@ -252,6 +252,59 @@ POST hockey/player/_update_by_query ---------------------------------------------------------------- // CONSOLE +`Matcher.replaceAll` is just a call to Java's `Matcher`'s +http://docs.oracle.com/javase/8/docs/api/java/util/regex/Matcher.html#replaceAll-java.lang.String-[replaceAll] +method so it supports `$1` and `\1` for replacements: + +[source,js] +---------------------------------------------------------------- +POST hockey/player/_update_by_query +{ + "script": { + "lang": "painless", + "inline": "ctx._source.last = /n([aeiou])/.matcher(ctx._source.last).replaceAll('$1')" + } +} +---------------------------------------------------------------- +// CONSOLE + +If you need more control over replacements you can call `replaceAll` on a +`CharSequence` with a `Function` that builds the replacement. +This does not support `$1` or `\1` to access replacements because you already +have a reference to the matcher and can get them with `m.group(1)`. + +IMPORTANT: Calling `Matcher.find` inside of the function that builds the +replacement is rude and will likely break the replacement process. 
+ +This will make all of the vowels in the hockey player's last names upper case: + +[source,js] +---------------------------------------------------------------- +POST hockey/player/_update_by_query +{ + "script": { + "lang": "painless", + "inline": "ctx._source.last = ctx._source.last.replaceAll(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))" + } +} +---------------------------------------------------------------- +// CONSOLE + +Or you can use the `CharSequence.replaceFirst` to make the first vowel in their +last names upper case: + +[source,js] +---------------------------------------------------------------- +POST hockey/player/_update_by_query +{ + "script": { + "lang": "painless", + "inline": "ctx._source.last = ctx._source.last.replaceFirst(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))" + } +} +---------------------------------------------------------------- +// CONSOLE + Note: all of the `_update_by_query` examples above could really do with a `query` to limit the data that they pull back. While you *could* use a @@ -265,18 +318,18 @@ documents that they have to check. The following Java packages are available for use in the Painless language: -* https://docs.oracle.com/javase/8/docs/api/java/lang/package-summary.html[java.lang] -* https://docs.oracle.com/javase/8/docs/api/java/math/package-summary.html[java.math] -* https://docs.oracle.com/javase/8/docs/api/java/text/package-summary.html[java.text] -* https://docs.oracle.com/javase/8/docs/api/java/time/package-summary.html[java.time] -* https://docs.oracle.com/javase/8/docs/api/java/time/chrono/package-summary.html[java.time.chrono] -* https://docs.oracle.com/javase/8/docs/api/java/time/format/package-summary.html[java.time.format] -* https://docs.oracle.com/javase/8/docs/api/java/time/temporal/package-summary.html[java.time.temporal] -* https://docs.oracle.com/javase/8/docs/api/java/time/zone/package-summary.html[java.time.zone] -* https://docs.oracle.com/javase/8/docs/api/java/util/package-summary.html[java.util] -* https://docs.oracle.com/javase/8/docs/api/java/util/function/package-summary.html[java.util.function] -* https://docs.oracle.com/javase/8/docs/api/java/util/regex/package-summary.html[java.util.regex] -* https://docs.oracle.com/javase/8/docs/api/java/util/stream/package-summary.html[java.util.stream] +* https://docs.oracle.com/javase/8/docs/api/java/lang/package-summary.html[java.lang] +* https://docs.oracle.com/javase/8/docs/api/java/math/package-summary.html[java.math] +* https://docs.oracle.com/javase/8/docs/api/java/text/package-summary.html[java.text] +* https://docs.oracle.com/javase/8/docs/api/java/time/package-summary.html[java.time] +* https://docs.oracle.com/javase/8/docs/api/java/time/chrono/package-summary.html[java.time.chrono] +* https://docs.oracle.com/javase/8/docs/api/java/time/format/package-summary.html[java.time.format] +* https://docs.oracle.com/javase/8/docs/api/java/time/temporal/package-summary.html[java.time.temporal] +* https://docs.oracle.com/javase/8/docs/api/java/time/zone/package-summary.html[java.time.zone] +* https://docs.oracle.com/javase/8/docs/api/java/util/package-summary.html[java.util] +* https://docs.oracle.com/javase/8/docs/api/java/util/function/package-summary.html[java.util.function] +* https://docs.oracle.com/javase/8/docs/api/java/util/regex/package-summary.html[java.util.regex] +* https://docs.oracle.com/javase/8/docs/api/java/util/stream/package-summary.html[java.util.stream] Note that unsafe classes and methods are not included, there is no support for: diff 
--git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Augmentation.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Augmentation.java index edd0eeab67f..9302f3c899c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Augmentation.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Augmentation.java @@ -34,6 +34,7 @@ import java.util.function.ObjIntConsumer; import java.util.function.Predicate; import java.util.function.ToDoubleFunction; import java.util.regex.Matcher; +import java.util.regex.Pattern; /** Additional methods added to classes. These must be static methods with receiver as first argument */ public class Augmentation { @@ -442,4 +443,47 @@ public class Augmentation { } return map; } + + // CharSequence augmentation + /** + * Replace all matches. Similar to {@link Matcher#replaceAll(String)} but allows you to customize the replacement based on the match. + */ + public static String replaceAll(CharSequence receiver, Pattern pattern, Function replacementBuilder) { + Matcher m = pattern.matcher(receiver); + if (false == m.find()) { + // CharSequqence's toString is *supposed* to always return the characters in the sequence as a String + return receiver.toString(); + } + StringBuffer result = new StringBuffer(initialBufferForReplaceWith(receiver)); + do { + m.appendReplacement(result, Matcher.quoteReplacement(replacementBuilder.apply(m))); + } while (m.find()); + m.appendTail(result); + return result.toString(); + } + + /** + * Replace the first match. Similar to {@link Matcher#replaceFirst(String)} but allows you to customize the replacement based on the + * match. + */ + public static String replaceFirst(CharSequence receiver, Pattern pattern, Function replacementBuilder) { + Matcher m = pattern.matcher(receiver); + if (false == m.find()) { + // CharSequqence's toString is *supposed* to always return the characters in the sequence as a String + return receiver.toString(); + } + StringBuffer result = new StringBuffer(initialBufferForReplaceWith(receiver)); + m.appendReplacement(result, Matcher.quoteReplacement(replacementBuilder.apply(m))); + m.appendTail(result); + return result.toString(); + } + + /** + * The initial size of the {@link StringBuilder} used for {@link #replaceFirst(CharSequence, Pattern, Function)} and + * {@link #replaceAll(CharSequence, Pattern, Function)} for a particular sequence. We ape + * {{@link StringBuilder#StringBuilder(CharSequence)} here and add 16 extra chars to the buffer to have a little room for growth. 
+ */ + private static int initialBufferForReplaceWith(CharSequence seq) { + return seq.length() + 16; + } } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt index 17016f1d318..13f28d3ebeb 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/java.lang.txt @@ -36,6 +36,8 @@ class CharSequence -> java.lang.CharSequence { IntStream chars() IntStream codePoints() int length() + String replaceAll*(Pattern,Function) + String replaceFirst*(Pattern,Function) CharSequence subSequence(int,int) String toString() } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index e255a776bed..615dec67dc4 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.painless; +import java.nio.CharBuffer; import java.util.Arrays; import java.util.HashSet; import java.util.regex.Pattern; @@ -175,6 +176,61 @@ public class RegexTests extends ScriptTestCase { assertEquals(Pattern.CANON_EQ | Pattern.CASE_INSENSITIVE | Pattern.UNICODE_CASE | Pattern.COMMENTS, exec("/./ciux.flags()")); } + public void testReplaceAllMatchesString() { + assertEquals("thE qUIck brOwn fOx", exec("'the quick brown fox'.replaceAll(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))")); + } + + public void testReplaceAllMatchesCharSequence() { + CharSequence charSequence = CharBuffer.wrap("the quick brown fox"); + assertEquals("thE qUIck brOwn fOx", + exec("params.a.replaceAll(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))", singletonMap("a", charSequence))); + } + + public void testReplaceAllNoMatchString() { + assertEquals("i am cat", exec("'i am cat'.replaceAll(/dolphin/, m -> m.group().toUpperCase(Locale.ROOT))")); + } + + public void testReplaceAllNoMatchCharSequence() { + CharSequence charSequence = CharBuffer.wrap("i am cat"); + assertEquals("i am cat", + exec("params.a.replaceAll(/dolphin/, m -> m.group().toUpperCase(Locale.ROOT))", singletonMap("a", charSequence))); + } + + public void testReplaceAllQuoteReplacement() { + assertEquals("th/E q/U/Ick br/Own f/Ox", + exec("'the quick brown fox'.replaceAll(/[aeiou]/, m -> '/' + m.group().toUpperCase(Locale.ROOT))")); + assertEquals("th$E q$U$Ick br$Own f$Ox", + exec("'the quick brown fox'.replaceAll(/[aeiou]/, m -> '$' + m.group().toUpperCase(Locale.ROOT))")); + } + + public void testReplaceFirstMatchesString() { + assertEquals("thE quick brown fox", + exec("'the quick brown fox'.replaceFirst(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))")); + } + + public void testReplaceFirstMatchesCharSequence() { + CharSequence charSequence = CharBuffer.wrap("the quick brown fox"); + assertEquals("thE quick brown fox", + exec("params.a.replaceFirst(/[aeiou]/, m -> m.group().toUpperCase(Locale.ROOT))", singletonMap("a", charSequence))); + } + + public void testReplaceFirstNoMatchString() { + assertEquals("i am cat", exec("'i am cat'.replaceFirst(/dolphin/, m -> m.group().toUpperCase(Locale.ROOT))")); + } + + public void testReplaceFirstNoMatchCharSequence() { + CharSequence charSequence = CharBuffer.wrap("i am cat"); + assertEquals("i am cat", + 
exec("params.a.replaceFirst(/dolphin/, m -> m.group().toUpperCase(Locale.ROOT))", singletonMap("a", charSequence))); + } + + public void testReplaceFirstQuoteReplacement() { + assertEquals("th/E quick brown fox", + exec("'the quick brown fox'.replaceFirst(/[aeiou]/, m -> '/' + m.group().toUpperCase(Locale.ROOT))")); + assertEquals("th$E quick brown fox", + exec("'the quick brown fox'.replaceFirst(/[aeiou]/, m -> '$' + m.group().toUpperCase(Locale.ROOT))")); + } + public void testCantUsePatternCompile() { IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> { exec("Pattern.compile('aa')"); From 6590e77c1a2c07c2627471fbbaff81381bb21d0e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 28 Jun 2016 16:08:22 -0700 Subject: [PATCH 24/43] Plugins: Make plugins closeable This change allows Plugin implementions to implement Closeable when they have resources that should be released. As a first example of how this can be used, I switched over ingest plugins, which just had the geoip processor. The ingest framework had chains of closeable to support this, which is now removed. --- .../elasticsearch/ingest/IngestService.java | 8 +------ .../elasticsearch/ingest/PipelineStore.java | 9 +------- .../ingest/ProcessorsRegistry.java | 22 ++++--------------- .../java/org/elasticsearch/node/Node.java | 1 + .../node/service/NodeService.java | 1 - .../ingest/geoip/GeoIpProcessor.java | 7 +----- .../ingest/geoip/IngestGeoIpPlugin.java | 18 +++++++++++++-- 7 files changed, 24 insertions(+), 42 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestService.java b/core/src/main/java/org/elasticsearch/ingest/IngestService.java index 228de5b37bc..ce7d6dadee5 100644 --- a/core/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/core/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -33,7 +33,7 @@ import java.util.Map; /** * Holder class for several ingest related services. 
*/ -public class IngestService implements Closeable { +public class IngestService { private final PipelineStore pipelineStore; private final PipelineExecutionService pipelineExecutionService; @@ -65,10 +65,4 @@ public class IngestService implements Closeable { } return new IngestInfo(processorInfoList); } - - @Override - public void close() throws IOException { - pipelineStore.close(); - } - } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java index f96a7179656..3118d2b5076 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -47,7 +47,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class PipelineStore extends AbstractComponent implements Closeable, ClusterStateListener { +public class PipelineStore extends AbstractComponent implements ClusterStateListener { private final Pipeline.Factory factory = new Pipeline.Factory(); private ProcessorsRegistry processorRegistry; @@ -67,13 +67,6 @@ public class PipelineStore extends AbstractComponent implements Closeable, Clust this.processorRegistry = processorsRegistryBuilder.build(scriptService, clusterService); } - @Override - public void close() throws IOException { - // TODO: When org.elasticsearch.node.Node can close Closable instances we should try to remove this code, - // since any wired closable should be able to close itself - processorRegistry.close(); - } - @Override public void clusterChanged(ClusterChangedEvent event) { innerUpdatePipelines(event.state()); diff --git a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java index 28abfc45313..dd16898aea2 100644 --- a/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java +++ b/core/src/main/java/org/elasticsearch/ingest/ProcessorsRegistry.java @@ -19,20 +19,17 @@ package org.elasticsearch.ingest; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.script.ScriptService; - import java.io.Closeable; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.function.Function; -public final class ProcessorsRegistry implements Closeable { +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.script.ScriptService; + +public final class ProcessorsRegistry { private final Map processorFactories; private final TemplateService templateService; @@ -67,17 +64,6 @@ public final class ProcessorsRegistry implements Closeable { return processorFactories.get(name); } - @Override - public void close() throws IOException { - List closeables = new ArrayList<>(); - for (Processor.Factory factory : processorFactories.values()) { - if (factory instanceof Closeable) { - closeables.add((Closeable) factory); - } - } - IOUtils.close(closeables); - } - // For testing: Map getProcessorFactories() { return processorFactories; diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 675d29e9991..211ff9ffe65 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -552,6 +552,7 @@ public class Node implements Closeable { toClose.add(() -> 
stopWatch.stop().start("plugin(" + plugin.getName() + ")")); toClose.add(injector.getInstance(plugin)); } + toClose.addAll(pluginsService.filterPlugins(Closeable.class)); toClose.add(() -> stopWatch.stop().start("script")); toClose.add(injector.getInstance(ScriptService.class)); diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index ed03dc9375c..0920a84bfd6 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -199,7 +199,6 @@ public class NodeService extends AbstractComponent implements Closeable { @Override public void close() throws IOException { - ingestService.close(); indicesService.close(); } } diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index c29bf4aa65d..0b18cae25e7 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -217,7 +217,7 @@ public final class GeoIpProcessor extends AbstractProcessor { return geoData; } - public static final class Factory extends AbstractProcessorFactory implements Closeable { + public static final class Factory extends AbstractProcessorFactory { static final Set DEFAULT_CITY_PROPERTIES = EnumSet.of( Property.CONTINENT_NAME, Property.COUNTRY_ISO_CODE, Property.REGION_NAME, Property.CITY_NAME, Property.LOCATION @@ -267,11 +267,6 @@ public final class GeoIpProcessor extends AbstractProcessor { return new GeoIpProcessor(processorTag, ipField, databaseReader, targetField, properties); } - - @Override - public void close() throws IOException { - IOUtils.close(databaseReaders.values()); - } } // Geoip2's AddressNotFoundException is checked and due to the fact that we need run their code diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 8339ff1fc60..d814ae46bea 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -20,9 +20,11 @@ package org.elasticsearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.node.NodeModule; import org.elasticsearch.plugins.Plugin; +import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.nio.file.Files; @@ -36,11 +38,16 @@ import java.util.Map; import java.util.stream.Stream; import java.util.zip.GZIPInputStream; -public class IngestGeoIpPlugin extends Plugin { +public class IngestGeoIpPlugin extends Plugin implements Closeable { + + private Map databaseReaders; public void onModule(NodeModule nodeModule) throws IOException { + if (databaseReaders != null) { + throw new IllegalStateException("called onModule twice for geoip plugin!!"); + } Path geoIpConfigDirectory = nodeModule.getNode().getEnvironment().configFile().resolve("ingest-geoip"); - Map databaseReaders = loadDatabaseReaders(geoIpConfigDirectory); + databaseReaders = loadDatabaseReaders(geoIpConfigDirectory); nodeModule.registerProcessor(GeoIpProcessor.TYPE, (registry) -> new 
GeoIpProcessor.Factory(databaseReaders)); } @@ -65,4 +72,11 @@ public class IngestGeoIpPlugin extends Plugin { } return Collections.unmodifiableMap(databaseReaders); } + + @Override + public void close() throws IOException { + if (databaseReaders != null) { + IOUtils.close(databaseReaders.values()); + } + } } From b97ea9954c1ff975e848cdfcafeb49cc73a5be26 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 28 Jun 2016 11:21:08 +0200 Subject: [PATCH 25/43] percolator: Use RamDirectory for percolating nested document instead of using multiple MemoryIndex instances with SlowCompositeReaderWrapper workaround --- .../percolator/PercolateQueryBuilder.java | 29 +++++++------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 8ce12c1f7e6..3ce3726aa5e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -22,11 +22,11 @@ package org.elasticsearch.percolator; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Term; import org.apache.lucene.index.memory.MemoryIndex; @@ -36,6 +36,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Weight; +import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; @@ -62,7 +63,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; @@ -74,7 +74,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import java.io.IOException; -import java.util.List; import java.util.Objects; import java.util.Optional; @@ -454,19 +453,13 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder docs = doc.docs(); - int rootDocIndex = docs.size() - 1; - assert rootDocIndex > 0; - for (int i = 0; i < docs.size(); i++) { - ParseContext.Document d = docs.get(i); - MemoryIndex memoryIndex = MemoryIndex.fromDocument(d, analyzer, true, false); - memoryIndices[i] = memoryIndex.createSearcher().getIndexReader(); - } - try { - MultiReader mReader = new MultiReader(memoryIndices, true); - LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); - final IndexSearcher slowSearcher = new 
IndexSearcher(slowReader) { + RAMDirectory ramDirectory = new RAMDirectory(); + try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) { + indexWriter.addDocuments(doc.docs()); + indexWriter.commit(); + DirectoryReader directoryReader = DirectoryReader.open(ramDirectory); + assert directoryReader.leaves().size() == 1 : "Expected single leaf, but got [" + directoryReader.leaves().size() + "]"; + final IndexSearcher slowSearcher = new IndexSearcher(directoryReader) { @Override public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { From 4820d491209fd848ddb9bb307b8b1a7c90c676e7 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 14 Jun 2016 15:14:54 +0200 Subject: [PATCH 26/43] Mustache: Add util functions to render JSON and join array values This pull request adds two util functions to the Mustache templating engine: - {{#toJson}}my_map{{/toJson}} to render a Map parameter as a JSON string - {{#join}}my_iterable{{/join}} to render any iterable (including arrays) as a comma separated list of values like `1, 2, 3`. It's also possible de change the default delimiter (comma) to something else. closes #18970 --- .../reference/search/search-template.asciidoc | 123 ++++++++ .../mustache/CustomMustacheFactory.java | 279 ++++++++++++++++++ .../mustache/JsonEscapingMustacheFactory.java | 41 --- .../mustache/MustacheScriptEngineService.java | 33 +-- .../mustache/NoneEscapingMustacheFactory.java | 40 --- .../mustache/MustacheScriptEngineTests.java | 11 +- .../script/mustache/MustacheTests.java | 224 +++++++++++++- 7 files changed, 635 insertions(+), 116 deletions(-) create mode 100644 modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java delete mode 100644 modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java delete mode 100644 modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/NoneEscapingMustacheFactory.java diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 8533984428b..359b692f528 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -89,6 +89,89 @@ which is rendered as: } ------------------------------------------ + +[float] +===== Concatenating array of values + +The `{{#join}}array{{/join}}` function can be used to concatenate the +values of an array as a comma delimited string: + +[source,js] +------------------------------------------ +GET /_search/template +{ + "inline": { + "query": { + "match": { + "emails": "{{#join}}emails{{/join}}" + } + } + }, + "params": { + "emails": [ "username@email.com", "lastname@email.com" ] + } +} +------------------------------------------ + +which is rendered as: + +[source,js] +------------------------------------------ +{ + "query" : { + "match" : { + "emails" : "username@email.com,lastname@email.com" + } + } +} +------------------------------------------ + +The function also accepts a custom delimiter: + +[source,js] +------------------------------------------ +GET /_search/template +{ + "inline": { + "query": { + "range": { + "born": { + "gte" : "{{date.min}}", + "lte" : "{{date.max}}", + "format": "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}" + } + } + } + }, + "params": { + "date": { + "min": "2016", + "max": "31/12/2017", + "formats": ["dd/MM/yyyy", "yyyy"] + } + } +} +------------------------------------------ + +which is 
rendered as: + +[source,js] +------------------------------------------ +{ + "query" : { + "range" : { + "born" : { + "gte" : "2016", + "lte" : "31/12/2017", + "format" : "dd/MM/yyyy||yyyy" + } + } + } +} + +------------------------------------------ + + [float] ===== Default values @@ -140,6 +223,46 @@ for `end`: } ------------------------------------------ +[float] +===== Converting parameters to JSON + +The `{{toJson}}parameter{{/toJson}}` function can be used to convert parameters +like maps and array to their JSON representation: + +[source,js] +------------------------------------------ +{ + "inline": "{\"query\":{\"bool\":{\"must\": {{#toJson}}clauses{{/toJson}} }}}", + "params": { + "clauses": [ + { "term": "foo" }, + { "term": "bar" } + ] + } +} +------------------------------------------ + +which is rendered as: + +[source,js] +------------------------------------------ +{ + "query" : { + "bool" : { + "must" : [ + { + "term" : "foo" + }, + { + "term" : "bar" + } + ] + } + } +} +------------------------------------------ + + [float] ===== Conditional clauses diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java new file mode 100644 index 00000000000..ceb187bcc63 --- /dev/null +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java @@ -0,0 +1,279 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script.mustache; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import com.github.mustachejava.Code; +import com.github.mustachejava.DefaultMustacheFactory; +import com.github.mustachejava.DefaultMustacheVisitor; +import com.github.mustachejava.Mustache; +import com.github.mustachejava.MustacheException; +import com.github.mustachejava.MustacheVisitor; +import com.github.mustachejava.TemplateContext; +import com.github.mustachejava.codes.IterableCode; +import com.github.mustachejava.codes.WriteCode; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.StringJoiner; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class CustomMustacheFactory extends DefaultMustacheFactory { + + private final BiConsumer encoder; + + public CustomMustacheFactory(boolean escaping) { + super(); + setObjectHandler(new CustomReflectionObjectHandler()); + if (escaping) { + this.encoder = new JsonEscapeEncoder(); + } else { + this.encoder = new NoEscapeEncoder(); + } + } + + @Override + public void encode(String value, Writer writer) { + encoder.accept(value, writer); + } + + @Override + public MustacheVisitor createMustacheVisitor() { + return new CustomMustacheVisitor(this); + } + + class CustomMustacheVisitor extends DefaultMustacheVisitor { + + public CustomMustacheVisitor(DefaultMustacheFactory df) { + super(df); + } + + @Override + public void iterable(TemplateContext templateContext, String variable, Mustache mustache) { + if (ToJsonCode.match(variable)) { + list.add(new ToJsonCode(templateContext, df, mustache, variable)); + } else if (JoinerCode.match(variable)) { + list.add(new JoinerCode(templateContext, df, mustache)); + } else if (CustomJoinerCode.match(variable)) { + list.add(new CustomJoinerCode(templateContext, df, mustache, variable)); + } else { + list.add(new IterableCode(templateContext, df, mustache, variable)); + } + } + } + + /** + * Base class for custom Mustache functions + */ + static abstract class CustomCode extends IterableCode { + + private final String code; + + public CustomCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String code) { + super(tc, df, mustache, extractVariableName(code, mustache, tc)); + this.code = Objects.requireNonNull(code); + } + + @Override + public Writer execute(Writer writer, final List scopes) { + Object resolved = get(scopes); + writer = handle(writer, createFunction(resolved), scopes); + appendText(writer); + return writer; + } + + @Override + protected void tag(Writer writer, String tag) throws IOException { + writer.write(tc.startChars()); + writer.write(tag); + writer.write(code); + writer.write(tc.endChars()); + } + + protected abstract Function createFunction(Object resolved); + + /** + * At compile time, this function extracts the name of the variable: + * {{#toJson}}variable_name{{/toJson}} + */ + protected static String extractVariableName(String fn, Mustache mustache, TemplateContext tc) { + Code[] codes = mustache.getCodes(); + if (codes == null || codes.length != 1) { + throw new MustacheException("Mustache function [" + fn + "] must contain one and only one identifier"); + } + + try (StringWriter capture = 
new StringWriter()) { + // Variable name is in plain text and has type WriteCode + if (codes[0] instanceof WriteCode) { + codes[0].execute(capture, Collections.emptyList()); + return capture.toString(); + } else { + codes[0].identity(capture); + return capture.toString(); + } + } catch (IOException e) { + throw new MustacheException("Exception while parsing mustache function [" + fn + "] at line " + tc.line(), e); + } + } + } + + /** + * This function renders {@link Iterable} and {@link Map} as their JSON representation + */ + static class ToJsonCode extends CustomCode { + + private static final String CODE = "toJson"; + + public ToJsonCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { + super(tc, df, mustache, CODE); + if (CODE.equalsIgnoreCase(variable) == false) { + throw new MustacheException("Mismatch function code [" + CODE + "] cannot be applied to [" + variable + "]"); + } + } + + @Override + @SuppressWarnings("unchecked") + protected Function createFunction(Object resolved) { + return s -> { + if (resolved == null) { + return null; + } + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + if (resolved == null) { + builder.nullValue(); + } else if (resolved instanceof Iterable) { + builder.startArray(); + for (Object o : (Iterable) resolved) { + builder.value(o); + } + builder.endArray(); + } else if (resolved instanceof Map) { + builder.map((Map) resolved); + } else { + // Do not handle as JSON + return oh.stringify(resolved); + } + return builder.string(); + } catch (IOException e) { + throw new MustacheException("Failed to convert object to JSON", e); + } + }; + } + + static boolean match(String variable) { + return CODE.equalsIgnoreCase(variable); + } + } + + /** + * This function concatenates the values of an {@link Iterable} using a given delimiter + */ + static class JoinerCode extends CustomCode { + + protected static final String CODE = "join"; + private static final String DEFAULT_DELIMITER = ","; + + private final String delimiter; + + public JoinerCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String delimiter) { + super(tc, df, mustache, CODE); + this.delimiter = delimiter; + } + + public JoinerCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache) { + this(tc, df, mustache, DEFAULT_DELIMITER); + } + + @Override + protected Function createFunction(Object resolved) { + return s -> { + if (s == null) { + return null; + } else if (resolved instanceof Iterable) { + StringJoiner joiner = new StringJoiner(delimiter); + for (Object o : (Iterable) resolved) { + joiner.add(oh.stringify(o)); + } + return joiner.toString(); + } + return s; + }; + } + + static boolean match(String variable) { + return CODE.equalsIgnoreCase(variable); + } + } + + static class CustomJoinerCode extends JoinerCode { + + private static final Pattern PATTERN = Pattern.compile("^(?:" + CODE + " delimiter='(.*)')$"); + + public CustomJoinerCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { + super(tc, df, mustache, extractDelimiter(variable)); + } + + private static String extractDelimiter(String variable) { + Matcher matcher = PATTERN.matcher(variable); + if (matcher.find()) { + return matcher.group(1); + } + throw new MustacheException("Failed to extract delimiter for join function"); + } + + static boolean match(String variable) { + return PATTERN.matcher(variable).matches(); + } + } + + class NoEscapeEncoder implements BiConsumer { + + @Override + 
public void accept(String s, Writer writer) { + try { + writer.write(s); + } catch (IOException e) { + throw new MustacheException("Failed to encode value: " + s); + } + } + } + + class JsonEscapeEncoder implements BiConsumer { + + @Override + public void accept(String s, Writer writer) { + try { + writer.write(JsonStringEncoder.getInstance().quoteAsString(s)); + } catch (IOException e) { + throw new MustacheException("Failed to escape and encode value: " + s); + } + } + } +} diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java deleted file mode 100644 index 38d48b98f4e..00000000000 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script.mustache; - -import com.fasterxml.jackson.core.io.JsonStringEncoder; -import com.github.mustachejava.DefaultMustacheFactory; -import com.github.mustachejava.MustacheException; - -import java.io.IOException; -import java.io.Writer; - -/** - * A MustacheFactory that does simple JSON escaping. 
- */ -final class JsonEscapingMustacheFactory extends DefaultMustacheFactory { - - @Override - public void encode(String value, Writer writer) { - try { - writer.write(JsonStringEncoder.getInstance().quoteAsString(value)); - } catch (IOException e) { - throw new MustacheException("Failed to encode value: " + value); - } - } -} diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 2a48567333b..66ecf23fa02 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.script.mustache; -import com.github.mustachejava.DefaultMustacheFactory; import com.github.mustachejava.Mustache; +import com.github.mustachejava.MustacheFactory; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; @@ -29,8 +29,8 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.script.GeneralScriptException; +import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; @@ -89,21 +89,13 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme * */ @Override public Object compile(String templateName, String templateSource, Map params) { - String contentType = params.getOrDefault(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE); - final DefaultMustacheFactory mustacheFactory; - switch (contentType){ - case PLAIN_TEXT_CONTENT_TYPE: - mustacheFactory = new NoneEscapingMustacheFactory(); - break; - case JSON_CONTENT_TYPE: - default: - // assume that the default is json encoding: - mustacheFactory = new JsonEscapingMustacheFactory(); - break; - } - mustacheFactory.setObjectHandler(new CustomReflectionObjectHandler()); + final MustacheFactory factory = new CustomMustacheFactory(isJsonEscapingEnabled(params)); Reader reader = new FastStringReader(templateSource); - return mustacheFactory.compile(reader, "query-template"); + return factory.compile(reader, "query-template"); + } + + private boolean isJsonEscapingEnabled(Map params) { + return JSON_CONTENT_TYPE.equals(params.getOrDefault(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE)); } @Override @@ -168,12 +160,9 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme if (sm != null) { sm.checkPermission(SPECIAL_PERMISSION); } - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - ((Mustache) template.compiled()).execute(writer, vars); - return null; - } + AccessController.doPrivileged((PrivilegedAction) () -> { + ((Mustache) template.compiled()).execute(writer, vars); + return null; }); } catch (Exception e) { logger.error("Error running {}", e, template); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/NoneEscapingMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/NoneEscapingMustacheFactory.java deleted file mode 100644 index 
3539402df98..00000000000 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/NoneEscapingMustacheFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.script.mustache; - -import com.github.mustachejava.DefaultMustacheFactory; -import com.github.mustachejava.MustacheException; - -import java.io.IOException; -import java.io.Writer; - -/** - * A MustacheFactory that does no string escaping. - */ -final class NoneEscapingMustacheFactory extends DefaultMustacheFactory { - - @Override - public void encode(String value, Writer writer) { - try { - writer.write(value); - } catch (IOException e) { - throw new MustacheException("Failed to encode value: " + value); - } - } -} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index 254020066b5..054268ef681 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.script.mustache; +import com.github.mustachejava.MustacheFactory; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.CompiledScript; @@ -39,12 +40,12 @@ import static org.hamcrest.Matchers.equalTo; */ public class MustacheScriptEngineTests extends ESTestCase { private MustacheScriptEngineService qe; - private JsonEscapingMustacheFactory escaper; + private MustacheFactory factory; @Before public void setup() { qe = new MustacheScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - escaper = new JsonEscapingMustacheFactory(); + factory = new CustomMustacheFactory(true); } public void testSimpleParameterReplace() { @@ -75,12 +76,12 @@ public class MustacheScriptEngineTests extends ESTestCase { public void testEscapeJson() throws IOException { { StringWriter writer = new StringWriter(); - escaper.encode("hello \n world", writer); + factory.encode("hello \n world", writer); assertThat(writer.toString(), equalTo("hello \\n world")); } { StringWriter writer = new StringWriter(); - escaper.encode("\n", writer); + factory.encode("\n", writer); assertThat(writer.toString(), equalTo("\\n")); } @@ -135,7 +136,7 @@ public class MustacheScriptEngineTests extends ESTestCase { expect.append(escapedChars[charIndex]); } StringWriter target = new StringWriter(); - escaper.encode(writer.toString(), target); + factory.encode(writer.toString(), target); assertThat(expect.toString(), equalTo(target.toString())); } } diff --git 
a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java index f850f117cb6..8b6d0e69573 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java @@ -19,13 +19,16 @@ package org.elasticsearch.script.mustache; import com.github.mustachejava.Mustache; +import com.github.mustachejava.MustacheException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matcher; import java.util.Arrays; import java.util.Collections; @@ -38,6 +41,8 @@ import java.util.Set; import static java.util.Collections.singleton; import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.script.ScriptService.ScriptType.INLINE; import static org.elasticsearch.script.mustache.MustacheScriptEngineService.CONTENT_TYPE_PARAM; import static org.elasticsearch.script.mustache.MustacheScriptEngineService.JSON_CONTENT_TYPE; import static org.elasticsearch.script.mustache.MustacheScriptEngineService.PLAIN_TEXT_CONTENT_TYPE; @@ -45,6 +50,8 @@ import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class MustacheTests extends ESTestCase { @@ -59,7 +66,7 @@ public class MustacheTests extends ESTestCase { Map params = Collections.singletonMap("boost_val", "0.2"); Mustache mustache = (Mustache) engine.compile(null, template, Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "my-name", "mustache", mustache); + CompiledScript compiledScript = new CompiledScript(INLINE, "my-name", "mustache", mustache); ExecutableScript result = engine.executable(compiledScript, params); assertEquals( "Mustache templating broken", @@ -71,7 +78,7 @@ public class MustacheTests extends ESTestCase { public void testArrayAccess() throws Exception { String template = "{{data.0}} {{data.1}}"; - CompiledScript mustache = new CompiledScript(ScriptService.ScriptType.INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); + CompiledScript mustache = new CompiledScript(INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); Map vars = new HashMap<>(); Object data = randomFrom( new String[] { "foo", "bar" }, @@ -97,7 +104,7 @@ public class MustacheTests extends ESTestCase { public void testArrayInArrayAccess() throws Exception { String template = "{{data.0.0}} {{data.0.1}}"; - CompiledScript mustache = new CompiledScript(ScriptService.ScriptType.INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); + CompiledScript mustache = new 
CompiledScript(INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); Map vars = new HashMap<>(); Object data = randomFrom( new String[][] { new String[] { "foo", "bar" }}, @@ -114,7 +121,7 @@ public class MustacheTests extends ESTestCase { public void testMapInArrayAccess() throws Exception { String template = "{{data.0.key}} {{data.1.key}}"; - CompiledScript mustache = new CompiledScript(ScriptService.ScriptType.INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); + CompiledScript mustache = new CompiledScript(INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); Map vars = new HashMap<>(); Object data = randomFrom( new Object[] { singletonMap("key", "foo"), singletonMap("key", "bar") }, @@ -142,7 +149,7 @@ public class MustacheTests extends ESTestCase { // json string escaping enabled: Map params = randomBoolean() ? Collections.emptyMap() : Collections.singletonMap(CONTENT_TYPE_PARAM, JSON_CONTENT_TYPE); Mustache mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", Collections.emptyMap()); - CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "name", "mustache", mustache); + CompiledScript compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); ExecutableScript executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); BytesReference rawResult = (BytesReference) executableScript.run(); String result = rawResult.toUtf8(); @@ -150,7 +157,7 @@ public class MustacheTests extends ESTestCase { // json string escaping disabled: mustache = (Mustache) engine.compile(null, "{ \"field1\": \"{{value}}\"}", Collections.singletonMap(CONTENT_TYPE_PARAM, PLAIN_TEXT_CONTENT_TYPE)); - compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "name", "mustache", mustache); + compiledScript = new CompiledScript(INLINE, "name", "mustache", mustache); executableScript = engine.executable(compiledScript, Collections.singletonMap("value", "a \"value\"")); rawResult = (BytesReference) executableScript.run(); result = rawResult.toUtf8(); @@ -162,7 +169,7 @@ public class MustacheTests extends ESTestCase { List randomList = Arrays.asList(generateRandomStringArray(10, 20, false)); String template = "{{data.array.size}} {{data.list.size}}"; - CompiledScript mustache = new CompiledScript(ScriptService.ScriptType.INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); + CompiledScript mustache = new CompiledScript(INLINE, "inline", "mustache", engine.compile(null, template, Collections.emptyMap())); Map data = new HashMap<>(); data.put("array", randomArrayValues); data.put("list", randomList); @@ -177,4 +184,205 @@ public class MustacheTests extends ESTestCase { String expectedString = String.format(Locale.ROOT, "%s %s", randomArrayValues.length, randomList.size()); assertThat(bytes.toUtf8(), equalTo(expectedString)); } + + public void testPrimitiveToJSON() throws Exception { + String template = "{{#toJson}}ctx{{/toJson}}"; + assertScript(template, Collections.singletonMap("ctx", "value"), equalTo("value")); + assertScript(template, Collections.singletonMap("ctx", ""), equalTo("")); + assertScript(template, Collections.singletonMap("ctx", true), equalTo("true")); + assertScript(template, Collections.singletonMap("ctx", 42), equalTo("42")); + assertScript(template, Collections.singletonMap("ctx", 42L), equalTo("42")); + assertScript(template, 
Collections.singletonMap("ctx", 42.5f), equalTo("42.5")); + assertScript(template, Collections.singletonMap("ctx", null), equalTo("")); + + template = "{{#toJson}}.{{/toJson}}"; + assertScript(template, Collections.singletonMap("ctx", "value"), equalTo("{\"ctx\":\"value\"}")); + assertScript(template, Collections.singletonMap("ctx", ""), equalTo("{\"ctx\":\"\"}")); + assertScript(template, Collections.singletonMap("ctx", true), equalTo("{\"ctx\":true}")); + assertScript(template, Collections.singletonMap("ctx", 42), equalTo("{\"ctx\":42}")); + assertScript(template, Collections.singletonMap("ctx", 42L), equalTo("{\"ctx\":42}")); + assertScript(template, Collections.singletonMap("ctx", 42.5f), equalTo("{\"ctx\":42.5}")); + assertScript(template, Collections.singletonMap("ctx", null), equalTo("{\"ctx\":null}")); + } + + public void testSimpleMapToJSON() throws Exception { + Map human0 = new HashMap<>(); + human0.put("age", 42); + human0.put("name", "John Smith"); + human0.put("height", 1.84); + + Map ctx = Collections.singletonMap("ctx", human0); + + assertScript("{{#toJson}}.{{/toJson}}", ctx, equalTo("{\"ctx\":{\"name\":\"John Smith\",\"age\":42,\"height\":1.84}}")); + assertScript("{{#toJson}}ctx{{/toJson}}", ctx, equalTo("{\"name\":\"John Smith\",\"age\":42,\"height\":1.84}")); + assertScript("{{#toJson}}ctx.name{{/toJson}}", ctx, equalTo("John Smith")); + } + + public void testMultipleMapsToJSON() throws Exception { + Map human0 = new HashMap<>(); + human0.put("age", 42); + human0.put("name", "John Smith"); + human0.put("height", 1.84); + + Map human1 = new HashMap<>(); + human1.put("age", 27); + human1.put("name", "Dave Smith"); + human1.put("height", 1.71); + + Map humans = new HashMap<>(); + humans.put("first", human0); + humans.put("second", human1); + + Map ctx = Collections.singletonMap("ctx", humans); + + assertScript("{{#toJson}}.{{/toJson}}", ctx, + equalTo("{\"ctx\":{\"first\":{\"name\":\"John Smith\",\"age\":42,\"height\":1.84},\"second\":{\"name\":\"Dave Smith\",\"age\":27,\"height\":1.71}}}")); + + assertScript("{{#toJson}}ctx{{/toJson}}", ctx, + equalTo("{\"first\":{\"name\":\"John Smith\",\"age\":42,\"height\":1.84},\"second\":{\"name\":\"Dave Smith\",\"age\":27,\"height\":1.71}}")); + + assertScript("{{#toJson}}ctx.first{{/toJson}}", ctx, + equalTo("{\"name\":\"John Smith\",\"age\":42,\"height\":1.84}")); + + assertScript("{{#toJson}}ctx.second{{/toJson}}", ctx, + equalTo("{\"name\":\"Dave Smith\",\"age\":27,\"height\":1.71}")); + } + + public void testSimpleArrayToJSON() throws Exception { + String[] array = new String[]{"one", "two", "three"}; + Map ctx = Collections.singletonMap("array", array); + + assertScript("{{#toJson}}.{{/toJson}}", ctx, equalTo("{\"array\":[\"one\",\"two\",\"three\"]}")); + assertScript("{{#toJson}}array{{/toJson}}", ctx, equalTo("[\"one\",\"two\",\"three\"]")); + assertScript("{{#toJson}}array.0{{/toJson}}", ctx, equalTo("one")); + assertScript("{{#toJson}}array.1{{/toJson}}", ctx, equalTo("two")); + assertScript("{{#toJson}}array.2{{/toJson}}", ctx, equalTo("three")); + assertScript("{{#toJson}}array.size{{/toJson}}", ctx, equalTo("3")); + } + + public void testSimpleListToJSON() throws Exception { + List list = Arrays.asList("one", "two", "three"); + Map ctx = Collections.singletonMap("ctx", list); + + assertScript("{{#toJson}}.{{/toJson}}", ctx, equalTo("{\"ctx\":[\"one\",\"two\",\"three\"]}")); + assertScript("{{#toJson}}ctx{{/toJson}}", ctx, equalTo("[\"one\",\"two\",\"three\"]")); + assertScript("{{#toJson}}ctx.0{{/toJson}}", ctx, 
equalTo("one")); + assertScript("{{#toJson}}ctx.1{{/toJson}}", ctx, equalTo("two")); + assertScript("{{#toJson}}ctx.2{{/toJson}}", ctx, equalTo("three")); + assertScript("{{#toJson}}ctx.size{{/toJson}}", ctx, equalTo("3")); + } + + public void testsUnsupportedTagsToJson() { + MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{foo}}{{bar}}{{/toJson}}")); + assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier")); + + e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{/toJson}}")); + assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier")); + } + + public void testEmbeddedToJSON() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .startArray("bulks") + .startObject() + .field("index", "index-1") + .field("type", "type-1") + .field("id", 1) + .endObject() + .startObject() + .field("index", "index-2") + .field("type", "type-2") + .field("id", 2) + .endObject() + .endArray() + .endObject(); + + Map ctx = Collections.singletonMap("ctx", XContentHelper.convertToMap(builder.bytes(), false).v2()); + + assertScript("{{#ctx.bulks}}{{#toJson}}.{{/toJson}}{{/ctx.bulks}}", ctx, + equalTo("{\"index\":\"index-1\",\"id\":1,\"type\":\"type-1\"}{\"index\":\"index-2\",\"id\":2,\"type\":\"type-2\"}")); + + assertScript("{{#ctx.bulks}}<{{#toJson}}id{{/toJson}}>{{/ctx.bulks}}", ctx, + equalTo("<1><2>")); + } + + public void testSimpleArrayJoin() throws Exception { + String template = "{{#join}}array{{/join}}"; + assertScript(template, Collections.singletonMap("array", new String[]{"one", "two", "three"}), equalTo("one,two,three")); + assertScript(template, Collections.singletonMap("array", new int[]{1, 2, 3}), equalTo("1,2,3")); + assertScript(template, Collections.singletonMap("array", new long[]{1L, 2L, 3L}), equalTo("1,2,3")); + assertScript(template, Collections.singletonMap("array", new double[]{1.5, 2.5, 3.5}), equalTo("1.5,2.5,3.5")); + assertScript(template, Collections.singletonMap("array", new boolean[]{true, false, true}), equalTo("true,false,true")); + assertScript(template, Collections.singletonMap("array", new boolean[]{true, false, true}), equalTo("true,false,true")); + } + + public void testEmbeddedArrayJoin() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .startArray("people") + .startObject() + .field("name", "John Smith") + .startArray("emails") + .value("john@smith.com") + .value("john.smith@email.com") + .value("jsmith@email.com") + .endArray() + .endObject() + .startObject() + .field("name", "John Doe") + .startArray("emails") + .value("john@doe.com") + .value("john.doe@email.com") + .value("jdoe@email.com") + .endArray() + .endObject() + .endArray() + .endObject(); + + Map ctx = Collections.singletonMap("ctx", XContentHelper.convertToMap(builder.bytes(), false).v2()); + + assertScript("{{#join}}ctx.people.0.emails{{/join}}", ctx, + equalTo("john@smith.com,john.smith@email.com,jsmith@email.com")); + + assertScript("{{#join}}ctx.people.1.emails{{/join}}", ctx, + equalTo("john@doe.com,john.doe@email.com,jdoe@email.com")); + + assertScript("{{#ctx.people}}to: {{#join}}emails{{/join}};{{/ctx.people}}", ctx, + equalTo("to: john@smith.com,john.smith@email.com,jsmith@email.com;to: john@doe.com,john.doe@email.com,jdoe@email.com;")); + } + + public void testJoinWithToJson() { + Map params = Collections.singletonMap("terms", + Arrays.asList(singletonMap("term", "foo"), 
singletonMap("term", "bar")));
+
+        assertScript("{{#join}}{{#toJson}}terms{{/toJson}}{{/join}}", params,
+                equalTo("[{\"term\":\"foo\"},{\"term\":\"bar\"}]"));
+    }
+
+    public void testsUnsupportedTagsJoin() {
+        MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#join}}{{/join}}"));
+        assertThat(e.getMessage(), containsString("Mustache function [join] must contain one and only one identifier"));
+
+        e = expectThrows(MustacheException.class, () -> compile("{{#join delimiter='a'}}{{/join delimiter='b'}}"));
+        assertThat(e.getMessage(), containsString("Mismatched start/end tags"));
+    }
+
+    public void testJoinWithCustomDelimiter() {
+        Map params = Collections.singletonMap("params", Arrays.asList(1, 2, 3, 4));
+
+        assertScript("{{#join delimiter=''}}params{{/join delimiter=''}}", params, equalTo("1234"));
+        assertScript("{{#join delimiter=','}}params{{/join delimiter=','}}", params, equalTo("1,2,3,4"));
+        assertScript("{{#join delimiter='/'}}params{{/join delimiter='/'}}", params, equalTo("1/2/3/4"));
+        assertScript("{{#join delimiter=' and '}}params{{/join delimiter=' and '}}", params, equalTo("1 and 2 and 3 and 4"));
+    }
+
+    private void assertScript(String script, Map vars, Matcher matcher) {
+        Object result = engine.executable(new CompiledScript(INLINE, "inline", "mustache", compile(script)), vars).run();
+        assertThat(result, notNullValue());
+        assertThat(result, instanceOf(BytesReference.class));
+        assertThat(((BytesReference) result).toUtf8(), matcher);
+    }
+
+    private Object compile(String script) {
+        assertThat("cannot compile null or empty script", script, not(isEmptyOrNullString()));
+        return engine.compile(null, script, Collections.emptyMap());
+    }
 }

From 6b7acc0ca221f13031024872879e0e0f01f8c135 Mon Sep 17 00:00:00 2001
From: Clinton Gormley
Date: Wed, 29 Jun 2016 10:24:43 +0200
Subject: [PATCH 27/43] Update index.asciidoc

In-flight requests circuit breaker is done
---
 docs/resiliency/index.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index 24035e0772d..6f3ed169709 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -90,7 +90,7 @@ space. The following issues have been identified:
 * Set a hard limit on `from`/`size` parameters {GIT}9311[#9311]. (STATUS: DONE, v2.1.0)
 * Prevent combinatorial explosion in aggregations from causing OOM {GIT}8081[#8081]. (STATUS: ONGOING)
 * Add the byte size of each hit to the request circuit breaker {GIT}9310[#9310]. (STATUS: ONGOING)
-* Limit the size of individual requests and also add a circuit breaker for the total memory used by in-flight request objects {GIT}16011[#16011]. (STATUS: ONGOING)
+* Limit the size of individual requests and also add a circuit breaker for the total memory used by in-flight request objects {GIT}16011[#16011]. (STATUS: DONE, v5.0.0)
 
 Other safeguards are tracked in the meta-issue {GIT}11511[#11511].

From 872cdffc2748afdb08073f230a226e02e061eba0 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Wed, 29 Jun 2016 10:45:05 +0200
Subject: [PATCH 28/43] Factor out ChannelBuffer from BytesReference (#19129)

The ChannelBuffer interface today leaks into the BytesReference abstraction
which causes a hard dependency on Netty across the board. This change moves
this dependency and all BytesReference -> ChannelBuffer conversion into
NettyUtils and removes the abstraction leak on BytesReference.
This change also removes unused methods on the BytesReference interface and simplifies access to internal pages. --- .../common/bytes/BytesArray.java | 14 -- .../common/bytes/BytesReference.java | 32 ++-- .../common/bytes/PagedBytesReference.java | 127 ++++----------- .../bytes/ReleasablePagedBytesReference.java | 2 +- .../ChannelBufferBytesReference.java | 16 +- .../common/netty/NettyUtils.java | 40 +++++ .../elasticsearch/common/util/ByteArray.java | 12 +- .../http/netty/NettyHttpChannel.java | 3 +- .../http/netty/NettyHttpRequest.java | 4 +- .../netty/ChannelBufferStreamInput.java | 8 +- .../netty/MessageChannelHandler.java | 4 +- .../transport/netty/NettyTransport.java | 6 +- .../netty/NettyTransportChannel.java | 5 +- .../bytes/PagedBytesReferenceTests.java | 153 ++++++++---------- .../common/netty/NettyUtilsTests.java | 95 +++++++++++ .../bytes/ByteBufferBytesReference.java | 14 -- 16 files changed, 278 insertions(+), 257 deletions(-) rename core/src/main/java/org/elasticsearch/common/{bytes => netty}/ChannelBufferBytesReference.java (88%) create mode 100644 core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java rename {core/src/test => test/framework/src/main}/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java (91%) diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 5cb690b7375..5da3c3c6bf3 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -20,14 +20,10 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; import java.io.IOException; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; import java.nio.charset.StandardCharsets; import java.util.Arrays; @@ -103,11 +99,6 @@ public class BytesArray implements BytesReference { os.write(bytes, offset, length); } - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - Channels.writeToChannel(bytes, offset, length(), channel); - } - @Override public byte[] toBytes() { if (offset == 0 && bytes.length == length) { @@ -126,11 +117,6 @@ public class BytesArray implements BytesReference { return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length)); } - @Override - public ChannelBuffer toChannelBuffer() { - return ChannelBuffers.wrappedBuffer(bytes, offset, length); - } - @Override public boolean hasArray() { return true; diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index a72346f9ee8..a07e7cda267 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -19,19 +19,18 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; import java.io.IOException; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; /** * A reference to bytes. 
*/ public interface BytesReference { - public static class Helper { + class Helper { public static boolean bytesEqual(BytesReference a, BytesReference b) { if (a == b) { @@ -108,10 +107,6 @@ public interface BytesReference { */ void writeTo(OutputStream os) throws IOException; - /** - * Writes the bytes directly to the channel. - */ - void writeTo(GatheringByteChannel channel) throws IOException; /** * Returns the bytes as a single byte array. @@ -128,11 +123,6 @@ public interface BytesReference { */ BytesArray copyBytesArray(); - /** - * Returns the bytes as a channel buffer. - */ - ChannelBuffer toChannelBuffer(); - /** * Is there an underlying byte array for this bytes reference. */ @@ -162,4 +152,22 @@ public interface BytesReference { * Converts to a copied Lucene BytesRef. */ BytesRef copyBytesRef(); + + /** + * Returns a BytesRefIterator for this BytesReference. This method allows + * access to the internal pages of this reference without copying them. Use with care! + * @see BytesRefIterator + */ + default BytesRefIterator iterator() { + return new BytesRefIterator() { + BytesRef ref = toBytesRef(); + @Override + public BytesRef next() throws IOException { + BytesRef r = ref; + ref = null; // only return it once... + return r; + } + }; + } + } diff --git a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 16ce91dc38f..af4e1cb29fd 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -20,19 +20,15 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; import java.io.EOFException; import java.io.IOException; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; import java.util.Arrays; /** @@ -113,30 +109,6 @@ public class PagedBytesReference implements BytesReference { } } - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - // nothing to do - if (length == 0) { - return; - } - - int currentLength = length; - int currentOffset = offset; - BytesRef ref = new BytesRef(); - - while (currentLength > 0) { - // try to align to the underlying pages while writing, so no new arrays will be created. - int fragmentSize = Math.min(currentLength, PAGE_SIZE - (currentOffset % PAGE_SIZE)); - boolean newArray = bytearray.get(currentOffset, fragmentSize, ref); - assert !newArray : "PagedBytesReference failed to align with underlying bytearray. 
offset [" + currentOffset + "], size [" + fragmentSize + "]"; - Channels.writeToChannel(ref.bytes, ref.offset, ref.length, channel); - currentLength -= ref.length; - currentOffset += ref.length; - } - - assert currentLength == 0; - } - @Override public byte[] toBytes() { if (length == 0) { @@ -178,60 +150,6 @@ public class PagedBytesReference implements BytesReference { } } - @Override - public ChannelBuffer toChannelBuffer() { - // nothing to do - if (length == 0) { - return ChannelBuffers.EMPTY_BUFFER; - } - - ChannelBuffer[] buffers; - ChannelBuffer currentBuffer = null; - BytesRef ref = new BytesRef(); - int pos = 0; - - // are we a slice? - if (offset != 0) { - // remaining size of page fragment at offset - int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE)); - bytearray.get(offset, fragmentSize, ref); - currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, fragmentSize); - pos += fragmentSize; - } - - // no need to create a composite buffer for a single page - if (pos == length && currentBuffer != null) { - return currentBuffer; - } - - // a slice > pagesize will likely require extra buffers for initial/trailing fragments - int numBuffers = countRequiredBuffers((currentBuffer != null ? 1 : 0), length - pos); - - buffers = new ChannelBuffer[numBuffers]; - int bufferSlot = 0; - - if (currentBuffer != null) { - buffers[bufferSlot] = currentBuffer; - bufferSlot++; - } - - // handle remainder of pages + trailing fragment - while (pos < length) { - int remaining = length - pos; - int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining; - bytearray.get(offset + pos, bulkSize, ref); - currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, bulkSize); - buffers[bufferSlot] = currentBuffer; - bufferSlot++; - pos += bulkSize; - } - - // this would indicate that our numBuffer calculation is off by one. - assert (numBuffers == bufferSlot); - - return ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, buffers); - } - @Override public boolean hasArray() { return (offset + length <= PAGE_SIZE); @@ -338,17 +256,6 @@ public class PagedBytesReference implements BytesReference { return true; } - private int countRequiredBuffers(int initialCount, int numBytes) { - int numBuffers = initialCount; - // an "estimate" of how many pages remain - rounded down - int pages = numBytes / PAGE_SIZE; - // a remaining fragment < pagesize needs at least one buffer - numBuffers += (pages == 0) ? 1 : pages; - // a remainder that is not a multiple of pagesize also needs an extra buffer - numBuffers += (pages > 0 && numBytes % PAGE_SIZE > 0) ? 1 : 0; - return numBuffers; - } - private static class PagedBytesReferenceStreamInput extends StreamInput { private final ByteArray bytearray; @@ -451,4 +358,36 @@ public class PagedBytesReference implements BytesReference { } } + + @Override + public final BytesRefIterator iterator() { + final int offset = this.offset; + final int length = this.length; + // this iteration is page aligned to ensure we do NOT materialize the pages from the ByteArray + // we calculate the initial fragment size here to ensure that if this reference is a slice we are still page aligned + // across the entire iteration. The first page is smaller if our offset != 0 then we start in the middle of the page + // otherwise we iterate full pages until we reach the last chunk which also might end within a page. + final int initialFragmentSize = offset != 0 ? 
PAGE_SIZE - (offset % PAGE_SIZE) : PAGE_SIZE; + return new BytesRefIterator() { + int position = 0; + int nextFragmentSize = Math.min(length, initialFragmentSize); + // this BytesRef is reused across the iteration on purpose - BytesRefIterator interface was designed for this + final BytesRef slice = new BytesRef(); + + @Override + public BytesRef next() throws IOException { + if (nextFragmentSize != 0) { + final boolean materialized = bytearray.get(offset + position, nextFragmentSize, slice); + assert materialized == false : "iteration should be page aligned but array got materialized"; + position += nextFragmentSize; + final int remaining = length - position; + nextFragmentSize = Math.min(remaining, PAGE_SIZE); + return slice; + } else { + assert nextFragmentSize == 0 : "fragmentSize expected [0] but was: [" + nextFragmentSize + "]"; + return null; // we are done with this iteration + } + } + }; + } } diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java index 603087a9213..2152aa226a8 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.util.ByteArray; * An extension to {@link PagedBytesReference} that requires releasing its content. This * class exists to make it explicit when a bytes reference needs to be released, and when not. */ -public class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable { +public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable { public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) { super(bigarrays, bytearray, length); diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java b/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java similarity index 88% rename from core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java rename to core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java index 4d6c11214bb..51b3a57c5c1 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java @@ -16,26 +16,26 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.bytes; +package org.elasticsearch.common.netty; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.transport.netty.ChannelBufferStreamInputFactory; import org.jboss.netty.buffer.ChannelBuffer; import java.io.IOException; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; import java.nio.charset.StandardCharsets; /** */ -public class ChannelBufferBytesReference implements BytesReference { +final class ChannelBufferBytesReference implements BytesReference { private final ChannelBuffer buffer; - public ChannelBufferBytesReference(ChannelBuffer buffer) { + ChannelBufferBytesReference(ChannelBuffer buffer) { this.buffer = buffer; } @@ -64,11 +64,6 @@ public class ChannelBufferBytesReference implements BytesReference { buffer.getBytes(buffer.readerIndex(), os, length()); } - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - Channels.writeToChannel(buffer, buffer.readerIndex(), length(), channel); - } - @Override public byte[] toBytes() { return copyBytesArray().toBytes(); @@ -89,7 +84,6 @@ public class ChannelBufferBytesReference implements BytesReference { return new BytesArray(copy); } - @Override public ChannelBuffer toChannelBuffer() { return buffer.duplicate(); } diff --git a/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java b/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java index 92b82fd2d8c..c37ca3ad6fb 100644 --- a/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java +++ b/core/src/main/java/org/elasticsearch/common/netty/NettyUtils.java @@ -18,12 +18,20 @@ */ package org.elasticsearch.common.netty; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.transport.netty.NettyInternalESLoggerFactory; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.logging.InternalLogger; import org.jboss.netty.logging.InternalLoggerFactory; import org.jboss.netty.util.ThreadNameDeterminer; import org.jboss.netty.util.ThreadRenamingRunnable; +import java.io.IOException; +import java.util.ArrayList; + /** */ public class NettyUtils { @@ -98,4 +106,36 @@ public class NettyUtils { public static void setup() { } + + /** + * Turns the given BytesReference into a ChannelBuffer. Note: the returned ChannelBuffer will reference the internal + * pages of the BytesReference. Don't free the bytes of reference before the ChannelBuffer goes out of scope. 
+ */ + public static ChannelBuffer toChannelBuffer(BytesReference reference) { + if (reference.length() == 0) { + return ChannelBuffers.EMPTY_BUFFER; + } + if (reference instanceof ChannelBufferBytesReference) { + return ((ChannelBufferBytesReference) reference).toChannelBuffer(); + } else { + final BytesRefIterator iterator = reference.iterator(); + BytesRef slice; + final ArrayList buffers = new ArrayList<>(); + try { + while ((slice = iterator.next()) != null) { + buffers.add(ChannelBuffers.wrappedBuffer(slice.bytes, slice.offset, slice.length)); + } + return ChannelBuffers.wrappedBuffer(DEFAULT_GATHERING, buffers.toArray(new ChannelBuffer[buffers.size()])); + } catch (IOException ex) { + throw new AssertionError("no IO happens here", ex); + } + } + } + + /** + * Wraps the given ChannelBuffer with a BytesReference + */ + public static BytesReference toBytesReference(ChannelBuffer channelBuffer) { + return new ChannelBufferBytesReference(channelBuffer); + } } diff --git a/core/src/main/java/org/elasticsearch/common/util/ByteArray.java b/core/src/main/java/org/elasticsearch/common/util/ByteArray.java index 719701a0182..3c7408480d8 100644 --- a/core/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -29,28 +29,28 @@ public interface ByteArray extends BigArray { /** * Get an element given its index. */ - public abstract byte get(long index); + byte get(long index); /** * Set a value at the given index and return the previous value. */ - public abstract byte set(long index, byte value); + byte set(long index, byte value); /** * Get a reference to a slice. - * + * * @return true when a byte[] was materialized, false otherwise. */ - public abstract boolean get(long index, int len, BytesRef ref); + boolean get(long index, int len, BytesRef ref); /** * Bulk set. */ - public abstract void set(long index, byte[] buf, int offset, int len); + void set(long index, byte[] buf, int offset, int len); /** * Fill slots between fromIndex inclusive to toIndex exclusive with value. 
*/ - public abstract void fill(long fromIndex, long toIndex, byte value); + void fill(long fromIndex, long toIndex, byte value); } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java index 22d6743b186..3413a746963 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpChannel.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.netty.ReleaseChannelFutureListener; import org.elasticsearch.http.netty.cors.CorsHandler; import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent; @@ -105,7 +106,7 @@ public final class NettyHttpChannel extends AbstractRestChannel { ChannelBuffer buffer; boolean addedReleaseListener = false; try { - buffer = content.toChannelBuffer(); + buffer = NettyUtils.toChannelBuffer(content); resp.setContent(buffer); // If our response doesn't specify a content-type header, set one diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java index 40ab2d184db..d26841ead97 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpRequest.java @@ -21,7 +21,7 @@ package org.elasticsearch.http.netty; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ChannelBufferBytesReference; +import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.support.RestUtils; import org.jboss.netty.channel.Channel; @@ -47,7 +47,7 @@ public class NettyHttpRequest extends RestRequest { this.channel = channel; this.params = new HashMap<>(); if (request.getContent().readable()) { - this.content = new ChannelBufferBytesReference(request.getContent()); + this.content = NettyUtils.toBytesReference(request.getContent()); } else { this.content = BytesArray.EMPTY; } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java index ce0eea860ef..695be3db2fd 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java @@ -21,8 +21,8 @@ package org.elasticsearch.transport.netty; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ChannelBufferBytesReference; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.netty.NettyUtils; import org.jboss.netty.buffer.ChannelBuffer; import java.io.EOFException; @@ -37,10 +37,6 @@ public class ChannelBufferStreamInput extends StreamInput { private final int startIndex; private final int endIndex; - public ChannelBufferStreamInput(ChannelBuffer buffer) { - this(buffer, buffer.readableBytes()); - } - public ChannelBufferStreamInput(ChannelBuffer buffer, int length) { if (length > 
buffer.readableBytes()) { throw new IndexOutOfBoundsException(); @@ -53,7 +49,7 @@ public class ChannelBufferStreamInput extends StreamInput { @Override public BytesReference readBytesReference(int length) throws IOException { - ChannelBufferBytesReference ref = new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex(), length)); + BytesReference ref = NettyUtils.toBytesReference(buffer.slice(buffer.readerIndex(), length)); buffer.skipBytes(length); return ref; } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 55793384b1b..3274aa7b975 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -21,7 +21,6 @@ package org.elasticsearch.transport.netty; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; -import org.elasticsearch.common.bytes.ChannelBufferBytesReference; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; @@ -29,6 +28,7 @@ import org.elasticsearch.common.compress.NotCompressedException; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -111,7 +111,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { if (TransportStatus.isCompress(status) && hasMessageBytesToRead && buffer.readable()) { Compressor compressor; try { - compressor = CompressorFactory.compressor(new ChannelBufferBytesReference(buffer)); + compressor = CompressorFactory.compressor(NettyUtils.toBytesReference(buffer)); } catch (NotCompressedException ex) { int maxToRead = Math.min(buffer.readableBytes(), 10); int offset = buffer.readerIndex(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 53eb63c86c5..be1305244bc 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -909,14 +909,14 @@ public class NettyTransport extends AbstractLifecycleComponent implem bRequest.writeThin(stream); stream.close(); bytes = bStream.bytes(); - ChannelBuffer headerBuffer = bytes.toChannelBuffer(); - ChannelBuffer contentBuffer = bRequest.bytes().toChannelBuffer(); + ChannelBuffer headerBuffer = NettyUtils.toChannelBuffer(bytes); + ChannelBuffer contentBuffer = NettyUtils.toChannelBuffer(bRequest.bytes()); buffer = ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, headerBuffer, contentBuffer); } else { request.writeTo(stream); stream.close(); bytes = bStream.bytes(); - buffer = bytes.toChannelBuffer(); + buffer = NettyUtils.toChannelBuffer(bytes); } NettyHeader.writeHeader(buffer, requestId, status, version); ChannelFuture future = targetChannel.write(buffer); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java 
b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java index 65ea00d75e5..0d5666408ea 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.netty.ReleaseChannelFutureListener; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportChannel; @@ -106,7 +107,7 @@ public class NettyTransportChannel implements TransportChannel { stream.close(); ReleasablePagedBytesReference bytes = bStream.bytes(); - ChannelBuffer buffer = bytes.toChannelBuffer(); + ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes); NettyHeader.writeHeader(buffer, requestId, status, version); ChannelFuture future = channel.write(buffer); ReleaseChannelFutureListener listener = new ReleaseChannelFutureListener(bytes); @@ -136,7 +137,7 @@ public class NettyTransportChannel implements TransportChannel { status = TransportStatus.setError(status); BytesReference bytes = stream.bytes(); - ChannelBuffer buffer = bytes.toChannelBuffer(); + ChannelBuffer buffer = NettyUtils.toChannelBuffer(bytes); NettyHeader.writeHeader(buffer, requestId, status, version); ChannelFuture future = channel.write(buffer); ChannelFutureListener onResponseSentListener = diff --git a/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java index 9cb633e410e..ccdd50faff7 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java +++ b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,38 +30,19 @@ import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import org.jboss.netty.buffer.ChannelBuffer; import org.junit.After; import org.junit.Before; import java.io.EOFException; import java.io.IOException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; import java.util.Arrays; public class PagedBytesReferenceTests extends ESTestCase { private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE; + private BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false); - private BigArrays bigarrays; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - } - - public void testGet() { + public void testGet() throws IOException { int length = 
randomIntBetween(1, PAGE_SIZE * 3); BytesReference pbr = getRandomizedPagedBytesReference(length); int sliceOffset = randomIntBetween(0, length / 2); @@ -70,7 +52,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1)); } - public void testLength() { + public void testLength() throws IOException { int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)}; for (int i = 0; i < sizes.length; i++) { @@ -79,7 +61,7 @@ public class PagedBytesReferenceTests extends ESTestCase { } } - public void testSlice() { + public void testSlice() throws IOException { int length = randomInt(PAGE_SIZE * 3); BytesReference pbr = getRandomizedPagedBytesReference(length); int sliceOffset = randomIntBetween(0, length / 2); @@ -265,17 +247,6 @@ public class PagedBytesReferenceTests extends ESTestCase { out.close(); } - public void testWriteToChannel() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * 4); - BytesReference pbr = getRandomizedPagedBytesReference(length); - Path tFile = createTempFile(); - try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) { - pbr.writeTo(channel); - assertEquals(pbr.length(), channel.position()); - } - assertArrayEquals(pbr.toBytes(), Files.readAllBytes(tFile)); - } - public void testSliceWriteToOutputStream() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = getRandomizedPagedBytesReference(length); @@ -289,21 +260,7 @@ public class PagedBytesReferenceTests extends ESTestCase { sliceOut.close(); } - public void testSliceWriteToChannel() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(1, length / 2); - int sliceLength = length - sliceOffset; - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - Path tFile = createTempFile(); - try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) { - slice.writeTo(channel); - assertEquals(slice.length(), channel.position()); - } - assertArrayEquals(slice.toBytes(), Files.readAllBytes(tFile)); - } - - public void testToBytes() { + public void testToBytes() throws IOException { int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; for (int i = 0; i < sizes.length; i++) { @@ -319,7 +276,7 @@ public class PagedBytesReferenceTests extends ESTestCase { } } - public void testToBytesArraySharedPage() { + public void testToBytesArraySharedPage() throws IOException { int length = randomIntBetween(10, PAGE_SIZE); BytesReference pbr = getRandomizedPagedBytesReference(length); BytesArray ba = pbr.toBytesArray(); @@ -332,7 +289,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertSame(ba.array(), ba2.array()); } - public void testToBytesArrayMaterializedPages() { + public void testToBytesArrayMaterializedPages() throws IOException { // we need a length != (n * pagesize) to avoid page sharing at boundaries int length = 0; while ((length % PAGE_SIZE) == 0) { @@ -349,7 +306,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertNotSame(ba.array(), ba2.array()); } - public void testCopyBytesArray() { + public void testCopyBytesArray() throws IOException { // small PBR which would normally share the first page int length = randomIntBetween(10, PAGE_SIZE); BytesReference pbr = 
getRandomizedPagedBytesReference(length); @@ -360,7 +317,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertNotSame(ba.array(), ba2.array()); } - public void testSliceCopyBytesArray() { + public void testSliceCopyBytesArray() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); BytesReference pbr = getRandomizedPagedBytesReference(length); int sliceOffset = randomIntBetween(0, pbr.length()); @@ -377,45 +334,67 @@ public class PagedBytesReferenceTests extends ESTestCase { assertArrayEquals(ba1.array(), ba2.array()); } - public void testToChannelBuffer() { + public void testEmptyToBytesRefIterator() throws IOException { + BytesReference pbr = getRandomizedPagedBytesReference(0); + assertNull(pbr.iterator().next()); + } + + public void testIterator() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); BytesReference pbr = getRandomizedPagedBytesReference(length); - ChannelBuffer cb = pbr.toChannelBuffer(); - assertNotNull(cb); - byte[] bufferBytes = new byte[length]; - cb.getBytes(0, bufferBytes); - assertArrayEquals(pbr.toBytes(), bufferBytes); + BytesRefIterator iterator = pbr.iterator(); + BytesRef ref; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); } - public void testEmptyToChannelBuffer() { - BytesReference pbr = getRandomizedPagedBytesReference(0); - ChannelBuffer cb = pbr.toChannelBuffer(); - assertNotNull(cb); - assertEquals(0, pbr.length()); - assertEquals(0, cb.capacity()); - } - - public void testSliceToChannelBuffer() { + public void testSliceIterator() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); BytesReference pbr = getRandomizedPagedBytesReference(length); int sliceOffset = randomIntBetween(0, pbr.length()); int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); BytesReference slice = pbr.slice(sliceOffset, sliceLength); - ChannelBuffer cbSlice = slice.toChannelBuffer(); - assertNotNull(cbSlice); - byte[] sliceBufferBytes = new byte[sliceLength]; - cbSlice.getBytes(0, sliceBufferBytes); - assertArrayEquals(slice.toBytes(), sliceBufferBytes); + BytesRefIterator iterator = slice.iterator(); + BytesRef ref = null; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(slice.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); } - public void testHasArray() { + public void testIteratorRandom() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); + BytesReference pbr = getRandomizedPagedBytesReference(length); + if (randomBoolean()) { + int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); + pbr = pbr.slice(sliceOffset, sliceLength); + } + + if (randomBoolean()) { + pbr = pbr.toBytesArray(); + } + BytesRefIterator iterator = pbr.iterator(); + BytesRef ref = null; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); + } + + public void testHasArray() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3)); BytesReference pbr = 
getRandomizedPagedBytesReference(length); // must return true for <= pagesize assertEquals(length <= PAGE_SIZE, pbr.hasArray()); } - public void testArray() { + public void testArray() throws IOException { int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; for (int i = 0; i < sizes.length; i++) { @@ -437,7 +416,7 @@ public class PagedBytesReferenceTests extends ESTestCase { } } - public void testArrayOffset() { + public void testArrayOffset() throws IOException { int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = getRandomizedPagedBytesReference(length); if (pbr.hasArray()) { @@ -452,7 +431,7 @@ public class PagedBytesReferenceTests extends ESTestCase { } } - public void testSliceArrayOffset() { + public void testSliceArrayOffset() throws IOException { int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = getRandomizedPagedBytesReference(length); int sliceOffset = randomIntBetween(0, pbr.length()); @@ -477,7 +456,7 @@ public class PagedBytesReferenceTests extends ESTestCase { // TODO: good way to test? } - public void testToBytesRef() { + public void testToBytesRef() throws IOException { int length = randomIntBetween(0, PAGE_SIZE); BytesReference pbr = getRandomizedPagedBytesReference(length); BytesRef ref = pbr.toBytesRef(); @@ -486,7 +465,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertEquals(pbr.length(), ref.length); } - public void testSliceToBytesRef() { + public void testSliceToBytesRef() throws IOException { int length = randomIntBetween(0, PAGE_SIZE); BytesReference pbr = getRandomizedPagedBytesReference(length); // get a BytesRef from a slice @@ -498,7 +477,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertEquals(sliceLength, sliceRef.length); } - public void testCopyBytesRef() { + public void testCopyBytesRef() throws IOException { int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = getRandomizedPagedBytesReference(length); BytesRef ref = pbr.copyBytesRef(); @@ -506,7 +485,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertEquals(pbr.length(), ref.length); } - public void testHashCode() { + public void testHashCode() throws IOException { // empty content must have hash 1 (JDK compat) BytesReference pbr = getRandomizedPagedBytesReference(0); assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode()); @@ -542,7 +521,7 @@ public class PagedBytesReferenceTests extends ESTestCase { assertEquals(pbr, pbr2); } - public void testEqualsPeerClass() { + public void testEqualsPeerClass() throws IOException { int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = getRandomizedPagedBytesReference(length); BytesReference ba = new BytesArray(pbr.toBytes()); @@ -569,15 +548,11 @@ public class PagedBytesReferenceTests extends ESTestCase { } } - private BytesReference getRandomizedPagedBytesReference(int length) { + private BytesReference getRandomizedPagedBytesReference(int length) throws IOException { // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); - try { - for (int i = 0; i < length; i++) { - out.writeByte((byte) random().nextInt(1 << 8)); - } - } catch (IOException e) { - fail("should not happen " + e.getMessage()); + for (int i = 0; i < length; i++) { + out.writeByte((byte) 
random().nextInt(1 << 8)); } assertThat(out.size(), Matchers.equalTo(length)); BytesReference ref = out.bytes(); diff --git a/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java b/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java new file mode 100644 index 00000000000..89c282e7d81 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.netty; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.buffer.CompositeChannelBuffer; +import org.junit.Before; + +import java.io.IOException; + +public class NettyUtilsTests extends ESTestCase { + + private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE; + private final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false); + + public void testToChannelBufferWithEmptyRef() throws IOException { + ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(getRandomizedBytesReference(0)); + assertSame(ChannelBuffers.EMPTY_BUFFER, channelBuffer); + } + + public void testToChannelBufferWithSlice() throws IOException { + BytesReference ref = getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE)); + int sliceOffset = randomIntBetween(0, ref.length()); + int sliceLength = randomIntBetween(ref.length() - sliceOffset, ref.length() - sliceOffset); + BytesReference slice = ref.slice(sliceOffset, sliceLength); + ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(slice); + BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer); + assertArrayEquals(slice.toBytes(), bytesReference.toBytes()); + } + + public void testToChannelBufferWithSliceAfter() throws IOException { + BytesReference ref = getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE)); + int sliceOffset = randomIntBetween(0, ref.length()); + int sliceLength = randomIntBetween(ref.length() - sliceOffset, ref.length() - sliceOffset); + ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(ref); + BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer); + assertArrayEquals(ref.slice(sliceOffset, sliceLength).toBytes(), bytesReference.slice(sliceOffset, sliceLength).toBytes()); + } + + public void testToChannelBuffer() throws IOException { + BytesReference ref = 
getRandomizedBytesReference(randomIntBetween(1, 3 * PAGE_SIZE)); + ChannelBuffer channelBuffer = NettyUtils.toChannelBuffer(ref); + BytesReference bytesReference = NettyUtils.toBytesReference(channelBuffer); + if (ref instanceof ChannelBufferBytesReference) { + assertEquals(channelBuffer, ((ChannelBufferBytesReference) ref).toChannelBuffer()); + } else if (ref.hasArray() == false) { // we gather the buffers into a channel buffer + assertTrue(channelBuffer instanceof CompositeChannelBuffer); + } + assertArrayEquals(ref.toBytes(), bytesReference.toBytes()); + } + + private BytesReference getRandomizedBytesReference(int length) throws IOException { + // TODO we should factor out a BaseBytesReferenceTestCase + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertEquals(out.size(), length); + BytesReference ref = out.bytes(); + assertEquals(ref.length(), length); + if (randomBoolean()) { + return ref.toBytesArray(); + } else if (randomBoolean()) { + BytesArray bytesArray = ref.toBytesArray(); + return NettyUtils.toBytesReference(ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(), + bytesArray.length())); + } else { + return ref; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java similarity index 91% rename from core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java rename to test/framework/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java index a272b6627e4..a1c9da18470 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java +++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java @@ -20,18 +20,14 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.util.CharsetUtil; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.CharBuffer; -import java.nio.channels.GatheringByteChannel; import java.nio.charset.CharacterCodingException; import java.nio.charset.CharsetDecoder; import java.nio.charset.CoderResult; @@ -85,11 +81,6 @@ public class ByteBufferBytesReference implements BytesReference { } } - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - Channels.writeToChannel(buffer, channel); - } - @Override public byte[] toBytes() { if (!buffer.hasRemaining()) { @@ -113,11 +104,6 @@ public class ByteBufferBytesReference implements BytesReference { return new BytesArray(toBytes()); } - @Override - public ChannelBuffer toChannelBuffer() { - return ChannelBuffers.wrappedBuffer(buffer); - } - @Override public boolean hasArray() { return buffer.hasArray(); From 39d38e513d2c185204624c95f62ccd2c79c98709 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 29 Jun 2016 13:30:54 +0200 Subject: [PATCH 29/43] [TEST] add wait for yellow --- .../main/resources/rest-api-spec/test/msearch/11_status.yaml 
| 3 +++
 1 file changed, 3 insertions(+)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/11_status.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/11_status.yaml
index 525ea7ac421..ef4d53167b6 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/11_status.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/11_status.yaml
@@ -3,6 +3,9 @@ setup:
   - do:
       indices.create:
         index: test_1
+  - do:
+      cluster.health:
+        wait_for_status: yellow

 ---
 "Check Status":

From 819fe40d616dd021ea7a80111c0c836caec8f7c3 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Wed, 29 Jun 2016 14:45:54 +0200
Subject: [PATCH 30/43] Extract AbstractBytesReferenceTestCase (#19141)

We have a ton of tests for PagedBytesReference but not really many for the
other implementations of BytesReference. This change factors out a basic
AbstractBytesReferenceTestCase that simplifies testing other implementations.
It also caught a couple of bugs here and there, like a missing mask when
reading bytes as ints in PagedBytesReference.
---
 .../common/bytes/BytesArray.java              |   2 +-
 .../common/bytes/BytesReference.java          |   2 +-
 .../common/bytes/PagedBytesReference.java     |   2 +-
 .../netty/ChannelBufferBytesReference.java    |   2 +-
 .../netty/ChannelBufferStreamInput.java       |   2 +-
 .../bytes/AbstractBytesReferenceTestCase.java | 509 ++++++++++++++++++
 .../common/bytes/BytesArrayTests.java         |  41 ++
 .../bytes/PagedBytesReferenceTests.java       | 509 ++----------------
 .../ChannelBufferBytesReferenceTests.java     |  61 +++
 .../common/netty/NettyUtilsTests.java         |   1 -
 10 files changed, 646 insertions(+), 485 deletions(-)
 create mode 100644 core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java
 create mode 100644 core/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java
 create mode 100644 core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java

diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java
index 5da3c3c6bf3..1e5e76f7994 100644
--- a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java
+++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java
@@ -27,7 +27,7 @@ import java.io.OutputStream;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;

-public class BytesArray implements BytesReference {
+public final class BytesArray implements BytesReference {

     public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0);

diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
index a07e7cda267..97d4e22f6b2 100644
--- a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
+++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
@@ -160,7 +160,7 @@ public interface BytesReference {
      */
     default BytesRefIterator iterator() {
         return new BytesRefIterator() {
-            BytesRef ref = toBytesRef();
+            BytesRef ref = length() == 0 ?
null : toBytesRef(); @Override public BytesRef next() throws IOException { BytesRef r = ref; diff --git a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index af4e1cb29fd..70617f903f1 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -297,7 +297,7 @@ public class PagedBytesReference implements BytesReference { @Override public int read() throws IOException { - return (pos < length) ? bytearray.get(offset + pos++) : -1; + return (pos < length) ? Byte.toUnsignedInt(bytearray.get(offset + pos++)) : -1; } @Override diff --git a/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java b/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java index 51b3a57c5c1..42cfe3c611f 100644 --- a/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/netty/ChannelBufferBytesReference.java @@ -51,7 +51,7 @@ final class ChannelBufferBytesReference implements BytesReference { @Override public BytesReference slice(int from, int length) { - return new ChannelBufferBytesReference(buffer.slice(from, length)); + return new ChannelBufferBytesReference(buffer.slice(buffer.readerIndex() + from, length)); } @Override diff --git a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java index 695be3db2fd..9e2b43d43db 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/ChannelBufferStreamInput.java @@ -132,7 +132,7 @@ public class ChannelBufferStreamInput extends StreamInput { public void readBytes(byte[] b, int offset, int len) throws IOException { int read = read(b, offset, len); if (read < len) { - throw new EOFException(); + throw new IndexOutOfBoundsException(); } } diff --git a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java new file mode 100644 index 00000000000..dd9d93dd22a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -0,0 +1,509 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ByteArray; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.io.EOFException; +import java.io.IOException; +import java.util.Arrays; + +public abstract class AbstractBytesReferenceTestCase extends ESTestCase { + + protected static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE; + protected final BigArrays bigarrays = new BigArrays(null, new NoneCircuitBreakerService(), false); + + public void testGet() throws IOException { + int length = randomIntBetween(1, PAGE_SIZE * 3); + BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(0, length / 2); + int sliceLength = Math.max(1, length - sliceOffset - 1); + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + assertEquals(pbr.get(sliceOffset), slice.get(0)); + assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1)); + } + + public void testLength() throws IOException { + int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)}; + + for (int i = 0; i < sizes.length; i++) { + BytesReference pbr = newBytesReference(sizes[i]); + assertEquals(sizes[i], pbr.length()); + } + } + + public void testSlice() throws IOException { + int length = randomInt(PAGE_SIZE * 3); + BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(0, length / 2); + int sliceLength = Math.max(0, length - sliceOffset - 1); + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + assertEquals(sliceLength, slice.length()); + + if (slice.hasArray()) { + assertEquals(sliceOffset, slice.arrayOffset()); + } else { + expectThrows(IllegalStateException.class, () -> + slice.arrayOffset()); + } + } + + public void testStreamInput() throws IOException { + int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + StreamInput si = pbr.streamInput(); + assertNotNull(si); + + // read single bytes one by one + assertEquals(pbr.get(0), si.readByte()); + assertEquals(pbr.get(1), si.readByte()); + assertEquals(pbr.get(2), si.readByte()); + + // reset the stream for bulk reading + si.reset(); + + // buffer for bulk reads + byte[] origBuf = new byte[length]; + random().nextBytes(origBuf); + byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length); + + // bulk-read 0 bytes: must not modify buffer + si.readBytes(targetBuf, 0, 0); + assertEquals(origBuf[0], targetBuf[0]); + si.reset(); + + // read a few few bytes as ints + int bytesToRead = randomIntBetween(1, length / 2); + for (int i = 0; i < bytesToRead; i++) { + int b = si.read(); + assertEquals(pbr.get(i) & 0xff, b); + } + si.reset(); + + // bulk-read all + si.readFully(targetBuf); + assertArrayEquals(pbr.toBytes(), targetBuf); + + // continuing to read should now fail with EOFException + try { + si.readByte(); + fail("expected EOF"); + } catch (EOFException | IndexOutOfBoundsException eof) { + // yay + } + + // try to read more than the stream contains + si.reset(); + 
expectThrows(IndexOutOfBoundsException.class, () -> + si.readBytes(targetBuf, 0, length * 2)); + } + + public void testStreamInputBulkReadWithOffset() throws IOException { + final int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + StreamInput si = pbr.streamInput(); + assertNotNull(si); + + // read a bunch of single bytes one by one + int offset = randomIntBetween(1, length / 2); + for (int i = 0; i < offset; i++) { + assertEquals(si.available(), length - i); + assertEquals(pbr.get(i), si.readByte()); + } + + // now do NOT reset the stream - keep the stream's offset! + + // buffer to compare remaining bytes against bulk read + byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length); + // randomized target buffer to ensure no stale slots + byte[] targetBytes = new byte[pbrBytesWithOffset.length]; + random().nextBytes(targetBytes); + + // bulk-read all + si.readFully(targetBytes); + assertArrayEquals(pbrBytesWithOffset, targetBytes); + assertEquals(si.available(), 0); + } + + public void testRandomReads() throws IOException { + int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + StreamInput streamInput = pbr.streamInput(); + BytesRefBuilder target = new BytesRefBuilder(); + while (target.length() < pbr.length()) { + switch (randomIntBetween(0, 10)) { + case 6: + case 5: + target.append(new BytesRef(new byte[]{streamInput.readByte()})); + break; + case 4: + case 3: + BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length())); + target.append(bytesRef); + break; + default: + byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())]; + int offset = scaledRandomIntBetween(0, buffer.length - 1); + int read = streamInput.read(buffer, offset, buffer.length - offset); + target.append(new BytesRef(buffer, offset, read)); + break; + } + } + assertEquals(pbr.length(), target.length()); + BytesRef targetBytes = target.get(); + assertArrayEquals(pbr.toBytes(), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length)); + } + + public void testSliceStreamInput() throws IOException { + int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + + // test stream input over slice (upper half of original) + int sliceOffset = randomIntBetween(1, length / 2); + int sliceLength = length - sliceOffset; + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + StreamInput sliceInput = slice.streamInput(); + assertEquals(sliceInput.available(), sliceLength); + + // single reads + assertEquals(slice.get(0), sliceInput.readByte()); + assertEquals(slice.get(1), sliceInput.readByte()); + assertEquals(slice.get(2), sliceInput.readByte()); + assertEquals(sliceInput.available(), sliceLength - 3); + + // reset the slice stream for bulk reading + sliceInput.reset(); + assertEquals(sliceInput.available(), sliceLength); + + // bulk read + byte[] sliceBytes = new byte[sliceLength]; + sliceInput.readFully(sliceBytes); + assertEquals(sliceInput.available(), 0); + + // compare slice content with upper half of original + byte[] pbrSliceBytes = Arrays.copyOfRange(pbr.toBytes(), sliceOffset, length); + assertArrayEquals(pbrSliceBytes, sliceBytes); + + // compare slice bytes with bytes read from slice via streamInput :D + byte[] sliceToBytes = 
slice.toBytes(); + assertEquals(sliceBytes.length, sliceToBytes.length); + assertArrayEquals(sliceBytes, sliceToBytes); + + sliceInput.reset(); + assertEquals(sliceInput.available(), sliceLength); + byte[] buffer = new byte[sliceLength + scaledRandomIntBetween(1, 100)]; + int offset = scaledRandomIntBetween(0, Math.max(1, buffer.length - sliceLength - 1)); + int read = sliceInput.read(buffer, offset, sliceLength / 2); + assertEquals(sliceInput.available(), sliceLength - read); + sliceInput.read(buffer, offset + read, sliceLength - read); + assertArrayEquals(sliceBytes, Arrays.copyOfRange(buffer, offset, offset + sliceLength)); + assertEquals(sliceInput.available(), 0); + } + + public void testWriteToOutputStream() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * 4); + BytesReference pbr = newBytesReference(length); + BytesStreamOutput out = new BytesStreamOutput(); + pbr.writeTo(out); + assertEquals(pbr.length(), out.size()); + assertArrayEquals(pbr.toBytes(), out.bytes().toBytes()); + out.close(); + } + + public void testSliceWriteToOutputStream() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5)); + BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(1, length / 2); + int sliceLength = length - sliceOffset; + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + BytesStreamOutput sliceOut = new BytesStreamOutput(sliceLength); + slice.writeTo(sliceOut); + assertEquals(slice.length(), sliceOut.size()); + assertArrayEquals(slice.toBytes(), sliceOut.bytes().toBytes()); + sliceOut.close(); + } + + public void testToBytes() throws IOException { + int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; + for (int i = 0; i < sizes.length; i++) { + BytesReference pbr = newBytesReference(sizes[i]); + byte[] bytes = pbr.toBytes(); + assertEquals(sizes[i], bytes.length); + } + } + + public void testToBytesArraySharedPage() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE); + BytesReference pbr = newBytesReference(length); + BytesArray ba = pbr.toBytesArray(); + BytesArray ba2 = pbr.toBytesArray(); + assertNotNull(ba); + assertNotNull(ba2); + assertEquals(pbr.length(), ba.length()); + assertEquals(ba.length(), ba2.length()); + // single-page optimization + assertSame(ba.array(), ba2.array()); + } + + public void testToBytesArrayMaterializedPages() throws IOException { + // we need a length != (n * pagesize) to avoid page sharing at boundaries + int length = 0; + while ((length % PAGE_SIZE) == 0) { + length = randomIntBetween(PAGE_SIZE, PAGE_SIZE * randomIntBetween(2, 5)); + } + BytesReference pbr = newBytesReference(length); + BytesArray ba = pbr.toBytesArray(); + BytesArray ba2 = pbr.toBytesArray(); + assertNotNull(ba); + assertNotNull(ba2); + assertEquals(pbr.length(), ba.length()); + assertEquals(ba.length(), ba2.length()); + } + + public void testCopyBytesArray() throws IOException { + // small PBR which would normally share the first page + int length = randomIntBetween(10, PAGE_SIZE); + BytesReference pbr = newBytesReference(length); + BytesArray ba = pbr.copyBytesArray(); + BytesArray ba2 = pbr.copyBytesArray(); + assertNotNull(ba); + assertNotSame(ba, ba2); + assertNotSame(ba.array(), ba2.array()); + } + + public void testSliceCopyBytesArray() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); + BytesReference pbr = newBytesReference(length); + int sliceOffset = 
randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + + BytesArray ba1 = slice.copyBytesArray(); + BytesArray ba2 = slice.copyBytesArray(); + assertNotNull(ba1); + assertNotNull(ba2); + assertNotSame(ba1.array(), ba2.array()); + assertArrayEquals(slice.toBytes(), ba1.array()); + assertArrayEquals(slice.toBytes(), ba2.array()); + assertArrayEquals(ba1.array(), ba2.array()); + } + + public void testEmptyToBytesRefIterator() throws IOException { + BytesReference pbr = newBytesReference(0); + assertNull(pbr.iterator().next()); + } + + public void testIterator() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); + BytesReference pbr = newBytesReference(length); + BytesRefIterator iterator = pbr.iterator(); + BytesRef ref; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); + } + + public void testSliceIterator() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); + BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + BytesRefIterator iterator = slice.iterator(); + BytesRef ref = null; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(slice.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); + } + + public void testIteratorRandom() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); + BytesReference pbr = newBytesReference(length); + if (randomBoolean()) { + int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); + pbr = pbr.slice(sliceOffset, sliceLength); + } + + if (randomBoolean()) { + pbr = pbr.toBytesArray(); + } + BytesRefIterator iterator = pbr.iterator(); + BytesRef ref = null; + BytesRefBuilder builder = new BytesRefBuilder(); + while((ref = iterator.next()) != null) { + builder.append(ref); + } + assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); + } + + public void testArray() throws IOException { + int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; + + for (int i = 0; i < sizes.length; i++) { + BytesReference pbr = newBytesReference(sizes[i]); + byte[] array = pbr.array(); + assertNotNull(array); + assertEquals(sizes[i], array.length); + assertSame(array, pbr.array()); + } + } + + public void testArrayOffset() throws IOException { + int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); + BytesReference pbr = newBytesReference(length); + if (pbr.hasArray()) { + assertEquals(0, pbr.arrayOffset()); + } else { + expectThrows(IllegalStateException.class, () -> + pbr.arrayOffset()); + } + } + + public void testSliceArrayOffset() throws IOException { + int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); + BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - 
sliceOffset); + BytesReference slice = pbr.slice(sliceOffset, sliceLength); + if (slice.hasArray()) { + assertEquals(sliceOffset, slice.arrayOffset()); + } else { + expectThrows(IllegalStateException.class, () -> + slice.arrayOffset()); + } + } + + public void testToUtf8() throws IOException { + // test empty + BytesReference pbr = newBytesReference(0); + assertEquals("", pbr.toUtf8()); + // TODO: good way to test? + } + + public void testToBytesRef() throws IOException { + int length = randomIntBetween(0, PAGE_SIZE); + BytesReference pbr = newBytesReference(length); + BytesRef ref = pbr.toBytesRef(); + assertNotNull(ref); + assertEquals(pbr.arrayOffset(), ref.offset); + assertEquals(pbr.length(), ref.length); + } + + public void testSliceToBytesRef() throws IOException { + int length = randomIntBetween(0, PAGE_SIZE); + BytesReference pbr = newBytesReference(length); + // get a BytesRef from a slice + int sliceOffset = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); + BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef(); + // note that these are only true if we have <= than a page, otherwise offset/length are shifted + assertEquals(sliceOffset, sliceRef.offset); + assertEquals(sliceLength, sliceRef.length); + } + + public void testCopyBytesRef() throws IOException { + int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)); + BytesReference pbr = newBytesReference(length); + BytesRef ref = pbr.copyBytesRef(); + assertNotNull(ref); + assertEquals(pbr.length(), ref.length); + } + + public void testHashCode() throws IOException { + // empty content must have hash 1 (JDK compat) + BytesReference pbr = newBytesReference(0); + assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode()); + + // test with content + pbr = newBytesReference(randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5))); + int jdkHash = Arrays.hashCode(pbr.toBytes()); + int pbrHash = pbr.hashCode(); + assertEquals(jdkHash, pbrHash); + + // test hashes of slices + int sliceFrom = randomIntBetween(0, pbr.length()); + int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom); + BytesReference slice = pbr.slice(sliceFrom, sliceLength); + int sliceJdkHash = Arrays.hashCode(slice.toBytes()); + int sliceHash = slice.hashCode(); + assertEquals(sliceJdkHash, sliceHash); + } + + public void testEquals() { + int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); + ByteArray ba1 = bigarrays.newByteArray(length, false); + ByteArray ba2 = bigarrays.newByteArray(length, false); + + // copy contents + for (long i = 0; i < length; i++) { + ba2.set(i, ba1.get(i)); + } + + // get refs & compare + BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length); + BytesReference pbr2 = new PagedBytesReference(bigarrays, ba2, length); + assertEquals(pbr, pbr2); + } + + public void testEqualsPeerClass() throws IOException { + int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); + BytesReference pbr = newBytesReference(length); + BytesReference ba = new BytesArray(pbr.toBytes()); + assertEquals(pbr, ba); + } + + public void testSliceEquals() { + int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); + ByteArray ba1 = bigarrays.newByteArray(length, false); + BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length); + + // test equality of slices + int sliceFrom = randomIntBetween(0, pbr.length()); + int sliceLength = 
randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom); + BytesReference slice1 = pbr.slice(sliceFrom, sliceLength); + BytesReference slice2 = pbr.slice(sliceFrom, sliceLength); + assertArrayEquals(slice1.toBytes(), slice2.toBytes()); + + // test a slice with same offset but different length, + // unless randomized testing gave us a 0-length slice. + if (sliceLength > 0) { + BytesReference slice3 = pbr.slice(sliceFrom, sliceLength / 2); + assertFalse(Arrays.equals(slice1.toBytes(), slice3.toBytes())); + } + } + + protected abstract BytesReference newBytesReference(int length) throws IOException; + +} diff --git a/core/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java b/core/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java new file mode 100644 index 00000000000..61d24ef44c3 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/bytes/BytesArrayTests.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.bytes; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.hamcrest.Matchers; + +import java.io.IOException; + +public class BytesArrayTests extends AbstractBytesReferenceTestCase { + @Override + protected BytesReference newBytesReference(int length) throws IOException { + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + final BytesStreamOutput out = new BytesStreamOutput(length); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertEquals(length, out.size()); + BytesArray ref = out.bytes().toBytesArray(); + assertEquals(length, ref.length()); + assertTrue(ref instanceof BytesArray); + assertThat(ref.length(), Matchers.equalTo(length)); + return ref; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java index ccdd50faff7..5a299d82de8 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java +++ b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTests.java @@ -30,263 +30,24 @@ import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import org.junit.After; -import org.junit.Before; import java.io.EOFException; import java.io.IOException; import java.util.Arrays; -public class PagedBytesReferenceTests extends ESTestCase { +public class PagedBytesReferenceTests extends AbstractBytesReferenceTestCase { - private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE; - private BigArrays bigarrays = new 
BigArrays(null, new NoneCircuitBreakerService(), false); - - public void testGet() throws IOException { - int length = randomIntBetween(1, PAGE_SIZE * 3); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(0, length / 2); - int sliceLength = Math.max(1, length - sliceOffset - 1); - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - assertEquals(pbr.get(sliceOffset), slice.get(0)); - assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1)); - } - - public void testLength() throws IOException { - int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)}; - - for (int i = 0; i < sizes.length; i++) { - BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]); - assertEquals(sizes[i], pbr.length()); + protected BytesReference newBytesReference(int length) throws IOException { + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); } - } - - public void testSlice() throws IOException { - int length = randomInt(PAGE_SIZE * 3); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(0, length / 2); - int sliceLength = Math.max(0, length - sliceOffset - 1); - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - assertEquals(sliceLength, slice.length()); - - if (slice.hasArray()) { - assertEquals(sliceOffset, slice.arrayOffset()); - } else { - try { - slice.arrayOffset(); - fail("expected IllegalStateException"); - } catch (IllegalStateException ise) { - // expected - } - } - } - - public void testStreamInput() throws IOException { - int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - StreamInput si = pbr.streamInput(); - assertNotNull(si); - - // read single bytes one by one - assertEquals(pbr.get(0), si.readByte()); - assertEquals(pbr.get(1), si.readByte()); - assertEquals(pbr.get(2), si.readByte()); - - // reset the stream for bulk reading - si.reset(); - - // buffer for bulk reads - byte[] origBuf = new byte[length]; - random().nextBytes(origBuf); - byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length); - - // bulk-read 0 bytes: must not modify buffer - si.readBytes(targetBuf, 0, 0); - assertEquals(origBuf[0], targetBuf[0]); - si.reset(); - - // read a few few bytes as ints - int bytesToRead = randomIntBetween(1, length / 2); - for (int i = 0; i < bytesToRead; i++) { - int b = si.read(); - assertEquals(pbr.get(i), b); - } - si.reset(); - - // bulk-read all - si.readFully(targetBuf); - assertArrayEquals(pbr.toBytes(), targetBuf); - - // continuing to read should now fail with EOFException - try { - si.readByte(); - fail("expected EOF"); - } catch (EOFException eof) { - // yay - } - - // try to read more than the stream contains - si.reset(); - try { - si.readBytes(targetBuf, 0, length * 2); - fail("expected IndexOutOfBoundsException: le > stream.length"); - } catch (IndexOutOfBoundsException ioob) { - // expected - } - } - - public void testStreamInputBulkReadWithOffset() throws IOException { - final int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - StreamInput si = pbr.streamInput(); - 
assertNotNull(si); - - // read a bunch of single bytes one by one - int offset = randomIntBetween(1, length / 2); - for (int i = 0; i < offset; i++) { - assertEquals(si.available(), length - i); - assertEquals(pbr.get(i), si.readByte()); - } - - // now do NOT reset the stream - keep the stream's offset! - - // buffer to compare remaining bytes against bulk read - byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length); - // randomized target buffer to ensure no stale slots - byte[] targetBytes = new byte[pbrBytesWithOffset.length]; - random().nextBytes(targetBytes); - - // bulk-read all - si.readFully(targetBytes); - assertArrayEquals(pbrBytesWithOffset, targetBytes); - assertEquals(si.available(), 0); - } - - public void testRandomReads() throws IOException { - int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - StreamInput streamInput = pbr.streamInput(); - BytesRefBuilder target = new BytesRefBuilder(); - while (target.length() < pbr.length()) { - switch (randomIntBetween(0, 10)) { - case 6: - case 5: - target.append(new BytesRef(new byte[]{streamInput.readByte()})); - break; - case 4: - case 3: - BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length())); - target.append(bytesRef); - break; - default: - byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())]; - int offset = scaledRandomIntBetween(0, buffer.length - 1); - int read = streamInput.read(buffer, offset, buffer.length - offset); - target.append(new BytesRef(buffer, offset, read)); - break; - } - } - assertEquals(pbr.length(), target.length()); - BytesRef targetBytes = target.get(); - assertArrayEquals(pbr.toBytes(), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length)); - } - - public void testSliceStreamInput() throws IOException { - int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - - // test stream input over slice (upper half of original) - int sliceOffset = randomIntBetween(1, length / 2); - int sliceLength = length - sliceOffset; - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - StreamInput sliceInput = slice.streamInput(); - assertEquals(sliceInput.available(), sliceLength); - - // single reads - assertEquals(slice.get(0), sliceInput.readByte()); - assertEquals(slice.get(1), sliceInput.readByte()); - assertEquals(slice.get(2), sliceInput.readByte()); - assertEquals(sliceInput.available(), sliceLength - 3); - - // reset the slice stream for bulk reading - sliceInput.reset(); - assertEquals(sliceInput.available(), sliceLength); - - // bulk read - byte[] sliceBytes = new byte[sliceLength]; - sliceInput.readFully(sliceBytes); - assertEquals(sliceInput.available(), 0); - - // compare slice content with upper half of original - byte[] pbrSliceBytes = Arrays.copyOfRange(pbr.toBytes(), sliceOffset, length); - assertArrayEquals(pbrSliceBytes, sliceBytes); - - // compare slice bytes with bytes read from slice via streamInput :D - byte[] sliceToBytes = slice.toBytes(); - assertEquals(sliceBytes.length, sliceToBytes.length); - assertArrayEquals(sliceBytes, sliceToBytes); - - sliceInput.reset(); - assertEquals(sliceInput.available(), sliceLength); - byte[] buffer = new byte[sliceLength + scaledRandomIntBetween(1, 100)]; - int offset = scaledRandomIntBetween(0, Math.max(1, buffer.length - 
sliceLength - 1)); - int read = sliceInput.read(buffer, offset, sliceLength / 2); - assertEquals(sliceInput.available(), sliceLength - read); - sliceInput.read(buffer, offset + read, sliceLength); - assertArrayEquals(sliceBytes, Arrays.copyOfRange(buffer, offset, offset + sliceLength)); - assertEquals(sliceInput.available(), 0); - } - - public void testWriteToOutputStream() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * 4); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesStreamOutput out = new BytesStreamOutput(); - pbr.writeTo(out); - assertEquals(pbr.length(), out.size()); - assertArrayEquals(pbr.toBytes(), out.bytes().toBytes()); - out.close(); - } - - public void testSliceWriteToOutputStream() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(1, length / 2); - int sliceLength = length - sliceOffset; - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - BytesStreamOutput sliceOut = new BytesStreamOutput(sliceLength); - slice.writeTo(sliceOut); - assertEquals(slice.length(), sliceOut.size()); - assertArrayEquals(slice.toBytes(), sliceOut.bytes().toBytes()); - sliceOut.close(); - } - - public void testToBytes() throws IOException { - int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; - - for (int i = 0; i < sizes.length; i++) { - BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]); - byte[] bytes = pbr.toBytes(); - assertEquals(sizes[i], bytes.length); - // verify that toBytes() is cheap for small payloads - if (sizes[i] <= PAGE_SIZE) { - assertSame(bytes, pbr.toBytes()); - } else { - assertNotSame(bytes, pbr.toBytes()); - } - } - } - - public void testToBytesArraySharedPage() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesArray ba = pbr.toBytesArray(); - BytesArray ba2 = pbr.toBytesArray(); - assertNotNull(ba); - assertNotNull(ba2); - assertEquals(pbr.length(), ba.length()); - assertEquals(ba.length(), ba2.length()); - // single-page optimization - assertSame(ba.array(), ba2.array()); + assertThat(out.size(), Matchers.equalTo(length)); + BytesReference ref = out.bytes(); + assertThat(ref.length(), Matchers.equalTo(length)); + assertThat(ref, Matchers.instanceOf(PagedBytesReference.class)); + return ref; } public void testToBytesArrayMaterializedPages() throws IOException { @@ -295,7 +56,7 @@ public class PagedBytesReferenceTests extends ESTestCase { while ((length % PAGE_SIZE) == 0) { length = randomIntBetween(PAGE_SIZE, PAGE_SIZE * randomIntBetween(2, 5)); } - BytesReference pbr = getRandomizedPagedBytesReference(length); + BytesReference pbr = newBytesReference(length); BytesArray ba = pbr.toBytesArray(); BytesArray ba2 = pbr.toBytesArray(); assertNotNull(ba); @@ -306,99 +67,11 @@ public class PagedBytesReferenceTests extends ESTestCase { assertNotSame(ba.array(), ba2.array()); } - public void testCopyBytesArray() throws IOException { - // small PBR which would normally share the first page - int length = randomIntBetween(10, PAGE_SIZE); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesArray ba = pbr.copyBytesArray(); - BytesArray ba2 = pbr.copyBytesArray(); - assertNotNull(ba); - assertNotSame(ba, ba2); - assertNotSame(ba.array(), ba2.array()); - } - - public void testSliceCopyBytesArray() throws 
IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - - BytesArray ba1 = slice.copyBytesArray(); - BytesArray ba2 = slice.copyBytesArray(); - assertNotNull(ba1); - assertNotNull(ba2); - assertNotSame(ba1.array(), ba2.array()); - assertArrayEquals(slice.toBytes(), ba1.array()); - assertArrayEquals(slice.toBytes(), ba2.array()); - assertArrayEquals(ba1.array(), ba2.array()); - } - - public void testEmptyToBytesRefIterator() throws IOException { - BytesReference pbr = getRandomizedPagedBytesReference(0); - assertNull(pbr.iterator().next()); - } - - public void testIterator() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesRefIterator iterator = pbr.iterator(); - BytesRef ref; - BytesRefBuilder builder = new BytesRefBuilder(); - while((ref = iterator.next()) != null) { - builder.append(ref); - } - assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); - } - - public void testSliceIterator() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - BytesRefIterator iterator = slice.iterator(); - BytesRef ref = null; - BytesRefBuilder builder = new BytesRefBuilder(); - while((ref = iterator.next()) != null) { - builder.append(ref); - } - assertArrayEquals(slice.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); - } - - public void testIteratorRandom() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - if (randomBoolean()) { - int sliceOffset = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); - pbr = pbr.slice(sliceOffset, sliceLength); - } - - if (randomBoolean()) { - pbr = pbr.toBytesArray(); - } - BytesRefIterator iterator = pbr.iterator(); - BytesRef ref = null; - BytesRefBuilder builder = new BytesRefBuilder(); - while((ref = iterator.next()) != null) { - builder.append(ref); - } - assertArrayEquals(pbr.toBytes(), BytesRef.deepCopyOf(builder.toBytesRef()).bytes); - } - - public void testHasArray() throws IOException { - int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - // must return true for <= pagesize - assertEquals(length <= PAGE_SIZE, pbr.hasArray()); - } - public void testArray() throws IOException { int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; for (int i = 0; i < sizes.length; i++) { - BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]); + BytesReference pbr = newBytesReference(sizes[i]); // verify that array() is cheap for small payloads if (sizes[i] <= PAGE_SIZE) { byte[] array = pbr.array(); @@ -416,149 +89,27 @@ public class PagedBytesReferenceTests extends ESTestCase 
{ } } - public void testArrayOffset() throws IOException { - int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - if (pbr.hasArray()) { - assertEquals(0, pbr.arrayOffset()); - } else { - try { - pbr.arrayOffset(); - fail("expected IllegalStateException"); - } catch (IllegalStateException ise) { - // expected + public void testToBytes() throws IOException { + int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))}; + + for (int i = 0; i < sizes.length; i++) { + BytesReference pbr = newBytesReference(sizes[i]); + byte[] bytes = pbr.toBytes(); + assertEquals(sizes[i], bytes.length); + // verify that toBytes() is cheap for small payloads + if (sizes[i] <= PAGE_SIZE) { + assertSame(bytes, pbr.toBytes()); + } else { + assertNotSame(bytes, pbr.toBytes()); } } } - public void testSliceArrayOffset() throws IOException { - int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - int sliceOffset = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); - BytesReference slice = pbr.slice(sliceOffset, sliceLength); - if (slice.hasArray()) { - assertEquals(sliceOffset, slice.arrayOffset()); - } else { - try { - slice.arrayOffset(); - fail("expected IllegalStateException"); - } catch (IllegalStateException ise) { - // expected - } - } - } - - public void testToUtf8() throws IOException { - // test empty - BytesReference pbr = getRandomizedPagedBytesReference(0); - assertEquals("", pbr.toUtf8()); - // TODO: good way to test? - } - - public void testToBytesRef() throws IOException { - int length = randomIntBetween(0, PAGE_SIZE); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesRef ref = pbr.toBytesRef(); - assertNotNull(ref); - assertEquals(pbr.arrayOffset(), ref.offset); - assertEquals(pbr.length(), ref.length); - } - - public void testSliceToBytesRef() throws IOException { - int length = randomIntBetween(0, PAGE_SIZE); - BytesReference pbr = getRandomizedPagedBytesReference(length); - // get a BytesRef from a slice - int sliceOffset = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceOffset, pbr.length() - sliceOffset); - BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef(); - // note that these are only true if we have <= than a page, otherwise offset/length are shifted - assertEquals(sliceOffset, sliceRef.offset); - assertEquals(sliceLength, sliceRef.length); - } - - public void testCopyBytesRef() throws IOException { - int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesRef ref = pbr.copyBytesRef(); - assertNotNull(ref); - assertEquals(pbr.length(), ref.length); - } - - public void testHashCode() throws IOException { - // empty content must have hash 1 (JDK compat) - BytesReference pbr = getRandomizedPagedBytesReference(0); - assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode()); - - // test with content - pbr = getRandomizedPagedBytesReference(randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5))); - int jdkHash = Arrays.hashCode(pbr.toBytes()); - int pbrHash = pbr.hashCode(); - assertEquals(jdkHash, pbrHash); - - // test hashes of slices - int sliceFrom = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceFrom, 
pbr.length() - sliceFrom); - BytesReference slice = pbr.slice(sliceFrom, sliceLength); - int sliceJdkHash = Arrays.hashCode(slice.toBytes()); - int sliceHash = slice.hashCode(); - assertEquals(sliceJdkHash, sliceHash); - } - - public void testEquals() { - int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); - ByteArray ba1 = bigarrays.newByteArray(length, false); - ByteArray ba2 = bigarrays.newByteArray(length, false); - - // copy contents - for (long i = 0; i < length; i++) { - ba2.set(i, ba1.get(i)); - } - - // get refs & compare - BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length); - BytesReference pbr2 = new PagedBytesReference(bigarrays, ba2, length); - assertEquals(pbr, pbr2); - } - - public void testEqualsPeerClass() throws IOException { - int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); - BytesReference pbr = getRandomizedPagedBytesReference(length); - BytesReference ba = new BytesArray(pbr.toBytes()); - assertEquals(pbr, ba); - } - - public void testSliceEquals() { - int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5)); - ByteArray ba1 = bigarrays.newByteArray(length, false); - BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length); - - // test equality of slices - int sliceFrom = randomIntBetween(0, pbr.length()); - int sliceLength = randomIntBetween(pbr.length() - sliceFrom, pbr.length() - sliceFrom); - BytesReference slice1 = pbr.slice(sliceFrom, sliceLength); - BytesReference slice2 = pbr.slice(sliceFrom, sliceLength); - assertArrayEquals(slice1.toBytes(), slice2.toBytes()); - - // test a slice with same offset but different length, - // unless randomized testing gave us a 0-length slice. - if (sliceLength > 0) { - BytesReference slice3 = pbr.slice(sliceFrom, sliceLength / 2); - assertFalse(Arrays.equals(slice1.toBytes(), slice3.toBytes())); - } - } - - private BytesReference getRandomizedPagedBytesReference(int length) throws IOException { - // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content - ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); - for (int i = 0; i < length; i++) { - out.writeByte((byte) random().nextInt(1 << 8)); - } - assertThat(out.size(), Matchers.equalTo(length)); - BytesReference ref = out.bytes(); - assertThat(ref.length(), Matchers.equalTo(length)); - assertThat(ref, Matchers.instanceOf(PagedBytesReference.class)); - return ref; + public void testHasArray() throws IOException { + int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3)); + BytesReference pbr = newBytesReference(length); + // must return true for <= pagesize + assertEquals(length <= PAGE_SIZE, pbr.hasArray()); } } diff --git a/core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java b/core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java new file mode 100644 index 00000000000..76a8626fee5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/netty/ChannelBufferBytesReferenceTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.netty; + +import org.elasticsearch.common.bytes.AbstractBytesReferenceTestCase; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; + +import java.io.IOException; + +public class ChannelBufferBytesReferenceTests extends AbstractBytesReferenceTestCase { + @Override + protected BytesReference newBytesReference(int length) throws IOException { + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); + for (int i = 0; i < length; i++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertEquals(out.size(), length); + BytesReference ref = out.bytes(); + assertEquals(ref.length(), length); + BytesArray bytesArray = ref.toBytesArray(); + return NettyUtils.toBytesReference(ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(), + bytesArray.length())); + } + + public void testSliceOnAdvancedBuffer() throws IOException { + BytesReference bytesReference = newBytesReference(randomIntBetween(10, 3 * PAGE_SIZE)); + BytesArray bytesArray = bytesReference.toBytesArray(); + + ChannelBuffer channelBuffer = ChannelBuffers.wrappedBuffer(bytesArray.array(), bytesArray.arrayOffset(), + bytesArray.length()); + int numBytesToRead = randomIntBetween(1, 5); + for (int i = 0; i < numBytesToRead; i++) { + channelBuffer.readByte(); + } + BytesReference other = NettyUtils.toBytesReference(channelBuffer); + BytesReference slice = bytesReference.slice(numBytesToRead, bytesReference.length() - numBytesToRead); + assertEquals(other, slice); + + assertEquals(other.slice(3, 1), slice.slice(3, 1)); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java b/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java index 89c282e7d81..2d981dc9eae 100644 --- a/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/netty/NettyUtilsTests.java @@ -73,7 +73,6 @@ public class NettyUtilsTests extends ESTestCase { } private BytesReference getRandomizedBytesReference(int length) throws IOException { - // TODO we should factor out a BaseBytesReferenceTestCase // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays); for (int i = 0; i < length; i++) { From 6d2df0dc183a3e7dc1b9aeb856fd6ab9b94d2b54 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 29 Jun 2016 15:25:51 +0200 Subject: [PATCH 31/43] Fix docs example for the _id field, the field is not accessible in scripts --- docs/reference/mapping/fields/id-field.asciidoc | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index f99f1ec9723..c640b561571 100644 --- 
a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -7,8 +7,8 @@ indexed as its value can be derived automatically from the <> field. The value of the `_id` field is accessible in certain queries (`term`, -`terms`, `match`, `query_string`, `simple_query_string`) and scripts, but -_not_ in aggregations or when sorting, where the <> +`terms`, `match`, `query_string`, `simple_query_string`), but +_not_ in aggregations, scripts or when sorting, where the <> field should be used instead: [source,js] @@ -30,18 +30,9 @@ GET my_index/_search "terms": { "_id": [ "1", "2" ] <1> } - }, - "script_fields": { - "UID": { - "script": { - "lang": "painless", - "inline": "doc['_id']" <2> - } - } } } -------------------------- // CONSOLE <1> Querying on the `_id` field (also see the <>) -<2> Accessing the `_id` field in scripts From 6d5666553c34761fe4cf275974eac4dd1cf63889 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 29 Jun 2016 15:53:57 +0200 Subject: [PATCH 32/43] [TEST] mute test because it fails about 1/100 runs --- .../test/java/org/elasticsearch/common/lucene/LuceneTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index e9f07acb9d1..f4260626160 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -357,6 +357,7 @@ public class LuceneTests extends ESTestCase { dir.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/19151") public void testAsSequentialAccessBits() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer())); From 56fa751928c2d9f89063573de5fc1b97c471a669 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 29 Jun 2016 16:44:12 +0200 Subject: [PATCH 33/43] Plugins: Add status bar on download (#18695) As some plugins are becoming big now, it is hard for the user to know, if the plugin is being downloaded or just nothing happens. This commit adds a progress bar during download, which can be disabled by using the `-q` parameter. In addition this updates to jimfs 1.1, which allows us to test the batch mode, as adding security policies are now supported due to having jimfs:// protocol support in URL stream handlers. 
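As a purely illustrative sketch (the exact rendering comes from the
TerminalProgressInputStream added below, which uses a fixed width of 50
characters and redraws the bar in place with a carriage return), the output
looks roughly like:

  [=========================>                         ] 50%
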
--- .../java/org/elasticsearch/cli/Terminal.java | 11 +- .../plugins/InstallPluginCommand.java | 80 ++++++++---- .../org/elasticsearch/plugins/PluginCli.java | 2 - .../plugins/ProgressInputStream.java | 83 +++++++++++++ .../plugins/ProgressInputStreamTests.java | 116 ++++++++++++++++++ docs/plugins/plugin-script.asciidoc | 6 +- qa/evil-tests/build.gradle | 2 +- .../plugins/InstallPluginCommandTests.java | 46 ++++++- 8 files changed, 315 insertions(+), 31 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/plugins/ProgressInputStream.java create mode 100644 core/src/test/java/org/elasticsearch/plugins/ProgressInputStreamTests.java diff --git a/core/src/main/java/org/elasticsearch/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java index d2dc57263dc..58eb5012d07 100644 --- a/core/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java @@ -19,6 +19,8 @@ package org.elasticsearch.cli; +import org.elasticsearch.common.SuppressForbidden; + import java.io.BufferedReader; import java.io.Console; import java.io.IOException; @@ -26,8 +28,6 @@ import java.io.InputStreamReader; import java.io.PrintWriter; import java.nio.charset.Charset; -import org.elasticsearch.common.SuppressForbidden; - /** * A Terminal wraps access to reading input and writing output for a cli. * @@ -81,8 +81,13 @@ public abstract class Terminal { /** Prints a line to the terminal at {@code verbosity} level. */ public final void println(Verbosity verbosity, String msg) { + print(verbosity, msg + lineSeparator); + } + + /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + public final void print(Verbosity verbosity, String msg) { if (this.verbosity.ordinal() >= verbosity.ordinal()) { - getWriter().print(msg + lineSeparator); + getWriter().print(msg); getWriter().flush(); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 51bb3b6b82f..e9ea2d11e37 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -19,12 +19,31 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SettingCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.internal.InternalSettingsPreparer; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.URL; +import java.net.URLConnection; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; @@ -49,24 +68,6 @@ import java.util.stream.Collectors; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; -import joptsimple.OptionSet; -import 
joptsimple.OptionSpec; -import org.apache.lucene.search.spell.LevensteinDistance; -import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.SettingCommand; -import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserError; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** @@ -107,7 +108,7 @@ class InstallPluginCommand extends SettingCommand { static final Set MODULES; static { try (InputStream stream = InstallPluginCommand.class.getResourceAsStream("/modules.txt"); - BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { + BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { Set modules = new HashSet<>(); String line = reader.readLine(); while (line != null) { @@ -124,7 +125,7 @@ class InstallPluginCommand extends SettingCommand { static final Set OFFICIAL_PLUGINS; static { try (InputStream stream = InstallPluginCommand.class.getResourceAsStream("/plugins.txt"); - BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { + BufferedReader reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8))) { Set plugins = new TreeSet<>(); // use tree set to get sorting for help command String line = reader.readLine(); while (line != null) { @@ -141,6 +142,7 @@ class InstallPluginCommand extends SettingCommand { private final OptionSpec batchOption; private final OptionSpec arguments; + public static final Set DIR_AND_EXECUTABLE_PERMS; public static final Set FILE_PERMS; @@ -273,13 +275,49 @@ class InstallPluginCommand extends SettingCommand { terminal.println(VERBOSE, "Retrieving zip from " + urlString); URL url = new URL(urlString); Path zip = Files.createTempFile(tmpDir, null, ".zip"); - try (InputStream in = url.openStream()) { + URLConnection urlConnection = url.openConnection(); + int contentLength = urlConnection.getContentLength(); + try (InputStream in = new TerminalProgressInputStream(urlConnection.getInputStream(), contentLength, terminal)) { // must overwrite since creating the temp file above actually created the file Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING); } return zip; } + /** + * content length might be -1 for unknown and progress only makes sense if the content length is greater than 0 + */ + private class TerminalProgressInputStream extends ProgressInputStream { + + private final Terminal terminal; + private int width = 50; + private final boolean enabled; + + public TerminalProgressInputStream(InputStream is, int expectedTotalSize, Terminal terminal) { + super(is, expectedTotalSize); + this.terminal = terminal; + this.enabled = expectedTotalSize > 0; + } + + @Override + public void onProgress(int percent) { + if (enabled) { + int currentPosition = percent * width / 100; + StringBuilder sb = new StringBuilder("\r["); + sb.append(String.join("=", Collections.nCopies(currentPosition, ""))); + if (currentPosition > 0 && percent < 100) { + sb.append(">"); + } + sb.append(String.join(" ", 
Collections.nCopies(width - currentPosition, ""))); + sb.append("] %s   "); + if (percent == 100) { + sb.append("\n"); + } + terminal.print(Terminal.Verbosity.NORMAL, String.format(Locale.ROOT, sb.toString(), percent + "%")); + } + } + } + /** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */ private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tmpDir) throws Exception { Path zip = downloadZip(terminal, urlString, tmpDir); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java index 3a88c4d0083..3ce60882cce 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -26,8 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.util.Collections; - /** * A cli tool for adding, removing and listing plugins for elasticsearch. */ diff --git a/core/src/main/java/org/elasticsearch/plugins/ProgressInputStream.java b/core/src/main/java/org/elasticsearch/plugins/ProgressInputStream.java new file mode 100644 index 00000000000..16e1f203bb3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/ProgressInputStream.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; + +/** + * An input stream that allows to add a listener to monitor progress + * The listener is triggered whenever a full percent is increased + * The listener is never triggered twice on the same percentage + * The listener will always return 99 percent, if the expectedTotalSize is exceeded, until it is finished + * + * Only used by the InstallPluginCommand, thus package private here + */ +abstract class ProgressInputStream extends FilterInputStream { + + private final int expectedTotalSize; + private int currentPercent; + private int count = 0; + + public ProgressInputStream(InputStream is, int expectedTotalSize) { + super(is); + this.expectedTotalSize = expectedTotalSize; + this.currentPercent = 0; + } + + @Override + public int read() throws IOException { + int read = in.read(); + checkProgress(read == -1 ? -1 : 1); + return read; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int byteCount = super.read(b, off, len); + checkProgress(byteCount); + return byteCount; + } + + @Override + public int read(byte b[]) throws IOException { + return read(b, 0, b.length); + } + + void checkProgress(int byteCount) { + // are we done? 
+ if (byteCount == -1) { + currentPercent = 100; + onProgress(currentPercent); + } else { + count += byteCount; + // rounding up to 100% would mean we say we are done, before we are... + // this also catches issues, when expectedTotalSize was guessed wrong + int percent = Math.min(99, (int) Math.floor(100.0*count/expectedTotalSize)); + if (percent > currentPercent) { + currentPercent = percent; + onProgress(percent); + } + } + } + + public void onProgress(int percent) {} +} diff --git a/core/src/test/java/org/elasticsearch/plugins/ProgressInputStreamTests.java b/core/src/test/java/org/elasticsearch/plugins/ProgressInputStreamTests.java new file mode 100644 index 00000000000..81e937d26a9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/plugins/ProgressInputStreamTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasSize; + +public class ProgressInputStreamTests extends ESTestCase { + + private List progresses = new ArrayList<>(); + + public void testThatProgressListenerIsCalled() throws Exception { + ProgressInputStream is = newProgressInputStream(0); + is.checkProgress(-1); + + assertThat(progresses, hasSize(1)); + assertThat(progresses, hasItems(100)); + } + + public void testThatProgressListenerIsCalledOnUnexpectedCompletion() throws Exception { + ProgressInputStream is = newProgressInputStream(2); + is.checkProgress(-1); + assertThat(progresses, hasItems(100)); + } + + public void testThatProgressListenerReturnsMaxValueOnWrongExpectedSize() throws Exception { + ProgressInputStream is = newProgressInputStream(2); + + is.checkProgress(1); + assertThat(progresses, hasItems(50)); + + is.checkProgress(3); + assertThat(progresses, hasItems(50, 99)); + + is.checkProgress(-1); + assertThat(progresses, hasItems(50, 99, 100)); + } + + public void testOneByte() throws Exception { + ProgressInputStream is = newProgressInputStream(1); + is.checkProgress(1); + is.checkProgress(-1); + + assertThat(progresses, hasItems(99, 100)); + + } + + public void testOddBytes() throws Exception { + int odd = (randomIntBetween(100, 200) / 2) + 1; + ProgressInputStream is = newProgressInputStream(odd); + for (int i = 0; i < odd; i++) { + is.checkProgress(1); + } + is.checkProgress(-1); + + assertThat(progresses, hasSize(odd+1)); + assertThat(progresses, hasItem(100)); + } + + public void testEvenBytes() throws Exception { + int even = (randomIntBetween(100, 200) / 2); + ProgressInputStream is = newProgressInputStream(even); + + for (int i = 0; i < even; i++) { + 
is.checkProgress(1); + } + is.checkProgress(-1); + + assertThat(progresses, hasSize(even+1)); + assertThat(progresses, hasItem(100)); + } + + public void testOnProgressCannotBeCalledMoreThanOncePerPercent() throws Exception { + int count = randomIntBetween(150, 300); + ProgressInputStream is = newProgressInputStream(count); + + for (int i = 0; i < count; i++) { + is.checkProgress(1); + } + is.checkProgress(-1); + + assertThat(progresses, hasSize(100)); + } + + private ProgressInputStream newProgressInputStream(int expectedSize) { + return new ProgressInputStream(null, expectedSize) { + @Override + public void onProgress(int percent) { + progresses.add(percent); + } + }; + } +} \ No newline at end of file diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 1e21288e39c..987cc7c9758 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -51,7 +51,7 @@ sudo bin/elasticsearch-plugin install analysis-icu ----------------------------------- This command will install the version of the plugin that matches your -Elasticsearch version. +Elasticsearch version and also show a progress bar while downloading. [float] === Custom URL or file system @@ -117,8 +117,8 @@ The `plugin` scripts supports a number of other command line parameters: === Silent/Verbose mode The `--verbose` parameter outputs more debug information, while the `--silent` -parameter turns off all output. The script may return the following exit -codes: +parameter turns off all output including the progress bar. The script may +return the following exit codes: [horizontal] `0`:: everything was OK diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 53406f1aad9..cba9334fbca 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -26,7 +26,7 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile 'com.google.jimfs:jimfs:1.0' + testCompile 'com.google.jimfs:jimfs:1.1' } // TODO: give each evil test its own fresh JVM for more isolation. 
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 2b9b6ec6ab9..e5117fa0aa0 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -25,6 +25,7 @@ import com.google.common.jimfs.Jimfs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserError; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; @@ -70,6 +71,7 @@ import java.util.zip.ZipOutputStream; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.not; @LuceneTestCase.SuppressFileSystems("*") public class InstallPluginCommandTests extends ESTestCase { @@ -179,6 +181,10 @@ public class InstallPluginCommandTests extends ESTestCase { /** creates a plugin .zip and returns the url for testing */ static String createPlugin(String name, Path structure) throws IOException { + return createPlugin(name, structure, false); + } + + static String createPlugin(String name, Path structure, boolean createSecurityPolicyFile) throws IOException { PluginTestUtil.writeProperties(structure, "description", "fake desc", "name", name, @@ -186,6 +192,10 @@ public class InstallPluginCommandTests extends ESTestCase { "elasticsearch.version", Version.CURRENT.toString(), "java.version", System.getProperty("java.specification.version"), "classname", "FakePlugin"); + if (createSecurityPolicyFile) { + String securityPolicyContent = "grant {\n permission java.lang.RuntimePermission \"setFactory\";\n};\n"; + Files.write(structure.resolve("plugin-security.policy"), securityPolicyContent.getBytes(StandardCharsets.UTF_8)); + } writeJar(structure.resolve("plugin.jar"), "FakePlugin"); return writeZip(structure, "elasticsearch"); } @@ -583,7 +593,41 @@ public class InstallPluginCommandTests extends ESTestCase { assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } - // TODO: test batch flag? 
+ public void testBatchFlag() throws Exception { + MockTerminal terminal = new MockTerminal(); + installPlugin(terminal, true); + assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions")); + } + + public void testQuietFlagDisabled() throws Exception { + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(randomFrom(Terminal.Verbosity.NORMAL, Terminal.Verbosity.VERBOSE)); + installPlugin(terminal, false); + assertThat(terminal.getOutput(), containsString("100%")); + } + + public void testQuietFlagEnabled() throws Exception { + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.SILENT); + installPlugin(terminal, false); + assertThat(terminal.getOutput(), not(containsString("100%"))); + } + + private void installPlugin(MockTerminal terminal, boolean isBatch) throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + // if batch is enabled, we also want to add a security policy + String pluginZip = createPlugin("fake", pluginDir, isBatch); + + Map settings = new HashMap<>(); + settings.put("path.home", env.v1().toString()); + new InstallPluginCommand() { + @Override + void jarHellCheck(Path candidate, Path pluginsDir) throws Exception { + } + }.execute(terminal, pluginZip, isBatch, settings); + } + // TODO: test checksum (need maven/official below) // TODO: test maven, official, and staging install...need tests with fixtures... } From 4f82b2de1a63793c775f6180f2234d39000c0949 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Wed, 29 Jun 2016 16:57:57 +0200 Subject: [PATCH 34/43] Fixed bad asciidoc in azure discovery --- docs/plugins/discovery-azure.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/plugins/discovery-azure.asciidoc b/docs/plugins/discovery-azure.asciidoc index 825c258133b..f49f1568cab 100644 --- a/docs/plugins/discovery-azure.asciidoc +++ b/docs/plugins/discovery-azure.asciidoc @@ -56,7 +56,7 @@ discovery: .Binding the network host ============================================== -WARNING: The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory. +The keystore file must be placed in a directory accessible by elasticsearch like the `config` directory. It's important to define `network.host` as by default it's bound to `localhost`. From ef89e564f45d755c4b08fb030413b8dc28fff155 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 29 Jun 2016 11:04:54 -0400 Subject: [PATCH 35/43] Update Vagrant boxes before running packaging test This commit adds an execution of a Vagrant box update task before bringing a Vagrant box up for running packaging tests. 
Relates #19155 --- qa/vagrant/build.gradle | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 784655edaa3..89668a06534 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -223,6 +223,12 @@ for (String box : availableBoxes) { continue; } + Task update = tasks.create("vagrant${boxTask}#update", VagrantCommandTask) { + boxName box + args 'box', 'update', box + dependsOn checkVagrantVersion + } + Task up = tasks.create("vagrant${boxTask}#up", VagrantCommandTask) { boxName box /* Its important that we try to reprovision the box even if it already @@ -238,7 +244,7 @@ for (String box : availableBoxes) { args 'up', box, '--provision', '--provider', 'virtualbox' /* It'd be possible to check if the box is already up here and output SKIPPED but that would require running vagrant status which is slow! */ - dependsOn checkVagrantVersion + dependsOn update } Task smoke = tasks.create("vagrant${boxTask}#smoketest", Exec) { From 83d7f199c76195bca371ce356e4171722e95bd5e Mon Sep 17 00:00:00 2001 From: Paul Echeverri Date: Wed, 22 Jun 2016 18:48:41 -0700 Subject: [PATCH 36/43] Partial draft for Java Update-by-Query --- docs/java-api/docs/update-by-query.asciidoc | 522 ++++++++++++++++++++ 1 file changed, 522 insertions(+) create mode 100644 docs/java-api/docs/update-by-query.asciidoc diff --git a/docs/java-api/docs/update-by-query.asciidoc b/docs/java-api/docs/update-by-query.asciidoc new file mode 100644 index 00000000000..eff4be0182d --- /dev/null +++ b/docs/java-api/docs/update-by-query.asciidoc @@ -0,0 +1,522 @@ +[[docs-update-by-query]] +== Update By Query API + +experimental[The update-by-query API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] + +The simplest usage of `updateByQuery` updates each +document in an index without changing the source. This usage enables +<> or another online +mapping change. + +[source,java] +-------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + +updateByQuery.source("cool_things") + .filter(termQuery("level", "awesome")) + .script(new Script("ctx._source.awesome = \"absolutely\"")); + +BulkIndexByScrollResponse response = updateByQuery.get(); +-------------------------------------------------- + +The `updatebyQuery` API returns a JSON object similar to the following example: + +[source,java] +-------------------------------------------------- +{ + "took" : 147, + "timed_out": false, + "updated": 120, + "deleted": 0, + "batches": 1, + "version_conflicts": 0, + "noops": 0, + "retries": { + "bulk": 0, + "search": 0 + }, + "throttled_millis": 0, + "requests_per_second": "unlimited", + "throttled_until_millis": 0, + "total": 120, + "failures" : [ ] +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] + +Calls to the `updateByQuery` API start by getting a snapshot of the index, indexing +any documents found using the `internal` versioning. + +NOTE: Version conflicts happen when a document changes between the time of the +snapshot and the time the index request processes. + +When the versions match, `updateByQuery` updates the document +and increments the version number. + +All update and query failures cause `updateByQuery` to abort. These failures are +listed in the `failures` section of the JSON response object. 
Any successful updates +remain and are not rolled back. While the first failure causes the abort, the JSON +response object contains all of the failures generated by the failed bulk request. + +To prevent version conflicts from causing `updateByQuery` to abort, +set `conflicts=proceed` on the URL or `"conflicts": "proceed"` +in the request body. The first example does this because it is trying to +pick up an online mapping change and a version conflict means that the +conflicting document was updated between the start of the `updateByQuery` +and the time when it attempted to update the document. This is fine because +that update will have picked up the online mapping update. + +Back to the API format, you can limit `updateByQuery` to a single type. This +will only update `tweet` documents from the `twitter` index: + +// provide API Example + +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +You can also limit `updateByQuery` using the +<>. This example updates all documents from the +`twitter` index for the user `kimchy`: + + +// provide API Example + +[source,java] +-------------------------------------------------- + +{ + "query": { <1> + "term": { + "user": "kimchy" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +<1> The query must be passed as a value to the `query` key, in the same +way as the <>. You can also use the `q` +parameter in the same way as the search api. + +So far we've only been updating documents without changing their source. That +is genuinely useful for things like +<> but it's only half the +fun. `updateByQuery` supports a `script` object to update the document. This +will increment the `likes` field on all of kimchy's tweets: + +// provide API Example + +[source,java] +-------------------------------------------------- + +{ + "script": { + "inline": "ctx._source.likes++" + }, + "query": { + "term": { + "user": "kimchy" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +Just as in <> you can set `ctx.op` to change the +operation that is executed: + +`noop`:: + +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any +changes. That will cause `updateByQuery` to omit that document from its updates. + This no operation will be reported in the `noop` counter in the +<>. + +`delete`:: + +Set `ctx.op = "delete"` if your script decides that the document must be + deleted. The deletion will be reported in the `deleted` counter in the +<>. + +Setting `ctx.op` to anything else is an error. Setting any +other field in `ctx` is an error. + +Note that we stopped specifying `conflicts=proceed`. In this case we want a +version conflict to abort the process so we can handle the failure. + +This API doesn't allow you to move the documents it touches, just modify their +source. This is intentional! We've made no provisions for removing the document +from its original location. 
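+
+As a quick illustration of the `ctx.op` values described above, a script can
+decide between a normal update and a no-op per document (the `likes > 100`
+threshold here is made up purely for this example):
+
+[source,js]
+--------------------------------------------------
+if (ctx._source.likes > 100) {
+  ctx.op = "noop"          // skip documents that do not need the update
+} else {
+  ctx._source.likes++      // otherwise perform the update as usual
+}
+--------------------------------------------------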
+ +It's also possible to do this whole thing on multiple indexes and multiple +types at once, just like the search API: + +// provide API Example + +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT twitter\nPUT blog\nGET _cluster\/health?wait_for_status=yellow\n/] + +If you provide `routing` then the routing is copied to the scroll query, +limiting the process to the shards that match that routing value: + +// provide API Example +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +By default `updateByQuery` uses scroll batches of 1000. You can change the +batch size with the `scroll_size` URL parameter: + +// provide API Example +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +`updateByQuery` can also use the <> feature by +specifying a `pipeline` like this: + +// provide API Example +[source,java] +-------------------------------------------------- + +{ + "description" : "sets foo", + "processors" : [ { + "set" : { + "field": "foo", + "value": "bar" + } + } ] +} +POST twitter/_update_by_query?pipeline=set-foo +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +[float] +=== URL Parameters + +In addition to the standard parameters like `pretty`, the Update By Query API +also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`. + +Sending the `refresh` will update all shards in the index being updated when +the request completes. This is different than the Index API's `refresh` +parameter which causes just the shard that received the new data to be indexed. + +If the request contains `wait_for_completion=false` then Elasticsearch will +perform some preflight checks, launch the request, and then return a `task` +which can be used with <> +to cancel or get the status of the task. Elasticsearch will also create a +record of this task as a document at `.tasks/task/${taskId}`. This is yours +to keep or remove as you see fit. When you are done with it, delete it so +Elasticsearch can reclaim the space it uses. + +`consistency` controls how many copies of a shard must respond to each write +request. `timeout` controls how long each write request waits for unavailable +shards to become available. Both work exactly how they work in the +<>. + +`requests_per_second` can be set to any decimal number (`1.4`, `6`, `1000`, etc) +and throttles the number of requests per second that the update by query issues. +The throttling is done waiting between bulk batches so that it can manipulate +the scroll timeout. The wait time is the difference between the time it took the +batch to complete and the time `requests_per_second * requests_in_the_batch`. +Since the batch isn't broken into multiple bulk requests large batch sizes will +cause Elasticsearch to create many requests and then wait for a while before +starting the next set. This is "bursty" instead of "smooth". The default is +`unlimited` which is also the only non-number value that it accepts. 
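+
+As a worked example (illustrative numbers only, assuming the intended time
+budget per batch is `requests_in_the_batch / requests_per_second`): with the
+default scroll batch size of 1000 and `requests_per_second` set to `500`, each
+batch is budgeted 1000 / 500 = 2 seconds. If the bulk request for a batch
+completes in 0.5 seconds, the update by query sleeps for roughly the remaining
+1.5 seconds before starting the next batch.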
+ +[float] +[[docs-update-by-query-response-body]] +=== Response body + +The JSON response looks like this: + +[source,java] +-------------------------------------------------- +{ + "took" : 639, + "updated": 0, + "batches": 1, + "version_conflicts": 2, + "retries": { + "bulk": 0, + "search": 0 + } + "throttled_millis": 0, + "failures" : [ ] +} +-------------------------------------------------- + +`took`:: + +The number of milliseconds from start to end of the whole operation. + +`updated`:: + +The number of documents that were successfully updated. + +`batches`:: + +The number of scroll responses pulled back by the the update by query. + +`version_conflicts`:: + +The number of version conflicts that the update by query hit. + +`retries`:: + +The number of retries attempted by update-by-query. `bulk` is the number of bulk +actions retried and `search` is the number of search actions retried. + +`throttled_millis`:: + +Number of milliseconds the request slept to conform to `requests_per_second`. + +`failures`:: + +Array of all indexing failures. If this is non-empty then the request aborted +because of those failures. See `conflicts` for how to prevent version conflicts +from aborting the operation. + + +[float] +[[docs-update-by-query-task-api]] +=== Works with the Task API + +You can fetch the status of all running update-by-query requests with the +<>: + +// provide API Example + +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE + +The responses looks like: + +[source,java] +-------------------------------------------------- +{ + "nodes" : { + "r1A2WoRbTwKZ516z6NEs5A" : { + "name" : "Tyrannus", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1:9300", + "attributes" : { + "testattr" : "test", + "portsfile" : "true" + }, + "tasks" : { + "r1A2WoRbTwKZ516z6NEs5A:36619" : { + "node" : "r1A2WoRbTwKZ516z6NEs5A", + "id" : 36619, + "type" : "transport", + "action" : "indices:data/write/update/byquery", + "status" : { <1> + "total" : 6154, + "updated" : 3500, + "created" : 0, + "deleted" : 0, + "batches" : 4, + "version_conflicts" : 0, + "noops" : 0, + "retries": { + "bulk": 0, + "search": 0 + } + "throttled_millis": 0 + }, + "description" : "" + } + } + } + } +} +-------------------------------------------------- + +<1> this object contains the actual status. It is just like the response json +with the important addition of the `total` field. `total` is the total number +of operations that the reindex expects to perform. You can estimate the +progress by adding the `updated`, `created`, and `deleted` fields. The request +will finish when their sum is equal to the `total` field. + +With the task id you can look up the task directly: + +// provide API Example +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE +// TEST[catch:missing] + +The advantage of this API is that it integrates with `wait_for_completion=false` +to transparently return the status of completed tasks. If the task is completed +and `wait_for_completion=false` was set on it them it'll come back with a +`results` or an `error` field. The cost of this feature is the document that +`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to +you to delete that document. 
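+
+Returning to the sample status above, you could estimate progress as
+(3500 updated + 0 created + 0 deleted) / 6154 total, or roughly 57% complete.
+The task is finished once that sum reaches the `total` field.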
+ +[float] +[[docs-update-by-query-cancel-task-api]] +=== Works with the Cancel Task API + +Any Update By Query can be canceled using the <>: + +// provide API Example +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE + +The `task_id` can be found using the tasks API above. + +Cancelation should happen quickly but might take a few seconds. The task status +API above will continue to list the task until it is wakes to cancel itself. + + +[float] +[[docs-update-by-query-rethrottle]] +=== Rethrottling + +The value of `requests_per_second` can be changed on a running update by query +using the `_rethrottle` API: + +// provide API Example +[source,java] +-------------------------------------------------- + +-------------------------------------------------- +// CONSOLE + +The `task_id` can be found using the tasks API above. + +Just like when setting it on the `updateByQuery` API `requests_per_second` +can be either `unlimited` to disable throttling or any decimal number like `1.7` +or `12` to throttle to that level. Rethrottling that speeds up the query takes +effect immediately but rethrotting that slows down the query will take effect +on after completing the current batch. This prevents scroll timeouts. + + +[float] +[[picking-up-a-new-property]] +=== Pick up a new property + +Say you created an index without dynamic mapping, filled it with data, and then +added a mapping value to pick up more fields from the data: + +[source,java] +-------------------------------------------------- +PUT test +{ + "mappings": { + "test": { + "dynamic": false, <1> + "properties": { + "text": {"type": "text"} + } + } + } +} + +POST test/test?refresh +{ + "text": "words words", + "flag": "bar" +} +POST test/test?refresh +{ + "text": "words words", + "flag": "foo" +} +PUT test/_mapping/test <2> +{ + "properties": { + "text": {"type": "text"}, + "flag": {"type": "text", "analyzer": "keyword"} + } +} +-------------------------------------------------- +// CONSOLE + +<1> This means that new fields won't be indexed, just stored in `_source`. + +<2> This updates the mapping to add the new `flag` field. To pick up the new +field you have to reindex all documents with it. + +Searching for the data won't find anything: + +// provide API Example +[source,java] +-------------------------------------------------- + +{ + "query": { + "match": { + "flag": "foo" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,java] +-------------------------------------------------- +{ + "hits" : { + "total" : 0 + } +} +-------------------------------------------------- +// TESTRESPONSE + +But you can issue an `updateByQuery` request to pick up the new mapping: + +// provide API Example +[source,java] +-------------------------------------------------- + +{ + "query": { + "match": { + "flag": "foo" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,java] +-------------------------------------------------- +{ + "hits" : { + "total" : 1 + } +} +-------------------------------------------------- +// TESTRESPONSE + +You can do the exact same thing when adding a field to a multifield. 
From ccab85835aae26fd0eef2030a6a5c39835ebf9a7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 24 Jun 2016 11:12:39 -0400 Subject: [PATCH 37/43] Rework java update-by-query docs --- docs/java-api/docs/update-by-query.asciidoc | 464 ++++---------------- 1 file changed, 85 insertions(+), 379 deletions(-) diff --git a/docs/java-api/docs/update-by-query.asciidoc b/docs/java-api/docs/update-by-query.asciidoc index eff4be0182d..16382067501 100644 --- a/docs/java-api/docs/update-by-query.asciidoc +++ b/docs/java-api/docs/update-by-query.asciidoc @@ -4,7 +4,7 @@ experimental[The update-by-query API is new and should still be considered experimental. The API may change in ways that are not backwards compatible] The simplest usage of `updateByQuery` updates each -document in an index without changing the source. This usage enables +document in an index without changing the source. This usage enables <> or another online mapping change. @@ -12,121 +12,93 @@ mapping change. -------------------------------------------------- UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); -updateByQuery.source("cool_things") - .filter(termQuery("level", "awesome")) - .script(new Script("ctx._source.awesome = \"absolutely\"")); +updateByQuery.source("source_index").abortOnVersionConflict(false); BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -The `updatebyQuery` API returns a JSON object similar to the following example: - -[source,java] --------------------------------------------------- -{ - "took" : 147, - "timed_out": false, - "updated": 120, - "deleted": 0, - "batches": 1, - "version_conflicts": 0, - "noops": 0, - "retries": { - "bulk": 0, - "search": 0 - }, - "throttled_millis": 0, - "requests_per_second": "unlimited", - "throttled_until_millis": 0, - "total": 120, - "failures" : [ ] -} --------------------------------------------------- -// TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] - Calls to the `updateByQuery` API start by getting a snapshot of the index, indexing any documents found using the `internal` versioning. NOTE: Version conflicts happen when a document changes between the time of the -snapshot and the time the index request processes. +snapshot and the time the index request processes. When the versions match, `updateByQuery` updates the document and increments the version number. All update and query failures cause `updateByQuery` to abort. These failures are -listed in the `failures` section of the JSON response object. Any successful updates -remain and are not rolled back. While the first failure causes the abort, the JSON -response object contains all of the failures generated by the failed bulk request. +available from the `BulkIndexByScrollResponse#getIndexingFailures` method. Any +successful updates remain and are not rolled back. While the first failure +causes the abort, the response contains all of the failures generated by the +failed bulk request. -To prevent version conflicts from causing `updateByQuery` to abort, -set `conflicts=proceed` on the URL or `"conflicts": "proceed"` -in the request body. The first example does this because it is trying to -pick up an online mapping change and a version conflict means that the -conflicting document was updated between the start of the `updateByQuery` +To prevent version conflicts from causing `updateByQuery` to abort, set +`abortOnVersionConflict(false)`. 
The first example does this because it is +trying to pick up an online mapping change and a version conflict means that +the conflicting document was updated between the start of the `updateByQuery` and the time when it attempted to update the document. This is fine because that update will have picked up the online mapping update. -Back to the API format, you can limit `updateByQuery` to a single type. This -will only update `tweet` documents from the `twitter` index: - -// provide API Example +Back to the API, `UpdateByQueryRequestBuilder` supports filtering the documents +that are updated, limiting the total number updated, and updating documents +with a script: [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); +updateByQuery.source("source_index") + .filter(termQuery("level", "awesome")) + .size(1000) + .script(new Script("ctx._source.awesome = 'absolutely'", ScriptType.INLINE, "painless", emptyMap())); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] -You can also limit `updateByQuery` using the -<>. This example updates all documents from the -`twitter` index for the user `kimchy`: - - -// provide API Example +`UpdateByQueryRequestBuilder` also allows you direct access to the query used +to select the documents which you can use to change the default scroll size or +otherwise modify the request for matching documents. [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); -{ - "query": { <1> - "term": { - "user": "kimchy" - } - } -} +updateByQuery.source("source_index") + .source().setSize(500); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] -<1> The query must be passed as a value to the `query` key, in the same -way as the <>. You can also use the `q` -parameter in the same way as the search api. - -So far we've only been updating documents without changing their source. That -is genuinely useful for things like -<> but it's only half the -fun. `updateByQuery` supports a `script` object to update the document. 
This -will increment the `likes` field on all of kimchy's tweets: - -// provide API Example +You can also combine `size` with sorting to limit the documents updated: [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); -{ - "script": { - "inline": "ctx._source.likes++" - }, - "query": { - "term": { - "user": "kimchy" - } - } -} +updateByQuery.source("source_index").size(100) + .source().addSort("cat", SortOrder.DESC); + +BulkIndexByScrollResponse response = updateByQuery.get(); +-------------------------------------------------- + +In addition to changing the `_source` of the document (see above) the script +can change the update action similarly to the Update API: + +[source,java] +-------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + +updateByQuery.source("source_index") + .script(new Script( + "if (ctx._source.awesome == 'absolutely) {" + + " ctx.op='noop' + + "} else if (ctx._source.awesome == 'lame') {" + + " ctx.op='delete'" + + "} else {" + + "ctx._source.awesome = 'absolutely'}", ScriptType.INLINE, "painless", emptyMap())); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] Just as in <> you can set `ctx.op` to change the operation that is executed: @@ -141,15 +113,12 @@ changes. That will cause `updateByQuery` to omit that document from its updates. `delete`:: Set `ctx.op = "delete"` if your script decides that the document must be - deleted. The deletion will be reported in the `deleted` counter in the +deleted. The deletion will be reported in the `deleted` counter in the <>. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. -Note that we stopped specifying `conflicts=proceed`. In this case we want a -version conflict to abort the process so we can handle the failure. - This API doesn't allow you to move the documents it touches, just modify their source. This is intentional! We've made no provisions for removing the document from its original location. @@ -157,144 +126,38 @@ from its original location. It's also possible to do this whole thing on multiple indexes and multiple types at once, just like the search API: -// provide API Example - [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); +updateByQuery.source("foo", "bar").source().setTypes("a", "b"); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[s/^/PUT twitter\nPUT blog\nGET _cluster\/health?wait_for_status=yellow\n/] If you provide `routing` then the routing is copied to the scroll query, limiting the process to the shards that match that routing value: -// provide API Example [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); +updateByQuery.source().setRouting("cat"); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] - -By default `updateByQuery` uses scroll batches of 1000. 
You can change the -batch size with the `scroll_size` URL parameter: - -// provide API Example -[source,java] --------------------------------------------------- - --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] `updateByQuery` can also use the <> feature by specifying a `pipeline` like this: -// provide API Example [source,java] -------------------------------------------------- +UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); -{ - "description" : "sets foo", - "processors" : [ { - "set" : { - "field": "foo", - "value": "bar" - } - } ] -} -POST twitter/_update_by_query?pipeline=set-foo +updateByQuery.setPipeline("hurray"); + +BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] - -[float] -=== URL Parameters - -In addition to the standard parameters like `pretty`, the Update By Query API -also supports `refresh`, `wait_for_completion`, `consistency`, and `timeout`. - -Sending the `refresh` will update all shards in the index being updated when -the request completes. This is different than the Index API's `refresh` -parameter which causes just the shard that received the new data to be indexed. - -If the request contains `wait_for_completion=false` then Elasticsearch will -perform some preflight checks, launch the request, and then return a `task` -which can be used with <> -to cancel or get the status of the task. Elasticsearch will also create a -record of this task as a document at `.tasks/task/${taskId}`. This is yours -to keep or remove as you see fit. When you are done with it, delete it so -Elasticsearch can reclaim the space it uses. - -`consistency` controls how many copies of a shard must respond to each write -request. `timeout` controls how long each write request waits for unavailable -shards to become available. Both work exactly how they work in the -<>. - -`requests_per_second` can be set to any decimal number (`1.4`, `6`, `1000`, etc) -and throttles the number of requests per second that the update by query issues. -The throttling is done waiting between bulk batches so that it can manipulate -the scroll timeout. The wait time is the difference between the time it took the -batch to complete and the time `requests_per_second * requests_in_the_batch`. -Since the batch isn't broken into multiple bulk requests large batch sizes will -cause Elasticsearch to create many requests and then wait for a while before -starting the next set. This is "bursty" instead of "smooth". The default is -`unlimited` which is also the only non-number value that it accepts. - -[float] -[[docs-update-by-query-response-body]] -=== Response body - -The JSON response looks like this: - -[source,java] --------------------------------------------------- -{ - "took" : 639, - "updated": 0, - "batches": 1, - "version_conflicts": 2, - "retries": { - "bulk": 0, - "search": 0 - } - "throttled_millis": 0, - "failures" : [ ] -} --------------------------------------------------- - -`took`:: - -The number of milliseconds from start to end of the whole operation. - -`updated`:: - -The number of documents that were successfully updated. - -`batches`:: - -The number of scroll responses pulled back by the the update by query. - -`version_conflicts`:: - -The number of version conflicts that the update by query hit. - -`retries`:: - -The number of retries attempted by update-by-query. 
`bulk` is the number of bulk -actions retried and `search` is the number of search actions retried. - -`throttled_millis`:: - -Number of milliseconds the request slept to conform to `requests_per_second`. - -`failures`:: - -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. - [float] [[docs-update-by-query-task-api]] @@ -303,79 +166,26 @@ from aborting the operation. You can fetch the status of all running update-by-query requests with the <>: -// provide API Example - [source,java] -------------------------------------------------- +ListTasksResponse tasksList = client.admin().cluster().prepareListTasks() + .setActions(UpdateByQueryAction.NAME).setDetailed(true).get(); --------------------------------------------------- -// CONSOLE - -The responses looks like: - -[source,java] --------------------------------------------------- -{ - "nodes" : { - "r1A2WoRbTwKZ516z6NEs5A" : { - "name" : "Tyrannus", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1:9300", - "attributes" : { - "testattr" : "test", - "portsfile" : "true" - }, - "tasks" : { - "r1A2WoRbTwKZ516z6NEs5A:36619" : { - "node" : "r1A2WoRbTwKZ516z6NEs5A", - "id" : 36619, - "type" : "transport", - "action" : "indices:data/write/update/byquery", - "status" : { <1> - "total" : 6154, - "updated" : 3500, - "created" : 0, - "deleted" : 0, - "batches" : 4, - "version_conflicts" : 0, - "noops" : 0, - "retries": { - "bulk": 0, - "search": 0 - } - "throttled_millis": 0 - }, - "description" : "" - } - } - } - } +for (TaskInfo info: tasksList.getTasks()) { + TaskId taskId = info.getTaskId(); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) info.getStatus(); + // do stuff } + -------------------------------------------------- -<1> this object contains the actual status. It is just like the response json -with the important addition of the `total` field. `total` is the total number -of operations that the reindex expects to perform. You can estimate the -progress by adding the `updated`, `created`, and `deleted` fields. The request -will finish when their sum is equal to the `total` field. - -With the task id you can look up the task directly: +With the `TaskId` shown above you can look up the task directly: // provide API Example [source,java] -------------------------------------------------- - +GetTaskResponse get = client.admin().cluster().prepareGetTask(taskId).get(); -------------------------------------------------- -// CONSOLE -// TEST[catch:missing] - -The advantage of this API is that it integrates with `wait_for_completion=false` -to transparently return the status of completed tasks. If the task is completed -and `wait_for_completion=false` was set on it them it'll come back with a -`results` or an `error` field. The cost of this feature is the document that -`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to -you to delete that document. [float] [[docs-update-by-query-cancel-task-api]] @@ -383,14 +193,15 @@ you to delete that document. 
Any Update By Query can be canceled using the <>: -// provide API Example [source,java] -------------------------------------------------- - +// Cancel all update-by-query requests +client.admin().cluster().prepareCancelTasks().setActions(UpdateByQueryAction.NAME).get().getTasks() +// Cancel a specific update-by-query request +client.admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks() -------------------------------------------------- -// CONSOLE -The `task_id` can be found using the tasks API above. +The `taskId` can be found using the list tasks API above. Cancelation should happen quickly but might take a few seconds. The task status API above will continue to list the task until it is wakes to cancel itself. @@ -403,120 +214,15 @@ API above will continue to list the task until it is wakes to cancel itself. The value of `requests_per_second` can be changed on a running update by query using the `_rethrottle` API: -// provide API Example [source,java] -------------------------------------------------- - +RethrottleAction.INSTANCE.newRequestBuilder(client).setTaskId(taskId).setRequestsPerSecond(2.0f).get(); -------------------------------------------------- -// CONSOLE -The `task_id` can be found using the tasks API above. +The `taskId` can be found using the tasks API above. Just like when setting it on the `updateByQuery` API `requests_per_second` -can be either `unlimited` to disable throttling or any decimal number like `1.7` -or `12` to throttle to that level. Rethrottling that speeds up the query takes +can be either `Float.POSITIVE_INFINITY` to disable throttling or any positive +float to throttle to that level. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query will take effect on after completing the current batch. This prevents scroll timeouts. - - -[float] -[[picking-up-a-new-property]] -=== Pick up a new property - -Say you created an index without dynamic mapping, filled it with data, and then -added a mapping value to pick up more fields from the data: - -[source,java] --------------------------------------------------- -PUT test -{ - "mappings": { - "test": { - "dynamic": false, <1> - "properties": { - "text": {"type": "text"} - } - } - } -} - -POST test/test?refresh -{ - "text": "words words", - "flag": "bar" -} -POST test/test?refresh -{ - "text": "words words", - "flag": "foo" -} -PUT test/_mapping/test <2> -{ - "properties": { - "text": {"type": "text"}, - "flag": {"type": "text", "analyzer": "keyword"} - } -} --------------------------------------------------- -// CONSOLE - -<1> This means that new fields won't be indexed, just stored in `_source`. - -<2> This updates the mapping to add the new `flag` field. To pick up the new -field you have to reindex all documents with it. 
- -Searching for the data won't find anything: - -// provide API Example -[source,java] --------------------------------------------------- - -{ - "query": { - "match": { - "flag": "foo" - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[source,java] --------------------------------------------------- -{ - "hits" : { - "total" : 0 - } -} --------------------------------------------------- -// TESTRESPONSE - -But you can issue an `updateByQuery` request to pick up the new mapping: - -// provide API Example -[source,java] --------------------------------------------------- - -{ - "query": { - "match": { - "flag": "foo" - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[source,java] --------------------------------------------------- -{ - "hits" : { - "total" : 1 - } -} --------------------------------------------------- -// TESTRESPONSE - -You can do the exact same thing when adding a field to a multifield. From 57f413e85164600ca12c7f30c455b4c8bba7d63e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 29 Jun 2016 11:08:54 -0400 Subject: [PATCH 38/43] More changes to java update-by-query api docs --- docs/java-api/docs/update-by-query.asciidoc | 53 ++++++++++----------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/docs/java-api/docs/update-by-query.asciidoc b/docs/java-api/docs/update-by-query.asciidoc index 16382067501..a94899668ef 100644 --- a/docs/java-api/docs/update-by-query.asciidoc +++ b/docs/java-api/docs/update-by-query.asciidoc @@ -39,8 +39,8 @@ the conflicting document was updated between the start of the `updateByQuery` and the time when it attempted to update the document. This is fine because that update will have picked up the online mapping update. -Back to the API, `UpdateByQueryRequestBuilder` supports filtering the documents -that are updated, limiting the total number updated, and updating documents +The `UpdateByQueryRequestBuilder` API supports filtering the updated documents, +limiting the total number of documents to update, and updating documents with a script: [source,java] @@ -55,8 +55,8 @@ updateByQuery.source("source_index") BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -`UpdateByQueryRequestBuilder` also allows you direct access to the query used -to select the documents which you can use to change the default scroll size or +`UpdateByQueryRequestBuilder` also enables direct access to the query used +to select the documents. You can use this access to change the default scroll size or otherwise modify the request for matching documents. 
[source,java] @@ -81,8 +81,8 @@ updateByQuery.source("source_index").size(100) BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -In addition to changing the `_source` of the document (see above) the script -can change the update action similarly to the Update API: +In addition to changing the `_source` field for the document, you can use a +script to change the action, similar to the Update API: [source,java] -------------------------------------------------- @@ -100,14 +100,14 @@ updateByQuery.source("source_index") BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -Just as in <> you can set `ctx.op` to change the -operation that is executed: +As in the <>, you can set the value of `ctx.op` to change the +operation that executes: `noop`:: -Set `ctx.op = "noop"` if your script decides that it doesn't have to make any -changes. That will cause `updateByQuery` to omit that document from its updates. - This no operation will be reported in the `noop` counter in the +Set `ctx.op = "noop"` if your script doesn't make any +changes. The `updateByQuery` operaton then omits that document from the updates. +This behavior increments the `noop` counter in the <>. `delete`:: @@ -116,15 +116,14 @@ Set `ctx.op = "delete"` if your script decides that the document must be deleted. The deletion will be reported in the `deleted` counter in the <>. -Setting `ctx.op` to anything else is an error. Setting any -other field in `ctx` is an error. +Setting `ctx.op` to any other value generates an error. Setting any +other field in `ctx` generates an error. This API doesn't allow you to move the documents it touches, just modify their source. This is intentional! We've made no provisions for removing the document from its original location. -It's also possible to do this whole thing on multiple indexes and multiple -types at once, just like the search API: +You can also perform these operations on multiple indices and types at once, similar to the search API: [source,java] -------------------------------------------------- @@ -135,7 +134,7 @@ updateByQuery.source("foo", "bar").source().setTypes("a", "b"); BulkIndexByScrollResponse response = updateByQuery.get(); -------------------------------------------------- -If you provide `routing` then the routing is copied to the scroll query, +If you provide a `routing` value then the process copies the routing value to the scroll query, limiting the process to the shards that match that routing value: [source,java] @@ -201,28 +200,26 @@ client.admin().cluster().prepareCancelTasks().setActions(UpdateByQueryAction.NAM client.admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks() -------------------------------------------------- -The `taskId` can be found using the list tasks API above. - -Cancelation should happen quickly but might take a few seconds. The task status -API above will continue to list the task until it is wakes to cancel itself. +Use the `list tasks` API to find the value of `taskId`. +Cancelling a request is typically a very fast process but can take up to a few seconds. +The task status API continues to list the task until the cancellation is complete. 
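
Because the task stays listed until the cancellation has actually finished, one way to wait for it is to poll the list tasks call from the task API section above. The loop below is only an illustrative sketch reusing the `prepareListTasks` builder and the `taskId` from that section; the back-off interval is an arbitrary choice.

[source,java]
--------------------------------------------------
// Sketch: poll until the cancelled update-by-query task disappears from the
// task list, which signals that the cancellation has completed.
boolean stillRunning = true;
while (stillRunning) {
    ListTasksResponse tasksList = client.admin().cluster().prepareListTasks()
        .setActions(UpdateByQueryAction.NAME).setDetailed(true).get();
    stillRunning = tasksList.getTasks().stream()
        .anyMatch(info -> taskId.equals(info.getTaskId()));
    if (stillRunning) {
        try {
            Thread.sleep(500); // small, arbitrary back-off between checks
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
}
--------------------------------------------------
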
[float] [[docs-update-by-query-rethrottle]] === Rethrottling -The value of `requests_per_second` can be changed on a running update by query -using the `_rethrottle` API: +Use the `_rethrottle` API to change the value of `requests_per_second` on a running update: [source,java] -------------------------------------------------- RethrottleAction.INSTANCE.newRequestBuilder(client).setTaskId(taskId).setRequestsPerSecond(2.0f).get(); -------------------------------------------------- -The `taskId` can be found using the tasks API above. +Use the `list tasks` API to find the value of `taskId`. -Just like when setting it on the `updateByQuery` API `requests_per_second` -can be either `Float.POSITIVE_INFINITY` to disable throttling or any positive -float to throttle to that level. Rethrottling that speeds up the query takes -effect immediately but rethrotting that slows down the query will take effect -on after completing the current batch. This prevents scroll timeouts. +As with the `updateByQuery` API, the value of `requests_per_second` +can be any positive float value to set the level of the throttle, or `Float.POSITIVE_INFINITY` to disable throttling. +A value of `requests_per_second` that speeds up the process takes +effect immediately. `requests_per_second` values that slow the query take effect +after completing the current batch in order to prevent scroll timeouts. From 0d81dee0137ebd1c80b5713b44b5581037a2c84b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 28 Jun 2016 19:32:56 +0200 Subject: [PATCH 39/43] Fix key_as_string for date histogram and epoch_millis/epoch_second format When doing a `date_histogram` aggregation with `"format":"epoch_millis"` or `"format" : "epoch_second"` and using a time zone other than UTC, the `key_as_string` ouput in the response does not reflect the UTC timestamp that is used as the key. This happens because when applying the `time_zone` in DocValueFormat.DateTime to an epoch-based formatter, this adds the time zone offset to the value being formated. Instead we should adjust the added display offset to get back the utc instance in EpochTimePrinter. Closes #19038 --- .../org/elasticsearch/common/joda/Joda.java | 18 +++++--- .../elasticsearch/search/DocValueFormat.java | 1 + .../aggregations/bucket/DateHistogramIT.java | 41 +++++++++++++++++++ 3 files changed, 55 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/joda/Joda.java b/core/src/main/java/org/elasticsearch/common/joda/Joda.java index cffea836ac2..34c882d0d80 100644 --- a/core/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/core/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -43,7 +43,6 @@ import org.joda.time.format.StrictISODateTimeFormat; import java.io.IOException; import java.io.Writer; import java.util.Locale; -import java.util.regex.Pattern; /** * @@ -375,21 +374,30 @@ public class Joda { return hasMilliSecondPrecision ? 19 : 16; } + + /** + * We adjust the instant by displayOffset to adjust for the offset that might have been added in + * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone. 
+ */ @Override public void printTo(StringBuffer buf, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) { if (hasMilliSecondPrecision) { - buf.append(instant); + buf.append(instant - displayOffset); } else { - buf.append(instant / 1000); + buf.append((instant - displayOffset) / 1000); } } + /** + * We adjust the instant by displayOffset to adjust for the offset that might have been added in + * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone. + */ @Override public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) throws IOException { if (hasMilliSecondPrecision) { - out.write(String.valueOf(instant)); + out.write(String.valueOf(instant - displayOffset)); } else { - out.append(String.valueOf(instant / 1000)); + out.append(String.valueOf((instant - displayOffset) / 1000)); } } diff --git a/core/src/main/java/org/elasticsearch/search/DocValueFormat.java b/core/src/main/java/org/elasticsearch/search/DocValueFormat.java index 9fc36c38e76..4fc1967d7ec 100644 --- a/core/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/core/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -115,6 +115,7 @@ public interface DocValueFormat extends NamedWriteable { return Double.parseDouble(value); } + @Override public BytesRef parseBytesRef(String value) { return new BytesRef(value); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 4f7064a33bb..2c3534183e0 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -47,6 +47,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.Callable; @@ -237,6 +238,46 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(bucket.getDocCount(), equalTo(1L)); } + public void testSingleValued_timeZone_epoch() throws Exception { + String format = randomBoolean() ? "epoch_millis" : "epoch_second"; + int millisDivider = format.equals("epoch_millis") ? 
1 : 1000; + if (randomBoolean()) { + format = format + "||date_optional_time"; + } + DateTimeZone tz = DateTimeZone.forID("+01:00"); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(dateHistogram("histo").field("date") + .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) + .timeZone(tz).format(format)) + .execute() + .actionGet(); + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(6)); + + List expectedKeys = new ArrayList<>(); + expectedKeys.add(new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC)); + expectedKeys.add(new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC)); + expectedKeys.add(new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC)); + expectedKeys.add(new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC)); + expectedKeys.add(new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC)); + expectedKeys.add(new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC)); + + + Iterator keyIterator = expectedKeys.iterator(); + for (Histogram.Bucket bucket : buckets) { + assertThat(bucket, notNullValue()); + DateTime expectedKey = keyIterator.next(); + assertThat(bucket.getKeyAsString(), equalTo(Long.toString(expectedKey.getMillis() / millisDivider))); + assertThat(((DateTime) bucket.getKey()), equalTo(expectedKey)); + assertThat(bucket.getDocCount(), equalTo(1L)); + } + } + public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo") From fc38e503e04194da01e719810c267be4d2799698 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 29 Jun 2016 13:36:11 -0400 Subject: [PATCH 40/43] Clearer error when handling fractional time values In 2f638b5a23597967a98b1ced1deac91d64af5a44, support for fractional time values was removed. While this change is documented, the error message presented does not give an indication that fractional inputs are not supported. This commit fixes this by detecting when the input is a time value that would successfully parse as a double but will not parse as a long and presenting a clear error message that fractional time values are not supported. 
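
To make the new behaviour concrete, a caller passing a fractional value now gets the more specific message rather than a bare parse failure. The snippet below is only an illustration of that contract, reusing the `parseTimeValue(String, TimeValue, String)` signature exercised in the tests in this patch; the setting name is made up.

[source,java]
--------------------------------------------------
try {
    // "1.5h" parses as a double but not as a long, so it trips the new check
    TimeValue.parseTimeValue("1.5h", null, "example_setting");
} catch (ElasticsearchParseException e) {
    // the message now states that fractional time values are not supported,
    // instead of only reporting a generic parse failure
    System.err.println(e.getMessage());
}
--------------------------------------------------
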
Relates #19158 --- .../elasticsearch/common/unit/TimeValue.java | 68 ++++++++++--------- .../common/unit/TimeValueTests.java | 31 +++++++++ 2 files changed, 68 insertions(+), 31 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 5a56603dad7..db8299cdc9a 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -317,40 +317,46 @@ public class TimeValue implements Writeable { if (sValue == null) { return defaultValue; } - try { - String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim(); - if (lowerSValue.endsWith("nanos")) { - return new TimeValue(parse(lowerSValue, 5), TimeUnit.NANOSECONDS); - } else if (lowerSValue.endsWith("micros")) { - return new TimeValue(parse(lowerSValue, 6), TimeUnit.MICROSECONDS); - } else if (lowerSValue.endsWith("ms")) { - return new TimeValue(parse(lowerSValue, 2), TimeUnit.MILLISECONDS); - } else if (lowerSValue.endsWith("s")) { - return new TimeValue(parse(lowerSValue, 1), TimeUnit.SECONDS); - } else if (lowerSValue.endsWith("m")) { - return new TimeValue(parse(lowerSValue, 1), TimeUnit.MINUTES); - } else if (lowerSValue.endsWith("h")) { - return new TimeValue(parse(lowerSValue, 1), TimeUnit.HOURS); - } else if (lowerSValue.endsWith("d")) { - return new TimeValue(parse(lowerSValue, 1), TimeUnit.DAYS); - } else if (lowerSValue.matches("-0*1")) { - return TimeValue.MINUS_ONE; - } else if (lowerSValue.matches("0+")) { - return TimeValue.ZERO; - } else { - // Missing units: - throw new ElasticsearchParseException( - "failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", - settingName, - sValue); - } - } catch (NumberFormatException e) { - throw new ElasticsearchParseException("failed to parse [{}]", e, sValue); + final String normalized = sValue.toLowerCase(Locale.ROOT).trim(); + if (normalized.endsWith("nanos")) { + return new TimeValue(parse(sValue, normalized, 5), TimeUnit.NANOSECONDS); + } else if (normalized.endsWith("micros")) { + return new TimeValue(parse(sValue, normalized, 6), TimeUnit.MICROSECONDS); + } else if (normalized.endsWith("ms")) { + return new TimeValue(parse(sValue, normalized, 2), TimeUnit.MILLISECONDS); + } else if (normalized.endsWith("s")) { + return new TimeValue(parse(sValue, normalized, 1), TimeUnit.SECONDS); + } else if (normalized.endsWith("m")) { + return new TimeValue(parse(sValue, normalized, 1), TimeUnit.MINUTES); + } else if (normalized.endsWith("h")) { + return new TimeValue(parse(sValue, normalized, 1), TimeUnit.HOURS); + } else if (normalized.endsWith("d")) { + return new TimeValue(parse(sValue, normalized, 1), TimeUnit.DAYS); + } else if (normalized.matches("-0*1")) { + return TimeValue.MINUS_ONE; + } else if (normalized.matches("0+")) { + return TimeValue.ZERO; + } else { + // Missing units: + throw new ElasticsearchParseException( + "failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", + settingName, + sValue); } } - private static long parse(String s, int suffixLength) { - return Long.parseLong(s.substring(0, s.length() - suffixLength).trim()); + private static long parse(final String initialInput, final String normalized, final int suffixLength) { + final String s = normalized.substring(0, normalized.length() - suffixLength).trim(); + try { + return Long.parseLong(s); + } catch (final NumberFormatException e) { + try { + 
@SuppressWarnings("unused") final double ignored = Double.parseDouble(s); + throw new ElasticsearchParseException("failed to parse [{}], fractional time values are not supported", e, initialInput); + } catch (final NumberFormatException ignored) { + throw new ElasticsearchParseException("failed to parse [{}]", e, initialInput); + } + } } private static final long C0 = 1L; diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index 9b73f2f99af..78afc9e514f 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -30,9 +30,12 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.object.HasToString.hasToString; public class TimeValueTests extends ESTestCase { @@ -125,6 +128,34 @@ public class TimeValueTests extends ESTestCase { assertThat(TimeValue.parseTimeValue(t.getStringRep(), null, "test"), equalTo(t)); } + private static final String FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED = "fractional time values are not supported"; + + public void testNonFractionalTimeValues() { + final String s = randomAsciiOfLength(10) + randomTimeUnit(); + final ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test")); + assertThat(e, hasToString(containsString("failed to parse [" + s + "]"))); + assertThat(e, not(hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED)))); + assertThat(e.getCause(), instanceOf(NumberFormatException.class)); + } + + public void testFractionalTimeValues() { + double value; + do { + value = randomDouble(); + } while (value == 0); + final String s = Double.toString(randomIntBetween(0, 128) + value) + randomTimeUnit(); + final ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test")); + assertThat(e, hasToString(containsString("failed to parse [" + s + "]"))); + assertThat(e, hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED))); + assertThat(e.getCause(), instanceOf(NumberFormatException.class)); + } + + private String randomTimeUnit() { + return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d"); + } + private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException { BytesStreamOutput out = new BytesStreamOutput(); value.writeTo(out); From 8b533b7ca9c315349edf329d2b8aa931548fe792 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Jun 2016 11:09:35 -0700 Subject: [PATCH 41/43] Internal: Deprecate ExceptionsHelper.detailedMessage This is a trappy "helper" and only hurts. 
See #19069 --- .../src/main/java/org/elasticsearch/ExceptionsHelper.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java index 3842ab4e3bf..5996856cda0 100644 --- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -89,10 +89,18 @@ public final class ExceptionsHelper { return result; } + /** + * @deprecated Don't swallow exceptions, allow them to propagate. + */ + @Deprecated public static String detailedMessage(Throwable t) { return detailedMessage(t, false, 0); } + /** + * @deprecated Don't swallow exceptions, allow them to propagate. + */ + @Deprecated public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) { if (t == null) { return "Unknown"; From b2da5424b4048e375b263e06721a86ae31ec748a Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Wed, 29 Jun 2016 20:18:25 +0200 Subject: [PATCH 42/43] [TEST] fix vagrant tests for seed with format ABC:DEF (#19157) * [TEST] fix vagrant tests for seed with format ABC:DEF Otherwise one gets an error message when passing -Dtests.seed=ABC:DEF to any test run. --- qa/vagrant/build.gradle | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 89668a06534..878e9c5ef7f 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -58,15 +58,20 @@ for (String box : vagrantBoxes.split(',')) { } long seed -String formattedSeed +String formattedSeed = null String[] upgradeFromVersions String upgradeFromVersion String maybeTestsSeed = System.getProperty("tests.seed", null); if (maybeTestsSeed != null) { - seed = new BigInteger(maybeTestsSeed, 16).longValue() - formattedSeed = maybeTestsSeed -} else { + List seeds = maybeTestsSeed.tokenize(':') + if (seeds.size() != 0) { + String masterSeed = seeds.get(0) + seed = new BigInteger(masterSeed, 16).longValue() + formattedSeed = maybeTestsSeed + } +} +if (formattedSeed == null) { seed = new Random().nextLong() formattedSeed = String.format("%016X", seed) } From b3daf7d683d28cc0943da20c748b75881d9f3196 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 29 Jun 2016 11:25:23 -0700 Subject: [PATCH 43/43] Remove unnecessary variant of detailedMessage --- .../org/elasticsearch/ExceptionsHelper.java | 23 ++----------------- .../elasticsearch/index/store/StoreTests.java | 4 ++-- 2 files changed, 4 insertions(+), 23 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java index 5996856cda0..28f8d4391ec 100644 --- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -94,18 +94,9 @@ public final class ExceptionsHelper { */ @Deprecated public static String detailedMessage(Throwable t) { - return detailedMessage(t, false, 0); - } - - /** - * @deprecated Don't swallow exceptions, allow them to propagate. 
- */ - @Deprecated - public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) { if (t == null) { return "Unknown"; } - int counter = initialCounter + 1; if (t.getCause() != null) { StringBuilder sb = new StringBuilder(); while (t != null) { @@ -115,21 +106,11 @@ public final class ExceptionsHelper { sb.append(t.getMessage()); sb.append("]"); } - if (!newLines) { - sb.append("; "); - } + sb.append("; "); t = t.getCause(); if (t != null) { - if (newLines) { - sb.append("\n"); - for (int i = 0; i < counter; i++) { - sb.append("\t"); - } - } else { - sb.append("nested: "); - } + sb.append("nested: "); } - counter++; } return sb.toString(); } else { diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java index e005fa400ef..e40f1c7f06f 100644 --- a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -1086,7 +1086,7 @@ public class StoreTests extends ESTestCase { String uuid = Store.CORRUPTED + UUIDs.randomBase64UUID(); try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) { CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_STACK_TRACE); - output.writeString(ExceptionsHelper.detailedMessage(exception, true, 0)); + output.writeString(ExceptionsHelper.detailedMessage(exception)); output.writeString(ExceptionsHelper.stackTrace(exception)); CodecUtil.writeFooter(output); } @@ -1102,7 +1102,7 @@ public class StoreTests extends ESTestCase { try (IndexOutput output = dir.createOutput(uuid, IOContext.DEFAULT)) { CodecUtil.writeHeader(output, Store.CODEC, Store.VERSION_START); - output.writeString(ExceptionsHelper.detailedMessage(exception, true, 0)); + output.writeString(ExceptionsHelper.detailedMessage(exception)); CodecUtil.writeFooter(output); } try {