diff --git a/TESTING.asciidoc b/TESTING.asciidoc index e85d1c7716f..332e0986d23 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -283,3 +283,9 @@ To disable validation step (forbidden API or `// NOCOMMIT`) use --------------------------------------------------------------------------- mvn test -Dvalidate.skip=true --------------------------------------------------------------------------- + +You can also skip this by using the "dev" profile: + +--------------------------------------------------------------------------- +mvn test -Pdev +--------------------------------------------------------------------------- diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index badaa29ea82..cbc23bde34f 100644 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -583,6 +583,20 @@ def ensure_checkout_is_clean(branchName): if 'is ahead' in s: raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s)) +# Checks all source files for //NORELEASE comments +def check_norelease(path='src'): + pattern = re.compile(r'\bnorelease\b', re.IGNORECASE) + for root, _, file_names in os.walk(path): + for file_name in fnmatch.filter(file_names, '*.java'): + full_path = os.path.join(root, file_name) + line_number = 0 + with open(full_path, 'r', encoding='utf-8') as current_file: + for line in current_file: + line_number = line_number + 1 + if pattern.search(line): + raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number)) + + if __name__ == '__main__': parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release') parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(), @@ -626,6 +640,7 @@ if __name__ == '__main__': print(' JAVA_HOME is [%s]' % JAVA_HOME) print(' Running with maven command: [%s] ' % (MVN)) if build: + check_norelease(path='src') ensure_checkout_is_clean(src_branch) verify_lucene_version() release_version = find_release_version(src_branch) diff --git a/dev-tools/create-bwc-index.py b/dev-tools/create-bwc-index.py index ea1d1746125..23d102c0a49 100644 --- a/dev-tools/create-bwc-index.py +++ b/dev-tools/create-bwc-index.py @@ -66,8 +66,6 @@ def index_documents(es, index_name, type, num_docs): es.indices.refresh(index=index_name) if rarely(): es.indices.flush(index=index_name, force=frequently()) - if rarely(): - es.indices.optimize(index=index_name) logging.info('Flushing index') es.indices.flush(index=index_name) @@ -149,12 +147,15 @@ def generate_index(client, version): 'type': 'string', 'index_analyzer': 'standard' }, - 'completion_with_index_analyzer': { - 'type': 'completion', - 'index_analyzer': 'standard' - } } } + # completion type was added in 0.90.3 + if not version in ['0.90.0.Beta1', '0.90.0.RC1', '0.90.0.RC2', '0.90.0', '0.90.1', '0.90.2']: + mappings['analyzer_1']['properties']['completion_with_index_analyzer'] = { + 'type': 'completion', + 'index_analyzer': 'standard' + } + mappings['analyzer_type2'] = { 'index_analyzer': 'standard', 'search_analyzer': 'keyword', @@ -209,7 +210,7 @@ def generate_index(client, version): health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) assert health['timed_out'] == False, 'cluster health timed out %s' % health - num_docs = random.randint(10, 100) + num_docs = random.randint(2000, 3000) index_documents(client, 'test', 'doc', num_docs) logging.info('Running basic asserts on the 
data added') run_basic_asserts(client, 'test', 'doc', num_docs) diff --git a/dev-tools/forbidden/all-signatures.txt b/dev-tools/forbidden/all-signatures.txt index 72f9a00e269..e8494c2721e 100644 --- a/dev-tools/forbidden/all-signatures.txt +++ b/dev-tools/forbidden/all-signatures.txt @@ -1,3 +1,19 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + @defaultMessage Convert to URI java.net.URL#getPath() java.net.URL#getFile() diff --git a/dev-tools/forbidden/core-signatures.txt b/dev-tools/forbidden/core-signatures.txt index 151cbb2754b..2a662a60974 100644 --- a/dev-tools/forbidden/core-signatures.txt +++ b/dev-tools/forbidden/core-signatures.txt @@ -1,3 +1,19 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + @defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with java.util.concurrent.Executors#newFixedThreadPool(int) diff --git a/dev-tools/forbidden/test-signatures.txt b/dev-tools/forbidden/test-signatures.txt index a4d5737281e..7471aa685bc 100644 --- a/dev-tools/forbidden/test-signatures.txt +++ b/dev-tools/forbidden/test-signatures.txt @@ -1,2 +1,18 @@ +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
com.carrotsearch.randomizedtesting.RandomizedTest#globalTempDir() @ Use newTempDirPath() instead +com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded seeds diff --git a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc index cab70cf4f60..dd61767e512 100644 --- a/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -16,7 +16,7 @@ AggregationBuilder aggregation = AggregationBuilders .dateHistogram("agg") .field("dateOfBirth") - .interval(DateHistogram.Interval.YEAR); + .interval(DateHistogramInterval.YEAR); -------------------------------------------------- Or if you want to set an interval of 10 days: @@ -27,7 +27,7 @@ AggregationBuilder aggregation = AggregationBuilders .dateHistogram("agg") .field("dateOfBirth") - .interval(DateHistogram.Interval.days(10)); + .interval(DateHistogramInterval.days(10)); -------------------------------------------------- @@ -43,13 +43,13 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; [source,java] -------------------------------------------------- // sr is here your SearchResponse object -DateHistogram agg = sr.getAggregations().get("agg"); +Histogram agg = sr.getAggregations().get("agg"); // For each entry -for (DateHistogram.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // Key - DateTime keyAsDate = entry.getKeyAsDate(); // Key as date - long docCount = entry.getDocCount(); // Doc count +for (Histogram.Bucket entry : agg.getBuckets()) { + DateTime keyAsDate = (DateTime) entry.getKey(); // Key + String key = entry.getKeyAsString(); // Key as String + long docCount = entry.getDocCount(); // Doc count logger.info("key [{}], date [{}], doc_count [{}]", key, keyAsDate.getYear(), docCount); } diff --git a/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc b/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc index 54abb357470..b86bec68086 100644 --- a/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/daterange-aggregation.asciidoc @@ -29,20 +29,20 @@ Import Aggregation definition classes: [source,java] -------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.date.DateRange; +import org.elasticsearch.search.aggregations.bucket.range.Range; -------------------------------------------------- [source,java] -------------------------------------------------- // sr is here your SearchResponse object -DateRange agg = sr.getAggregations().get("agg"); +Range agg = sr.getAggregations().get("agg"); // For each entry -for (DateRange.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // Date range as key - DateTime fromAsDate = entry.getFromAsDate(); // Date bucket from as a Date - DateTime toAsDate = entry.getToAsDate(); // Date bucket to as a Date - long docCount = entry.getDocCount(); // Doc count +for (Range.Bucket entry : agg.getBuckets()) { + String key = entry.getKey(); // Date range as key + DateTime fromAsDate = (DateTime) entry.getFrom(); // Date bucket from as a Date + DateTime toAsDate = (DateTime) entry.getTo(); // Date bucket to as a Date + long docCount = entry.getDocCount(); // Doc count logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, fromAsDate, toAsDate, docCount); } diff --git 
a/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc index 9b8f776c03f..e3a9d95c0e3 100644 --- a/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/geodistance-aggregation.asciidoc @@ -30,20 +30,20 @@ Import Aggregation definition classes: [source,java] -------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistance; +import org.elasticsearch.search.aggregations.bucket.range.Range; -------------------------------------------------- [source,java] -------------------------------------------------- // sr is here your SearchResponse object -GeoDistance agg = sr.getAggregations().get("agg"); +Range agg = sr.getAggregations().get("agg"); // For each entry -for (GeoDistance.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // key as String - Number from = entry.getFrom(); // bucket from value - Number to = entry.getTo(); // bucket to value - long docCount = entry.getDocCount(); // Doc count +for (Range.Bucket entry : agg.getBuckets()) { + String key = entry.getKey(); // key as String + Number from = (Number) entry.getFrom(); // bucket from value + Number to = (Number) entry.getTo(); // bucket to value + long docCount = entry.getDocCount(); // Doc count logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount); } diff --git a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc index c4e5ec535e8..f5c8abe2e7e 100644 --- a/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/histogram-aggregation.asciidoc @@ -36,7 +36,8 @@ Histogram agg = sr.getAggregations().get("agg"); // For each entry for (Histogram.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // Key - long docCount = entry.getDocCount(); // Doc count + Number key = (Number) entry.getKey(); // Key + String keyAsString = entry.getKeyAsString(); // Key As String + long docCount = entry.getDocCount(); // Doc count } -------------------------------------------------- diff --git a/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc b/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc index c7bef4c8ed0..3cf301ffaee 100644 --- a/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/iprange-aggregation.asciidoc @@ -40,7 +40,7 @@ Import Aggregation definition classes: [source,java] -------------------------------------------------- -import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range; +import org.elasticsearch.search.aggregations.bucket.range.Range; -------------------------------------------------- [source,java] @@ -49,7 +49,7 @@ import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range; -IPv4Range agg = sr.getAggregations().get("agg"); +Range agg = sr.getAggregations().get("agg"); // For each entry -for (IPv4Range.Bucket entry : agg.getBuckets()) { +for (Range.Bucket entry : agg.getBuckets()) { String key = entry.getKey(); // Ip range as key String fromAsString = entry.getFromAsString(); // Ip bucket from as a String String toAsString = entry.getToAsString(); // Ip bucket to as a String diff --git a/docs/java-api/aggregations/bucket/range-aggregation.asciidoc b/docs/java-api/aggregations/bucket/range-aggregation.asciidoc index 512c3d4772a..3b0f8174eae 100644 ---
a/docs/java-api/aggregations/bucket/range-aggregation.asciidoc +++ b/docs/java-api/aggregations/bucket/range-aggregation.asciidoc @@ -38,10 +38,10 @@ Range agg = sr.getAggregations().get("agg"); // For each entry for (Range.Bucket entry : agg.getBuckets()) { - String key = entry.getKey(); // Range as key - Number from = entry.getFrom(); // Bucket from - Number to = entry.getTo(); // Bucket to - long docCount = entry.getDocCount(); // Doc count + String key = entry.getKey(); // Range as key + Number from = (Number) entry.getFrom(); // Bucket from + Number to = (Number) entry.getTo(); // Bucket to + long docCount = entry.getDocCount(); // Doc count logger.info("key [{}], from [{}], to [{}], doc_count [{}]", key, from, to, docCount); } diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 169d96cfe26..51a28e89adb 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -7,9 +7,9 @@ information *spans nodes*. [source,shell] -------------------------------------------------- % curl 'localhost:9200/_cat/indices/twi*?v' -health index pri rep docs.count docs.deleted store.size pri.store.size -green twitter 5 1 11434 0 64mb 32mb -green twitter2 2 0 2030 0 5.8mb 5.8mb +health status index pri rep docs.count docs.deleted store.size pri.store.size +green open twitter 5 1 11434 0 64mb 32mb +green open twitter2 2 0 2030 0 5.8mb 5.8mb -------------------------------------------------- We can tell quickly how many shards make up an index, the number of @@ -33,8 +33,8 @@ Which indices are yellow? [source,shell] -------------------------------------------------- % curl localhost:9200/_cat/indices | grep ^yell -yellow wiki 2 1 6401 1115 151.4mb 151.4mb -yellow twitter 5 1 11434 0 32mb 32mb +yellow open wiki 2 1 6401 1115 151.4mb 151.4mb +yellow open twitter 5 1 11434 0 32mb 32mb -------------------------------------------------- What's my largest index by disk usage not including replicas? @@ -42,9 +42,9 @@ What's my largest index by disk usage not including replicas? [source,shell] -------------------------------------------------- -% curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk7 -green wiki 2 0 6401 1115 158843725 158843725 -green twitter 5 1 11434 0 67155614 33577857 -green twitter2 2 0 2030 0 6125085 6125085 +% curl 'localhost:9200/_cat/indices?bytes=b' | sort -rnk8 +green open wiki 2 0 6401 1115 158843725 158843725 +green open twitter 5 1 11434 0 67155614 33577857 +green open twitter2 2 0 2030 0 6125085 6125085 -------------------------------------------------- How many merge operations have the shards for the `wiki` completed? diff --git a/docs/reference/images/Exponential.png b/docs/reference/images/Exponential.png index 6abe85a4fd7..1591f747a13 100644 Binary files a/docs/reference/images/Exponential.png and b/docs/reference/images/Exponential.png differ diff --git a/docs/reference/images/Gaussian.png b/docs/reference/images/Gaussian.png index eb87e8bc59d..9d49bfb9470 100644 Binary files a/docs/reference/images/Gaussian.png and b/docs/reference/images/Gaussian.png differ diff --git a/docs/reference/images/Linear.png b/docs/reference/images/Linear.png index 770be13caf2..d06862266c1 100644 Binary files a/docs/reference/images/Linear.png and b/docs/reference/images/Linear.png differ diff --git a/docs/reference/index-modules/fielddata.asciidoc b/docs/reference/index-modules/fielddata.asciidoc index e9da3689cae..68764a05a27 100644 --- a/docs/reference/index-modules/fielddata.asciidoc +++ b/docs/reference/index-modules/fielddata.asciidoc @@ -45,7 +45,7 @@ settings API.
[[fielddata-circuit-breaker]] ==== Field data circuit breaker The field data circuit breaker allows Elasticsearch to estimate the amount of -memory a field will required to be loaded into memory. It can then prevent the +memory a field will require to be loaded into memory. It can then prevent the field data loading by raising an exception. By default the limit is configured to 60% of the maximum JVM heap. It can be configured with the following parameters: diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 74b30cb02ce..b50b5c4dce9 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -65,6 +65,9 @@ coming[1.5.0, this syntax was change to fix inconsistencies with other API] }, "translog" : { "recovered" : 0, + "total" : 0, + "percent" : "100.0%", + "total_on_start" : 0, "total_time" : "0s", "total_time_in_millis" : 0 }, diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index e5e3f38cedf..3810bfbdca0 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -64,9 +64,6 @@ settings API: `index.index_concurrency`:: experimental[] Defaults to `8`. -`index.fail_on_merge_failure`:: - experimental[] Default to `true`. - `index.translog.flush_threshold_ops`:: When to flush based on operations. diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index 03b43bc13cd..678d696ff80 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -53,7 +53,7 @@ GET /_search } ----------------------------------- -Save the contents of the script as a file called `config/script/my_script.groovy` +Save the contents of the script as a file called `config/scripts/my_script.groovy` on every data node in the cluster: [source,js] diff --git a/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc index f89072d973f..2120c0bec9a 100644 --- a/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/geodistance-aggregation.asciidoc @@ -31,18 +31,19 @@ Response: "rings" : { "buckets": [ { - "unit": "km", + "key": "*-100.0", + "from": 0, "to": 100.0, "doc_count": 3 }, { - "unit": "km", + "key": "100.0-300.0", "from": 100.0, "to": 300.0, "doc_count": 1 }, { - "unit": "km", + "key": "300.0-*", "from": 300.0, "doc_count": 7 } diff --git a/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc index d3b6c8dc04a..6d06743644b 100644 --- a/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/iprange-aggregation.asciidoc @@ -30,7 +30,7 @@ Response: ... 
"aggregations": { - "ip_ranges": + "ip_ranges": { "buckets" : [ { "to": 167772165, diff --git a/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc index 40417a065f9..1d17c4ad5ac 100644 --- a/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/significantterms-aggregation.asciidoc @@ -149,7 +149,6 @@ Now we have anomaly detection for each of the police forces using a single reque We can use other forms of top-level aggregations to segment our data, for example segmenting by geographic area to identify unusual hot-spots of a particular crime type: - [source,js] -------------------------------------------------- { @@ -345,9 +344,38 @@ Roughly, `mutual_information` prefers high frequent terms even if they occur als It is hard to say which one of the different heuristics will be the best choice as it depends on what the significant terms are used for (see for example [Yang and Pedersen, "A Comparative Study on Feature Selection in Text Categorization", 1997](http://courses.ischool.berkeley.edu/i256/f06/papers/yang97comparative.pdf) for a study on using significant terms for feature selection for text classification). +If none of the above measures suits your usecase than another option is to implement a custom significance measure: +===== scripted +coming[1.5.0] +Customized scores can be implemented via a script: +[source,js] +-------------------------------------------------- + + "script_heuristic": { + "script": "_subset_freq/(_superset_freq - _subset_freq + 1)" + } +-------------------------------------------------- + +Scripts can be inline (as in above example), indexed or stored on disk. For details on the options, see <>. +Parameters need to be set as follows: + +[horizontal] +`script`:: Inline script, name of script file or name of indexed script. Mandatory. +`script_type`:: One of "inline" (default), "indexed" or "file". +`lang`:: Script language (default "groovy") +`params`:: Script parameters (default empty). + +Available parameters in the script are + +[horizontal] +`_subset_freq`:: Number of documents the term appears in in the subset. +`_superset_freq`:: Number of documents the term appears in in the superset. +`_subset_size`:: Number of documents in the subset. +`_superset_size`:: Number of documents in the superset. + ===== Size & Shard Size The `size` parameter can be set to define how many term buckets should be returned out of the overall terms list. 
By diff --git a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc index 6b14c352078..0fd51f723b8 100644 --- a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc @@ -28,7 +28,7 @@ Let's look at a range of percentiles representing load time: "aggs" : { "load_time_outlier" : { "percentile_ranks" : { - "field" : "load_time" <1> + "field" : "load_time", <1> "values" : [15, 30] } } diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 656471406ef..5e38ac024c2 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -152,6 +152,28 @@ An important default is that the `_source` returned in hits inside `inner_hits` So in the above example only the comment part is returned per nested hit and not the entire source of the top level document that contained the the comment. +[[hierarchical-nested-inner-hits]] +==== Hierarchical levels of nested object fields and inner hits + +If a mapping has multiple levels of hierarchical nested object fields, each level can be accessed via a dot-notated path. +For example, if there is a `comments` nested field that contains a `votes` nested field, and votes should be returned directly +with the root hits, then the following path can be defined: + +[source,js] +-------------------------------------------------- +{ + "query" : { + "nested" : { + "path" : "comments.votes", + "query" : { ... }, + "inner_hits" : {} + } + } +} +-------------------------------------------------- + +This indirect referencing is only supported for nested inner hits. + [[parent-child-inner-hits]] ==== Parent/child inner hits diff --git a/rest-api-spec/test/cat.recovery/10_basic.yaml b/rest-api-spec/test/cat.recovery/10_basic.yaml index ae6c00581e1..89b1fb8c765 100755 --- a/rest-api-spec/test/cat.recovery/10_basic.yaml +++ b/rest-api-spec/test/cat.recovery/10_basic.yaml @@ -42,6 +42,9 @@ \d+\.\d+% \s+ # bytes_percent \d+ \s+ # total_files \d+ \s+ # total_bytes + \d+ \s+ # translog + -?\d+\.\d+% \s+ # translog_percent + -?\d+ \s+ # total_translog \n )+ $/ diff --git a/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/test/cat.shards/10_basic.yaml index b0f6f7cf7d7..ce3145b83b5 100755 --- a/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/test/cat.shards/10_basic.yaml @@ -53,3 +53,26 @@ - match: $body: | /^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/ + + - do: + indices.create: + index: index3 + body: + settings: + number_of_shards: "1" + number_of_replicas: "1" + shadow_replicas: true + shared_filesystem: false + - do: + cluster.health: + wait_for_status: yellow + wait_for_relocating_shards: 0 + + - do: + cat.shards: + index: index3 + v: false + - match: + $body: | + /^(index3 \s+ \d \s+ (p|s) \s+ ((STARTED|INITIALIZING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)?
\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){2}$/ + diff --git a/rest-api-spec/test/indices.recovery/10_basic.yaml b/rest-api-spec/test/indices.recovery/10_basic.yaml index ea1c522ab8a..86d396c2063 100644 --- a/rest-api-spec/test/indices.recovery/10_basic.yaml +++ b/rest-api-spec/test/indices.recovery/10_basic.yaml @@ -33,6 +33,8 @@ - gte: { test_1.shards.0.index.size.recovered_in_bytes: 0 } - match: { test_1.shards.0.index.size.percent: /^\d+\.\d\%$/ } - gte: { test_1.shards.0.translog.recovered: 0 } + - gte: { test_1.shards.0.translog.total: -1 } + - gte: { test_1.shards.0.translog.total_on_start: 0 } - gte: { test_1.shards.0.translog.total_time_in_millis: 0 } - gte: { test_1.shards.0.start.check_index_time_in_millis: 0 } - gte: { test_1.shards.0.start.total_time_in_millis: 0 } diff --git a/src/main/java/org/elasticsearch/Build.java b/src/main/java/org/elasticsearch/Build.java index d7e6b816cc5..cf3b7de05a6 100644 --- a/src/main/java/org/elasticsearch/Build.java +++ b/src/main/java/org/elasticsearch/Build.java @@ -93,4 +93,9 @@ public class Build { out.writeString(build.hashShort()); out.writeString(build.timestamp()); } + + @Override + public String toString() { + return "[" + hash + "][" + timestamp + "]"; + } } diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java index 66a85cb4e77..e9d9b215657 100644 --- a/src/main/java/org/elasticsearch/Version.java +++ b/src/main/java/org/elasticsearch/Version.java @@ -226,9 +226,11 @@ public class Version { public static final int V_1_4_4_ID = 1040499; public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3); public static final int V_1_4_5_ID = 1040599; - public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3); + public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3); // TODO 1.4.5 -> 1.6 is lucene 4.10.4 we need the constant here public static final int V_1_5_0_ID = 1050099; public static final Version V_1_5_0 = new Version(V_1_5_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3); + public static final int V_1_6_0_ID = 1060099; + public static final Version V_1_6_0 = new Version(V_1_6_0_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_3); public static final int V_2_0_0_ID = 2000099; public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_1_0); @@ -246,6 +248,8 @@ public class Version { switch (id) { case V_2_0_0_ID: return V_2_0_0; + case V_1_6_0_ID: + return V_1_6_0; case V_1_5_0_ID: return V_1_5_0; case V_1_4_5_ID: diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 5edd55796ac..e05dbc60697 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -233,7 +233,6 @@ public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBui /** * Sets index settings that should be added or replaced during restore - * @param settings index settings * @return this builder */ diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java 
b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java index 8fdb2f1f065..fbc620a318c 100644 --- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java +++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java @@ -24,7 +24,12 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; -import org.elasticsearch.action.search.*; +import org.elasticsearch.action.search.ReduceSearchPhaseException; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; @@ -213,7 +218,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java --- a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java + final int maxBytePageCount = (int) (bytesWeight * maxPageCount / totalWeight); + bytePage = build(type, maxBytePageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<byte[]>() { @Override public byte[] newInstance(int sizing) { return new byte[BigArrays.BYTE_PAGE_SIZE]; @@ -114,7 +107,9 @@ public class PageCacheRecycler extends AbstractComponent { // nothing to do } }); - intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<int[]>() { + + final int maxIntPageCount = (int) (intsWeight * maxPageCount / totalWeight); + intPage = build(type, maxIntPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<int[]>() { @Override public int[] newInstance(int sizing) { return new int[BigArrays.INT_PAGE_SIZE]; @@ -124,17 +119,21 @@ public class PageCacheRecycler extends AbstractComponent { // nothing to do } }); - longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<long[]>() { + + final int maxLongPageCount = (int) (longsWeight * maxPageCount / totalWeight); + longPage = build(type, maxLongPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<long[]>() { @Override public long[] newInstance(int sizing) { return new long[BigArrays.LONG_PAGE_SIZE]; } @Override public void recycle(long[] value) { - // nothing to do + // nothing to do } }); - objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<Object[]>() { + + final int maxObjectPageCount = (int) (objectsWeight * maxPageCount / totalWeight); + objectPage = build(type, maxObjectPageCount, searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<Object[]>() { @Override public Object[] newInstance(int sizing) { return new Object[BigArrays.OBJECT_PAGE_SIZE]; @@ -144,6 +143,8 @@ public class PageCacheRecycler extends AbstractComponent { Arrays.fill(value, null); // we need to remove the strong refs on the objects stored in the array } }); + + assert BigArrays.PAGE_SIZE_IN_BYTES * (maxBytePageCount + maxIntPageCount + maxLongPageCount + maxObjectPageCount) <= limit; } public Recycler.V<byte[]> bytePage(boolean clear) { diff --git
a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index a85d80c971c..6dde296c186 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -333,7 +333,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin(); boolean changed = initialize(routingNodes, unassigned); - if (!changed) { + if (!changed && allocation.deciders().canRebalance(allocation).type() == Type.YES) { NodeSorter sorter = newNodeSorter(); if (nodes.size() > 1) { /* skip if we only have one node */ for (String index : buildWeightOrderedIndidces(Operation.BALANCE, sorter)) { diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java index a99e1b87fbe..a6204485d7d 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -80,4 +80,13 @@ public abstract class AllocationDecider extends AbstractComponent { public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) { return Decision.ALWAYS; } + + /** + * Returns a {@link Decision} whether the cluster can execute + * re-balance operations at all. + * Defaults to {@link Decision#ALWAYS}. + */ + public Decision canRebalance(RoutingAllocation allocation) { + return Decision.ALWAYS; + } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index cccbc11535f..f57c48e8a75 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -157,4 +157,23 @@ public class AllocationDeciders extends AllocationDecider { } return ret; } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { + Decision.Multi ret = new Decision.Multi(); + for (AllocationDecider allocationDecider : allocations) { + Decision decision = allocationDecider.canRebalance(allocation); + // short track if a NO is returned.
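+ // when debugDecision is enabled we record the NO instead of returning early, so callers can see every decider's vote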
+ if (decision == Decision.NO) { + if (!allocation.debugDecision()) { + return decision; + } else { + ret.add(decision); + } + } else if (decision != Decision.ALWAYS) { + ret.add(decision); + } + } + return ret; + } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index fc263ee8bd2..f0480c4af7c 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -135,6 +135,11 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + return canRebalance(allocation); + } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { if (type == ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE) { // check if there are unassigned primaries. if ( allocation.routingNodes().hasUnassignedPrimaries() ) { diff --git a/src/main/java/org/elasticsearch/cluster/settings/Validator.java b/src/main/java/org/elasticsearch/cluster/settings/Validator.java index dbd1e3d6479..92c1427a160 100644 --- a/src/main/java/org/elasticsearch/cluster/settings/Validator.java +++ b/src/main/java/org/elasticsearch/cluster/settings/Validator.java @@ -205,6 +205,44 @@ public interface Validator { } }; + public static final Validator PERCENTAGE = new Validator() { + @Override + public String validate(String setting, String value) { + try { + if (value == null) { + return "the value of " + setting + " can not be null"; + } + if (!value.endsWith("%")) { + return "the value [" + value + "] for " + setting + " must end with %"; + } + final double asDouble = Double.parseDouble(value.substring(0, value.length() - 1)); + if (asDouble < 0.0 || asDouble > 100.0) { + return "the value [" + value + "] for " + setting + " must be a percentage between 0% and 100%"; + } + } catch (NumberFormatException ex) { + return ex.getMessage(); + } + return null; + } + }; + + + public static final Validator BYTES_SIZE_OR_PERCENTAGE = new Validator() { + @Override + public String validate(String setting, String value) { + String byteSize = BYTES_SIZE.validate(setting, value); + if (byteSize != null) { + String percentage = PERCENTAGE.validate(setting, value); + if (percentage == null) { + return null; + } + return percentage + " or be a valid bytes size value, like [16mb]"; + } + return null; + } + }; + + public static final Validator MEMORY_SIZE = new Validator() { @Override public String validate(String setting, String value) { diff --git a/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java b/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java index a0d49ff01cf..f044877a981 100644 --- a/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java +++ b/src/main/java/org/elasticsearch/common/http/client/HttpDownloadHelper.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.http.client; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; @@ -277,6 +278,9 @@ public class HttpDownloadHelper { ((HttpURLConnection) connection).setUseCaches(true); 
((HttpURLConnection) connection).setConnectTimeout(5000); } + connection.setRequestProperty("ES-Version", Version.CURRENT.toString()); + connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager"); + // connect to the remote site (may take some time) connection.connect(); diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 4b4c3cd8b89..d71e18b65ea 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -177,7 +177,7 @@ public class Lucene { } } final CommitPoint cp = new CommitPoint(si, directory); - try (IndexWriter _ = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) .setIndexCommit(cp) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) @@ -203,7 +203,7 @@ public class Lucene { } } } - try (IndexWriter _ = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... diff --git a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 90835930afa..02a9725b37b 100644 --- a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -125,7 +125,7 @@ public abstract class TimeZoneRounding extends Rounding { long timeLocal = utcMillis; timeLocal = timeZone.convertUTCToLocal(utcMillis); long rounded = field.roundFloor(timeLocal); - return timeZone.convertLocalToUTC(rounded, true, utcMillis); + return timeZone.convertLocalToUTC(rounded, false, utcMillis); } @Override @@ -139,7 +139,7 @@ public abstract class TimeZoneRounding extends Rounding { long timeLocal = time; timeLocal = timeZone.convertUTCToLocal(time); long nextInLocalTime = durationField.add(timeLocal, 1); - return timeZone.convertLocalToUTC(nextInLocalTime, true); + return timeZone.convertLocalToUTC(nextInLocalTime, false); } @Override @@ -184,7 +184,7 @@ public abstract class TimeZoneRounding extends Rounding { long timeLocal = utcMillis; timeLocal = timeZone.convertUTCToLocal(utcMillis); long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval); - return timeZone.convertLocalToUTC(rounded, true); + return timeZone.convertLocalToUTC(rounded, false); } @Override @@ -198,7 +198,7 @@ public abstract class TimeZoneRounding extends Rounding { long timeLocal = time; timeLocal = timeZone.convertUTCToLocal(time); long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, true); + return timeZone.convertLocalToUTC(next, false); } @Override diff --git a/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 5ae3a1cbfa2..6ad1bbc0d2e 100644 --- a/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -268,6 +268,9 @@ public class TimeValue implements Serializable, Streamable { return timeValue; } + /** + * serialization converts TimeValue internally to 
NANOSECONDS + */ @Override public void readFrom(StreamInput in) throws IOException { duration = in.readLong(); @@ -285,17 +288,12 @@ public class TimeValue implements Serializable, Streamable { if (o == null || getClass() != o.getClass()) return false; TimeValue timeValue = (TimeValue) o; - - if (duration != timeValue.duration) return false; - if (timeUnit != timeValue.timeUnit) return false; - - return true; + return timeUnit.toNanos(duration) == timeValue.timeUnit.toNanos(timeValue.duration); } @Override public int hashCode() { - int result = (int) (duration ^ (duration >>> 32)); - result = 31 * result + (timeUnit != null ? timeUnit.hashCode() : 0); - return result; + long normalized = timeUnit.toNanos(duration); + return (int) (normalized ^ (normalized >>> 32)); } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 752209f9407..d91c7a2f127 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -744,11 +744,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen assert newClusterState.nodes().masterNode() != null : "received a cluster state without a master"; assert !newClusterState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock()) : "received a cluster state with a master block"; - ClusterState currentState = clusterService.state(); - if (shouldIgnoreNewClusterState(logger, currentState, newClusterState)) { - return; - } - clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + newClusterState.nodes().masterNode() + "])", Priority.URGENT, new ProcessedClusterStateNonMasterUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -766,7 +761,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen if (updatedState == null) { updatedState = currentState; } - if (shouldIgnoreNewClusterState(logger, currentState, updatedState)) { + if (shouldIgnoreOrRejectNewClusterState(logger, currentState, updatedState)) { return currentState; } @@ -876,16 +871,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen /** * In the case we follow an elected master the new cluster state needs to have the same elected master and - * the new cluster state version needs to be equal or higher than our cluster state version. If either conditions - * are true then the cluster state is dated and we should ignore it. + * the new cluster state version needs to be equal or higher than our cluster state version. + * If the first condition fails we reject the cluster state and throw an error. + * If the second condition fails we ignore the cluster state.
*/ - static boolean shouldIgnoreNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) { + static boolean shouldIgnoreOrRejectNewClusterState(ESLogger logger, ClusterState currentState, ClusterState newClusterState) { if (currentState.nodes().masterNodeId() == null) { return false; } if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) { - logger.warn("received a cluster state from a different master then the current one, ignoring (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode()); - return true; + logger.warn("received a cluster state from a different master than the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode()); + throw new ElasticsearchIllegalStateException("cluster state from a different master than the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); } else if (newClusterState.version() < currentState.version()) { // if the new state has a smaller version, and it has the same master node, then no need to process it logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index 224deb15768..e8352f389c5 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -21,14 +21,15 @@ package org.elasticsearch.discovery.zen.publish; import com.google.common.collect.Maps; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler; @@ -190,25 +191,34 @@ public class PublishClusterStateAction extends AbstractComponent { ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); clusterState.status(ClusterState.ClusterStateStatus.RECEIVED); logger.debug("received cluster state version {}", clusterState.version()); - listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { - @Override - public void onNewClusterStateProcessed() { - try { - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Throwable e) { - logger.debug("failed to send response on cluster state processed", e); + try { + listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { + @Override + public void onNewClusterStateProcessed() { + try { + 
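// ack the processed cluster state back to the master; a failure to respond is only logged +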
channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Throwable e) { + logger.debug("failed to send response on cluster state processed", e); + } } - } - @Override - public void onNewClusterStateFailed(Throwable t) { - try { - channel.sendResponse(t); - } catch (Throwable e) { - logger.debug("failed to send response on cluster state processed", e); + @Override + public void onNewClusterStateFailed(Throwable t) { + try { + channel.sendResponse(t); + } catch (Throwable e) { + logger.debug("failed to send response on cluster state processed", e); + } } + }); + } catch (Exception e) { + logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version()); + try { + channel.sendResponse(e); + } catch (Throwable e1) { + logger.debug("failed to send response on cluster state processed", e1); } - }); + } } @Override diff --git a/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java new file mode 100644 index 00000000000..c88fdc843c0 --- /dev/null +++ b/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import com.google.common.collect.*; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.env.NodeEnvironment; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * The dangling indices state is responsible for finding new dangling indices (indices that have + * their state written on disk, but don't exist in the metadata of the cluster), and importing + * them into the cluster. + */ +public class DanglingIndicesState extends AbstractComponent { + + private final NodeEnvironment nodeEnv; + private final MetaStateService metaStateService; + private final LocalAllocateDangledIndices allocateDangledIndices; + + private final Map<String, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap(); + + @Inject + public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, + LocalAllocateDangledIndices allocateDangledIndices) { + super(settings); + this.nodeEnv = nodeEnv; + this.metaStateService = metaStateService; + this.allocateDangledIndices = allocateDangledIndices; + } + + /** + * Process dangling indices based on the provided meta data, handling cleanup, finding + * new dangling indices, and allocating outstanding ones.
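+ * Does nothing when the node keeps no on-disk node state ({@code nodeEnv.hasNodeFile()} is false).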
+ */ + public void processDanglingIndices(MetaData metaData) { + if (nodeEnv.hasNodeFile() == false) { + return; + } + cleanupAllocatedDangledIndices(metaData); + findNewAndAddDanglingIndices(metaData); + allocateDanglingIndices(); + } + + /** + * The current set of dangling indices. + */ + Map<String, IndexMetaData> getDanglingIndices() { + return ImmutableMap.copyOf(danglingIndices); + } + + /** + * Cleans dangling indices if they are already allocated on the provided meta data. + */ + void cleanupAllocatedDangledIndices(MetaData metaData) { + for (String danglingIndex : danglingIndices.keySet()) { + if (metaData.hasIndex(danglingIndex)) { + logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex); + danglingIndices.remove(danglingIndex); + } + } + } + + /** + * Finds ({@link #findNewDanglingIndices}) and adds the new dangling indices + * to the currently tracked dangling indices. + */ + void findNewAndAddDanglingIndices(MetaData metaData) { + danglingIndices.putAll(findNewDanglingIndices(metaData)); + } + + /** + * Finds new dangling indices by iterating over the indices and trying to find indices + * that have state on disk, but are not part of the provided meta data, or not detected + * as dangled already. + */ + Map<String, IndexMetaData> findNewDanglingIndices(MetaData metaData) { + final Set<String> indices; + try { + indices = nodeEnv.findAllIndices(); + } catch (Throwable e) { + logger.warn("failed to list dangling indices", e); + return ImmutableMap.of(); + } + + Map<String, IndexMetaData> newIndices = Maps.newHashMap(); + for (String indexName : indices) { + if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) { + try { + IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName); + if (indexMetaData != null) { + logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName); + if (!indexMetaData.index().equals(indexName)) { + logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index()); + indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build(); + } + newIndices.put(indexName, indexMetaData); + } else { + logger.debug("[{}] dangling index directory detected, but no state found", indexName); + } + } catch (Throwable t) { + logger.warn("[{}] failed to load index state for detected dangled index", t, indexName); + } + } + } + return newIndices; + } + + /** + * Allocates the provided list of the dangled indices by sending them to the master node + * for allocation.
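+ * Returns immediately when there are currently no dangling indices to allocate.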
+ */ + private void allocateDanglingIndices() { + if (danglingIndices.isEmpty() == true) { + return; + } + try { + allocateDangledIndices.allocateDangled(ImmutableList.copyOf(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() { + @Override + public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { + logger.trace("allocated dangled"); + } + + @Override + public void onFailure(Throwable e) { + logger.info("failed to send allocated dangled", e); + } + }); + } catch (Throwable e) { + logger.warn("failed to send allocate dangled", e); + } + } +} diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 2700b5d3a0b..34793f214b5 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -19,11 +19,7 @@ package org.elasticsearch.gateway; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -41,136 +37,38 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.ShardLock; -import org.elasticsearch.index.Index; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ScheduledFuture; -import java.util.regex.Pattern; /** * */ public class GatewayMetaState extends AbstractComponent implements ClusterStateListener { - static final String GLOBAL_STATE_FILE_PREFIX = "global-"; - private static final String INDEX_STATE_FILE_PREFIX = "state-"; - static final Pattern GLOBAL_STATE_FILE_PATTERN = Pattern.compile(GLOBAL_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?"); - static final Pattern INDEX_STATE_FILE_PATTERN = Pattern.compile(INDEX_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?"); - private static final String GLOBAL_STATE_LOG_TYPE = "[_global]"; private static final String DEPRECATED_SETTING_ROUTING_HASH_FUNCTION = "cluster.routing.operation.hash.type"; private static final String DEPRECATED_SETTING_ROUTING_USE_TYPE = "cluster.routing.operation.use_type"; - public static final String GATEWAY_DANGLING_TIMEOUT = "gateway.dangling_timeout"; - public static final String GATEWAY_DELETE_TIMEOUT = "gateway.delete_timeout"; - public static final String GATEWAY_AUTO_IMPORT_DANGLED = "gateway.auto_import_dangled"; - // legacy - this used to be in a different package - 
private static final String GATEWAY_LOCAL_DANGLING_TIMEOUT = "gateway.local.dangling_timeout"; - private static final String GATEWAY_LOCAL_AUTO_IMPORT_DANGLED = "gateway.local.auto_import_dangled"; - - static enum AutoImportDangledState { - NO() { - @Override - public boolean shouldImport() { - return false; - } - }, - YES() { - @Override - public boolean shouldImport() { - return true; - } - }, - CLOSED() { - @Override - public boolean shouldImport() { - return true; - } - }; - - public abstract boolean shouldImport(); - - public static AutoImportDangledState fromString(String value) { - if ("no".equalsIgnoreCase(value)) { - return NO; - } else if ("yes".equalsIgnoreCase(value)) { - return YES; - } else if ("closed".equalsIgnoreCase(value)) { - return CLOSED; - } else { - throw new ElasticsearchIllegalArgumentException("failed to parse [" + value + "], not a valid auto dangling import type"); - } - } - } private final NodeEnvironment nodeEnv; - private final ThreadPool threadPool; - - private final LocalAllocateDangledIndices allocateDangledIndices; + private final MetaStateService metaStateService; + private final DanglingIndicesState danglingIndicesState; @Nullable private volatile MetaData currentMetaData; - private final XContentType format; - private final ToXContent.Params formatParams; - private final ToXContent.Params gatewayModeFormatParams; - - - private final AutoImportDangledState autoImportDangled; - private final TimeValue danglingTimeout; - private final TimeValue deleteTimeout; - private final Map danglingIndices = ConcurrentCollections.newConcurrentMap(); - private final Object danglingMutex = new Object(); - private final IndicesService indicesService; - @Inject - public GatewayMetaState(Settings settings, ThreadPool threadPool, NodeEnvironment nodeEnv, - TransportNodesListGatewayMetaState nodesListGatewayMetaState, LocalAllocateDangledIndices allocateDangledIndices, - IndicesService indicesService) throws Exception { + public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, + DanglingIndicesState danglingIndicesState, TransportNodesListGatewayMetaState nodesListGatewayMetaState) throws Exception { super(settings); this.nodeEnv = nodeEnv; - this.threadPool = threadPool; - this.format = XContentType.fromRestContentType(settings.get("format", "smile")); - this.allocateDangledIndices = allocateDangledIndices; + this.metaStateService = metaStateService; + this.danglingIndicesState = danglingIndicesState; nodesListGatewayMetaState.init(this); - if (this.format == XContentType.SMILE) { - Map params = Maps.newHashMap(); - params.put("binary", "true"); - formatParams = new ToXContent.MapParams(params); - Map gatewayModeParams = Maps.newHashMap(); - gatewayModeParams.put("binary", "true"); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } else { - formatParams = ToXContent.EMPTY_PARAMS; - Map gatewayModeParams = Maps.newHashMap(); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } - this.autoImportDangled = AutoImportDangledState.fromString(settings.get(GATEWAY_AUTO_IMPORT_DANGLED, settings.get(GATEWAY_LOCAL_AUTO_IMPORT_DANGLED, AutoImportDangledState.YES.toString()))); - this.danglingTimeout = settings.getAsTime(GATEWAY_DANGLING_TIMEOUT, settings.getAsTime(GATEWAY_LOCAL_DANGLING_TIMEOUT, 
TimeValue.timeValueHours(2))); - this.deleteTimeout = settings.getAsTime(GATEWAY_DELETE_TIMEOUT, TimeValue.timeValueSeconds(30)); - - logger.debug("using {} [{}], {} [{}], with {} [{}]", - GATEWAY_AUTO_IMPORT_DANGLED, this.autoImportDangled, - GATEWAY_DELETE_TIMEOUT, this.deleteTimeout, - GATEWAY_DANGLING_TIMEOUT, this.danglingTimeout); if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { nodeEnv.ensureAtomicMoveSupported(); } @@ -179,18 +77,17 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL ensureNoPre019State(); pre20Upgrade(); long start = System.currentTimeMillis(); - loadState(); + metaStateService.loadFullState(); logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start)); } catch (Exception e) { logger.error("failed to read local state, exiting...", e); throw e; } } - this.indicesService = indicesService; } public MetaData loadMetaState() throws Exception { - return loadState(); + return metaStateService.loadFullState(); } @Override @@ -211,7 +108,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // check if the global state changed? if (currentMetaData == null || !MetaData.isGlobalStateEquals(currentMetaData, newMetaData)) { try { - writeGlobalState("changed", newMetaData); + metaStateService.writeGlobalState("changed", newMetaData); } catch (Throwable e) { success = false; } @@ -224,7 +121,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (currentMetaData == null) { // a new event..., check from the state stored try { - currentIndexMetaData = loadIndexState(indexMetaData.index()); + currentIndexMetaData = metaStateService.loadIndexState(indexMetaData.index()); } catch (IOException ex) { throw new ElasticsearchException("failed to load index state", ex); } @@ -243,198 +140,20 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } try { - writeIndex(writeReason, indexMetaData, currentIndexMetaData); + metaStateService.writeIndex(writeReason, indexMetaData, currentIndexMetaData); } catch (Throwable e) { success = false; } } } - // handle dangling indices, we handle those for all nodes that have a node file (data or master) - if (nodeEnv.hasNodeFile()) { - if (danglingTimeout.millis() >= 0) { - synchronized (danglingMutex) { - for (String danglingIndex : danglingIndices.keySet()) { - if (newMetaData.hasIndex(danglingIndex)) { - logger.debug("[{}] no longer dangling (created), removing", danglingIndex); - DanglingIndex removed = danglingIndices.remove(danglingIndex); - FutureUtils.cancel(removed.future); - } - } - // delete indices that are no longer part of the metadata - try { - for (String indexName : nodeEnv.findAllIndices()) { - // if we have the index on the metadata, don't delete it - if (newMetaData.hasIndex(indexName)) { - continue; - } - if (danglingIndices.containsKey(indexName)) { - // already dangling, continue - continue; - } - final IndexMetaData indexMetaData = loadIndexState(indexName); - if (indexMetaData != null) { - if(autoImportDangled.shouldImport()){ - logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state [{}]", indexName, autoImportDangled); - danglingIndices.put(indexName, new DanglingIndex(indexName, null)); - } else if (danglingTimeout.millis() == 0) { - logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, timeout set to 0, deleting now", indexName); 
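[review note] The block being removed here reduces to a three-way decision per dangling index: auto-import it, delete it immediately, or schedule a delayed delete. A condensed, self-contained sketch of that flow (illustrative names, not the real classes; the defaults mirror the removed code):

```java
import java.util.concurrent.TimeUnit;

public class DanglingDecisionSketch {
    enum AutoImport {
        NO, YES, CLOSED;
        boolean shouldImport() { return this != NO; } // CLOSED imports too, just in closed state
    }

    private final AutoImport autoImport;
    private final long danglingTimeoutMillis;

    DanglingDecisionSketch(AutoImport autoImport, long danglingTimeoutMillis) {
        this.autoImport = autoImport;
        this.danglingTimeoutMillis = danglingTimeoutMillis;
    }

    String decide(String indexName) {
        if (autoImport.shouldImport()) {
            return "auto import [" + indexName + "] into the cluster state";
        } else if (danglingTimeoutMillis == 0) {
            return "delete [" + indexName + "] now";
        } else {
            return "schedule [" + indexName + "] for deletion in " + danglingTimeoutMillis + "ms";
        }
    }

    public static void main(String[] args) {
        // defaults in the removed code: auto import = yes, dangling timeout = 2h
        DanglingDecisionSketch sketch = new DanglingDecisionSketch(AutoImport.YES, TimeUnit.HOURS.toMillis(2));
        System.out.println(sketch.decide("orphaned-index"));
    }
}
```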
- indicesService.deleteIndexStore("dangling index with timeout set to 0", indexMetaData); - } else { - logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, scheduling to delete in [{}], auto import to cluster state [{}]", indexName, danglingTimeout, autoImportDangled); - danglingIndices.put(indexName, - new DanglingIndex(indexName, - threadPool.schedule(danglingTimeout, - ThreadPool.Names.SAME, - new RemoveDanglingIndex(indexMetaData)))); - } - } - } - } catch (Throwable e) { - logger.warn("failed to find dangling indices", e); - } - } - } - if (autoImportDangled.shouldImport() && !danglingIndices.isEmpty()) { - final List dangled = Lists.newArrayList(); - for (String indexName : danglingIndices.keySet()) { - IndexMetaData indexMetaData; - try { - indexMetaData = loadIndexState(indexName); - } catch (IOException ex) { - throw new ElasticsearchException("failed to load index state", ex); - } - if (indexMetaData == null) { - logger.debug("failed to find state for dangling index [{}]", indexName); - continue; - } - // we might have someone copying over an index, renaming the directory, handle that - if (!indexMetaData.index().equals(indexName)) { - logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index()); - indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build(); - } - if (autoImportDangled == AutoImportDangledState.CLOSED) { - indexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).build(); - } - if (indexMetaData != null) { - dangled.add(indexMetaData); - } - } - IndexMetaData[] dangledIndices = dangled.toArray(new IndexMetaData[dangled.size()]); - try { - allocateDangledIndices.allocateDangled(dangledIndices, new LocalAllocateDangledIndices.Listener() { - @Override - public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { - logger.trace("allocated dangled"); - } - - @Override - public void onFailure(Throwable e) { - logger.info("failed to send allocated dangled", e); - } - }); - } catch (Throwable e) { - logger.warn("failed to send allocate dangled", e); - } - } - } + danglingIndicesState.processDanglingIndices(newMetaData); if (success) { currentMetaData = newMetaData; } } - /** - * Returns a StateFormat that can read and write {@link MetaData} - */ - static MetaDataStateFormat globalStateFormat(XContentType format, final ToXContent.Params formatParams, final boolean deleteOldFiles) { - return new MetaDataStateFormat(format, deleteOldFiles) { - - @Override - public void toXContent(XContentBuilder builder, MetaData state) throws IOException { - MetaData.Builder.toXContent(state, builder, formatParams); - } - - @Override - public MetaData fromXContent(XContentParser parser) throws IOException { - return MetaData.Builder.fromXContent(parser); - } - }; - } - - /** - * Returns a StateFormat that can read and write {@link IndexMetaData} - */ - static MetaDataStateFormat indexStateFormat(XContentType format, final ToXContent.Params formatParams, boolean deleteOldFiles) { - return new MetaDataStateFormat(format, deleteOldFiles) { - - @Override - public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { - IndexMetaData.Builder.toXContent(state, builder, formatParams); } - - @Override - public IndexMetaData fromXContent(XContentParser parser) throws IOException { - return IndexMetaData.Builder.fromXContent(parser); - } - }; - } - - private void writeIndex(String reason, 
IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception { - logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason); - final boolean deleteOldFiles = previousIndexMetaData != null && previousIndexMetaData.version() != indexMetaData.version(); - final MetaDataStateFormat writer = indexStateFormat(format, formatParams, deleteOldFiles); - try { - writer.write(indexMetaData, INDEX_STATE_FILE_PREFIX, indexMetaData.version(), - nodeEnv.indexPaths(new Index(indexMetaData.index()))); - } catch (Throwable ex) { - logger.warn("[{}]: failed to write index state", ex, indexMetaData.index()); - throw new IOException("failed to write state for [" + indexMetaData.index() + "]", ex); - } - } - - private void writeGlobalState(String reason, MetaData metaData) throws Exception { - logger.trace("{} writing state, reason [{}]", GLOBAL_STATE_LOG_TYPE, reason); - final MetaDataStateFormat writer = globalStateFormat(format, gatewayModeFormatParams, true); - try { - writer.write(metaData, GLOBAL_STATE_FILE_PREFIX, metaData.version(), nodeEnv.nodeDataPaths()); - } catch (Throwable ex) { - logger.warn("{}: failed to write global state", ex, GLOBAL_STATE_LOG_TYPE); - throw new IOException("failed to write global state", ex); - } - } - - private MetaData loadState() throws Exception { - MetaData globalMetaData = loadGlobalState(); - MetaData.Builder metaDataBuilder; - if (globalMetaData != null) { - metaDataBuilder = MetaData.builder(globalMetaData); - } else { - metaDataBuilder = MetaData.builder(); - } - - final Set indices = nodeEnv.findAllIndices(); - for (String index : indices) { - IndexMetaData indexMetaData = loadIndexState(index); - if (indexMetaData == null) { - logger.debug("[{}] failed to find metadata for existing index location", index); - } else { - metaDataBuilder.put(indexMetaData, false); - } - } - return metaDataBuilder.build(); - } - - @Nullable - private IndexMetaData loadIndexState(String index) throws IOException { - return MetaDataStateFormat.loadLatestState(logger, indexStateFormat(format, formatParams, true), - INDEX_STATE_FILE_PATTERN, "[" + index + "]", nodeEnv.indexPaths(new Index(index))); - } - - private MetaData loadGlobalState() throws IOException { - return MetaDataStateFormat.loadLatestState(logger, globalStateFormat(format, gatewayModeFormatParams, true), GLOBAL_STATE_FILE_PATTERN, GLOBAL_STATE_LOG_TYPE, nodeEnv.nodeDataPaths()); - } - - /** * Throws an IAE if a pre 0.19 state is detected */ @@ -497,7 +216,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL .version(indexMetaData.version()) .settings(indexSettings) .build(); - writeIndex("upgrade", newMetaData, null); + metaStateService.writeIndex("upgrade", newMetaData, null); } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) { if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) { @@ -511,41 +230,4 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); } } - - class RemoveDanglingIndex implements Runnable { - - private final IndexMetaData metaData; - - RemoveDanglingIndex(IndexMetaData metaData) { - this.metaData = metaData; - } - - @Override - public void run() { - 
synchronized (danglingMutex) { - DanglingIndex remove = danglingIndices.remove(metaData.index()); - // no longer there... - if (remove == null) { - return; - } - logger.warn("[{}] deleting dangling index", metaData.index()); - try { - indicesService.deleteIndexStore("deleting dangling index", metaData); - } catch (Exception ex) { - logger.debug("failed to delete dangling index", ex); - } - } - } - } - - static class DanglingIndex { - public final String index; - public final ScheduledFuture future; - - DanglingIndex(String index, ScheduledFuture future) { - this.index = index; - this.future = future; - } - } - } diff --git a/src/main/java/org/elasticsearch/gateway/GatewayModule.java b/src/main/java/org/elasticsearch/gateway/GatewayModule.java index 97df53d16b0..6e061fb292d 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayModule.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayModule.java @@ -28,6 +28,8 @@ public class GatewayModule extends AbstractModule { @Override protected void configure() { + bind(MetaStateService.class).asEagerSingleton(); + bind(DanglingIndicesState.class).asEagerSingleton(); bind(GatewayService.class).asEagerSingleton(); bind(Gateway.class).asEagerSingleton(); bind(GatewayShardsState.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 06d858d1bb4..bf0da6f20fc 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.*; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; /** */ @@ -62,14 +63,14 @@ public class LocalAllocateDangledIndices extends AbstractComponent { transportService.registerHandler(ACTION_NAME, new AllocateDangledRequestHandler()); } - public void allocateDangled(IndexMetaData[] indices, final Listener listener) { + public void allocateDangled(Collection indices, final Listener listener) { ClusterState clusterState = clusterService.state(); DiscoveryNode masterNode = clusterState.nodes().masterNode(); if (masterNode == null) { listener.onFailure(new MasterNotDiscoveredException("no master to send allocate dangled request")); return; } - AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices); + AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices.toArray(new IndexMetaData[indices.size()])); transportService.sendRequest(masterNode, ACTION_NAME, request, new TransportResponseHandler() { @Override public AllocateDangledResponse newInstance() { diff --git a/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/src/main/java/org/elasticsearch/gateway/MetaStateService.java new file mode 100644 index 00000000000..48e2d6e48cb --- /dev/null +++ b/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -0,0 +1,187 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gateway; + +import com.google.common.collect.Maps; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +/** + * Handles writing and loading both {@link MetaData} and {@link IndexMetaData} + */ +public class MetaStateService extends AbstractComponent { + + static final String FORMAT_SETTING = "gateway.format"; + + static final String GLOBAL_STATE_FILE_PREFIX = "global-"; + private static final String INDEX_STATE_FILE_PREFIX = "state-"; + static final Pattern GLOBAL_STATE_FILE_PATTERN = Pattern.compile(GLOBAL_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?"); + static final Pattern INDEX_STATE_FILE_PATTERN = Pattern.compile(INDEX_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?"); + private static final String GLOBAL_STATE_LOG_TYPE = "[_global]"; + + private final NodeEnvironment nodeEnv; + + private final XContentType format; + private final ToXContent.Params formatParams; + private final ToXContent.Params gatewayModeFormatParams; + + @Inject + public MetaStateService(Settings settings, NodeEnvironment nodeEnv) { + super(settings); + this.nodeEnv = nodeEnv; + this.format = XContentType.fromRestContentType(settings.get(FORMAT_SETTING, "smile")); + + if (this.format == XContentType.SMILE) { + Map params = Maps.newHashMap(); + params.put("binary", "true"); + formatParams = new ToXContent.MapParams(params); + Map gatewayModeParams = Maps.newHashMap(); + gatewayModeParams.put("binary", "true"); + gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); + gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); + } else { + formatParams = ToXContent.EMPTY_PARAMS; + Map gatewayModeParams = Maps.newHashMap(); + gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); + gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); + } + } + + /** + * Loads the full state, which includes both the global state and all the indices + * meta state. 
+ */ + MetaData loadFullState() throws Exception { + MetaData globalMetaData = loadGlobalState(); + MetaData.Builder metaDataBuilder; + if (globalMetaData != null) { + metaDataBuilder = MetaData.builder(globalMetaData); + } else { + metaDataBuilder = MetaData.builder(); + } + + final Set indices = nodeEnv.findAllIndices(); + for (String index : indices) { + IndexMetaData indexMetaData = loadIndexState(index); + if (indexMetaData == null) { + logger.debug("[{}] failed to find metadata for existing index location", index); + } else { + metaDataBuilder.put(indexMetaData, false); + } + } + return metaDataBuilder.build(); + } + + /** + * Loads the index state for the provided index name, returning null if it doesn't exist. + */ + @Nullable + IndexMetaData loadIndexState(String index) throws IOException { + return MetaDataStateFormat.loadLatestState(logger, indexStateFormat(format, formatParams, true), + INDEX_STATE_FILE_PATTERN, "[" + index + "]", nodeEnv.indexPaths(new Index(index))); + } + + /** + * Loads the global state, *without* index state; see {@link #loadFullState()} for that. + */ + MetaData loadGlobalState() throws IOException { + return MetaDataStateFormat.loadLatestState(logger, globalStateFormat(format, gatewayModeFormatParams, true), GLOBAL_STATE_FILE_PATTERN, GLOBAL_STATE_LOG_TYPE, nodeEnv.nodeDataPaths()); + } + + /** + * Writes the index state. + */ + void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception { + logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason); + final boolean deleteOldFiles = previousIndexMetaData != null && previousIndexMetaData.version() != indexMetaData.version(); + final MetaDataStateFormat writer = indexStateFormat(format, formatParams, deleteOldFiles); + try { + writer.write(indexMetaData, INDEX_STATE_FILE_PREFIX, indexMetaData.version(), + nodeEnv.indexPaths(new Index(indexMetaData.index()))); + } catch (Throwable ex) { + logger.warn("[{}]: failed to write index state", ex, indexMetaData.index()); + throw new IOException("failed to write state for [" + indexMetaData.index() + "]", ex); + } + } + + /** + * Writes the global state, *without* the indices states. 
+ */ + void writeGlobalState(String reason, MetaData metaData) throws Exception { + logger.trace("{} writing state, reason [{}]", GLOBAL_STATE_LOG_TYPE, reason); + final MetaDataStateFormat writer = globalStateFormat(format, gatewayModeFormatParams, true); + try { + writer.write(metaData, GLOBAL_STATE_FILE_PREFIX, metaData.version(), nodeEnv.nodeDataPaths()); + } catch (Throwable ex) { + logger.warn("{}: failed to write global state", ex, GLOBAL_STATE_LOG_TYPE); + throw new IOException("failed to write global state", ex); + } + } + + /** + * Returns a StateFormat that can read and write {@link MetaData} + */ + static MetaDataStateFormat globalStateFormat(XContentType format, final ToXContent.Params formatParams, final boolean deleteOldFiles) { + return new MetaDataStateFormat(format, deleteOldFiles) { + + @Override + public void toXContent(XContentBuilder builder, MetaData state) throws IOException { + MetaData.Builder.toXContent(state, builder, formatParams); + } + + @Override + public MetaData fromXContent(XContentParser parser) throws IOException { + return MetaData.Builder.fromXContent(parser); + } + }; + } + + /** + * Returns a StateFormat that can read and write {@link IndexMetaData} + */ + static MetaDataStateFormat indexStateFormat(XContentType format, final ToXContent.Params formatParams, boolean deleteOldFiles) { + return new MetaDataStateFormat(format, deleteOldFiles) { + + @Override + public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { + IndexMetaData.Builder.toXContent(state, builder, formatParams); } + + @Override + public IndexMetaData fromXContent(XContentParser parser) throws IOException { + return IndexMetaData.Builder.fromXContent(parser); + } + }; + } +} diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index 69bba98bc42..0ab816e9a97 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -489,13 +489,8 @@ public abstract class Engine implements Closeable { /** Check whether the engine should be failed */ protected boolean maybeFailEngine(String source, Throwable t) { if (Lucene.isCorruptionException(t)) { - if (engineConfig.isFailEngineOnCorruption()) { - failEngine("corrupt file detected source: [" + source + "]", t); - return true; - } else { - logger.warn("corrupt file detected source: [{}] but [{}] is set to [{}]", t, source, - EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption()); - } + failEngine("corrupt file detected source: [" + source + "]", t); + return true; } else if (ExceptionsHelper.isOOM(t)) { failEngine("out of memory", t); return true; diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 79ab1ead7a6..1cb3efd7d3f 100644 --- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -23,7 +23,6 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -50,9 +49,9 @@ import java.util.concurrent.TimeUnit; */ public final 
class EngineConfig { private final ShardId shardId; - private volatile boolean failOnMergeFailure = true; - private volatile boolean failEngineOnCorruption = true; private volatile ByteSizeValue indexingBufferSize; + private volatile ByteSizeValue versionMapSize; + private volatile String versionMapSizeSetting; private final int indexConcurrency; private volatile boolean compoundOnFlush = true; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -99,18 +98,6 @@ public final class EngineConfig { */ public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes"; - /** - * Index setting to enable / disable engine failures on merge exceptions. Default is true / enabled. - * This setting is realtime updateable. - */ - public static final String INDEX_FAIL_ON_MERGE_FAILURE_SETTING = "index.fail_on_merge_failure"; - - /** - * Index setting to enable / disable engine failures on detected index corruptions. Default is true / enabled. - * This setting is realtime updateable. - */ - public static final String INDEX_FAIL_ON_CORRUPTION_SETTING = "index.fail_on_corruption"; - /** * Index setting to control the initial index buffer size. * This setting is not realtime updateable. @@ -123,11 +110,25 @@ public final class EngineConfig { */ public static final String INDEX_CODEC_SETTING = "index.codec"; + /** + * Index setting to enable / disable checksum checks on merge + * This setting is realtime updateable. + */ + public static final String INDEX_CHECKSUM_ON_MERGE = "index.checksum_on_merge"; + + /** + * The maximum size the version map should grow to before issuing a refresh. Can be an absolute value or a percentage of + * the current index memory buffer (defaults to 25%) + */ + public static final String INDEX_VERSION_MAP_SIZE = "index.version_map_size"; + public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); public static final ByteSizeValue DEFAUTL_INDEX_BUFFER_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB); public static final ByteSizeValue INACTIVE_SHARD_INDEXING_BUFFER = ByteSizeValue.parseBytesSizeValue("500kb"); + public static final String DEFAULT_VERSION_MAP_SIZE = "25%"; + private static final String DEFAULT_CODEC_NAME = "default"; @@ -155,9 +156,42 @@ public final class EngineConfig { this.indexConcurrency = indexSettings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65))); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); indexingBufferSize = indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAUTL_INDEX_BUFFER_SIZE); - failEngineOnCorruption = indexSettings.getAsBoolean(INDEX_FAIL_ON_CORRUPTION_SETTING, true); - failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE_SETTING, true); gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis(); + versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE); + updateVersionMapSize(); + } + + /** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */ + private void updateVersionMapSize() { + if (versionMapSizeSetting.endsWith("%")) { + double percent = Double.parseDouble(versionMapSizeSetting.substring(0, versionMapSizeSetting.length() - 1)); + versionMapSize = new ByteSizeValue((long) 
(((double) indexingBufferSize.bytes() * (percent / 100)))); + } else { + versionMapSize = ByteSizeValue.parseBytesSizeValue(versionMapSizeSetting); + } + } + + /** + * Sets the version map size that should trigger a refresh. See {@link #INDEX_VERSION_MAP_SIZE} for details. + */ + public void setVersionMapSizeSetting(String versionMapSizeSetting) { + this.versionMapSizeSetting = versionMapSizeSetting; + updateVersionMapSize(); + } + + /** + * Current setting for the version map size that should trigger a refresh. See {@link #INDEX_VERSION_MAP_SIZE} for details. + */ + public String getVersionMapSizeSetting() { + return versionMapSizeSetting; + } + + + /** + * Returns the size of the version map that should trigger a refresh. + */ + public ByteSizeValue getVersionMapSize() { + return versionMapSize; } /** @@ -165,6 +199,7 @@ public final class EngineConfig { */ public void setIndexingBufferSize(ByteSizeValue indexingBufferSize) { this.indexingBufferSize = indexingBufferSize; + updateVersionMapSize(); } /** @@ -176,20 +211,6 @@ public final class EngineConfig { this.enableGcDeletes = enableGcDeletes; } - /** - * Returns true iff the engine should be failed if a merge error is hit. Defaults to true - */ - public boolean isFailOnMergeFailure() { - return failOnMergeFailure; - } - - /** - * Returns true if the engine should be failed in the case of a corrupted index. Defaults to true - */ - public boolean isFailEngineOnCorruption() { - return failEngineOnCorruption; - } - /** * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link org.elasticsearch.indices.memory.IndexingMemoryController} */ @@ -370,18 +391,4 @@ public final class EngineConfig { public void setCompoundOnFlush(boolean compoundOnFlush) { this.compoundOnFlush = compoundOnFlush; } - - /** - * Sets if the engine should be failed in the case of a corrupted index. Defaults to true - */ - public void setFailEngineOnCorruption(boolean failEngineOnCorruption) { - this.failEngineOnCorruption = failEngineOnCorruption; - } - - /** - * Sets if the engine should be failed if a merge error is hit. 
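[review note] Because `INDEX_VERSION_MAP_SIZE` accepts either a percentage of the indexing buffer or an absolute byte size, a stand-alone sketch of how `updateVersionMapSize` interprets the value may help; `parseBytes` here is a simplified stand-in for `ByteSizeValue.parseBytesSizeValue`, not the real parser:

```java
public class VersionMapSizeDemo {
    // Simplified stand-in for ByteSizeValue parsing: accepts plain bytes or "kb"/"mb" suffixes.
    static long parseBytes(String v) {
        v = v.trim().toLowerCase();
        if (v.endsWith("kb")) return Long.parseLong(v.substring(0, v.length() - 2)) * 1024;
        if (v.endsWith("mb")) return Long.parseLong(v.substring(0, v.length() - 2)) * 1024 * 1024;
        return Long.parseLong(v);
    }

    // Mirrors updateVersionMapSize: "25%" is relative to the indexing buffer, anything else is absolute.
    static long versionMapTriggerBytes(String setting, long indexingBufferBytes) {
        if (setting.endsWith("%")) {
            double percent = Double.parseDouble(setting.substring(0, setting.length() - 1));
            return (long) (indexingBufferBytes * (percent / 100));
        }
        return parseBytes(setting);
    }

    public static void main(String[] args) {
        long buffer = 64L * 1024 * 1024; // a 64mb indexing buffer
        System.out.println(versionMapTriggerBytes("25%", buffer));   // 16777216 = 25% of 64mb
        System.out.println(versionMapTriggerBytes("512kb", buffer)); // 524288, independent of the buffer
    }
}
```

A percentage is re-resolved whenever the indexing buffer changes, which is why `setIndexingBufferSize` above now calls `updateVersionMapSize()`.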
Defaults to true - */ - public void setFailOnMergeFailure(boolean failOnMergeFailure) { - this.failOnMergeFailure = failOnMergeFailure; - } } diff --git a/src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/ForceMergeFailedEngineException.java similarity index 82% rename from src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java rename to src/main/java/org/elasticsearch/index/engine/ForceMergeFailedEngineException.java index 442f7b0537b..e4590f12b30 100644 --- a/src/main/java/org/elasticsearch/index/engine/OptimizeFailedEngineException.java +++ b/src/main/java/org/elasticsearch/index/engine/ForceMergeFailedEngineException.java @@ -24,9 +24,9 @@ import org.elasticsearch.index.shard.ShardId; /** * */ -public class OptimizeFailedEngineException extends EngineException { +public class ForceMergeFailedEngineException extends EngineException { - public OptimizeFailedEngineException(ShardId shardId, Throwable t) { - super(shardId, "Optimize failed", t); + public ForceMergeFailedEngineException(ShardId shardId, Throwable t) { + super(shardId, "force merge failed", t); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index d05ed4512d5..37c4396f544 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -52,7 +52,6 @@ import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.lang.reflect.Method; import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -83,11 +82,11 @@ public class InternalEngine extends Engine { private final SearcherFactory searcherFactory; private final SearcherManager searcherManager; - private final AtomicBoolean optimizeMutex = new AtomicBoolean(); // we use flushNeeded here, since if there are no changes, then the commit won't write // will not really happen, and then the commitUserData and the new translog will not be reflected private volatile boolean flushNeeded = false; private final Lock flushLock = new ReentrantLock(); + private final ReentrantLock optimizeLock = new ReentrantLock(); protected final FlushingRecoveryCounter onGoingRecoveries; // A uid (in the form of BytesRef) to the version map @@ -203,7 +202,7 @@ public class InternalEngine extends Engine { @Override public GetResult get(Get get) throws EngineException { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (get.realtime()) { VersionValue versionValue = versionMap.getUnderLock(get.uid().bytes()); @@ -232,7 +231,7 @@ public class InternalEngine extends Engine { @Override public void create(Create create) throws EngineException { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (create.origin() == Operation.Origin.RECOVERY) { // Don't throttle recovery operations @@ -338,7 +337,7 @@ public class InternalEngine extends Engine { @Override public void index(Index index) throws EngineException { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); if (index.origin() == Operation.Origin.RECOVERY) { // Don't throttle recovery operations @@ -357,11 +356,10 @@ public 
class InternalEngine extends Engine { } /** - * Forces a refresh if the versionMap is using too much RAM (currently > 25% of IndexWriter's RAM buffer). + * Forces a refresh if the versionMap is using too much RAM */ private void checkVersionMapRefresh() { - // TODO: we force refresh when versionMap is using > 25% of IW's RAM buffer; should we make this separately configurable? - if (versionMap.ramBytesUsedForRefresh() > 0.25 * engineConfig.getIndexingBufferSize().bytes() && versionMapRefreshPending.getAndSet(true) == false) { + if (versionMap.ramBytesUsedForRefresh() > config().getVersionMapSize().bytes() && versionMapRefreshPending.getAndSet(true) == false) { try { if (isClosed.get()) { // no point... @@ -438,8 +436,9 @@ public class InternalEngine extends Engine { @Override public void delete(Delete delete) throws EngineException { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); + // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments: innerDelete(delete); flushNeeded = true; } catch (OutOfMemoryError | IllegalStateException | IOException t) { @@ -505,8 +504,21 @@ public class InternalEngine extends Engine { @Override public void delete(DeleteByQuery delete) throws EngineException { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); + if (delete.origin() == Operation.Origin.RECOVERY) { + // Don't throttle recovery operations + innerDelete(delete); + } else { + try (Releasable r = throttle.acquireThrottle()) { + innerDelete(delete); + } + } + } + } + + private void innerDelete(DeleteByQuery delete) throws EngineException { + try { Query query; if (delete.nested() && delete.aliasFilter() != null) { query = new IncludeNestedDocsQuery(new FilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter()); @@ -535,7 +547,7 @@ public class InternalEngine extends Engine { public void refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); updateIndexWriterSettings(); searcherManager.maybeRefreshBlocking(); @@ -580,7 +592,7 @@ public class InternalEngine extends Engine { * Thread 1: flushes via API and gets the flush lock but blocks on the readlock since Thread 2 has the writeLock * Thread 2: flushes at the end of the recovery holding the writeLock and blocks on the flushLock owned by Thread 1 */ - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); updateIndexWriterSettings(); if (flushLock.tryLock() == false) { @@ -640,8 +652,15 @@ public class InternalEngine extends Engine { } } - // reread the last committed segment infos + /* + * we have to inc-ref the store here since if the engine is closed by a tragic event + * we don't acquire the write lock and wait until we have exclusive access. This might also + * dec the store reference which can essentially close the store and unless we can inc the reference + * we can't use it. 
+ */ + store.incRef(); try { + // reread the last committed segment infos lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); } catch (Throwable e) { if (isClosed.get() == false) { @@ -650,6 +669,8 @@ public class InternalEngine extends Engine { throw new FlushFailedEngineException(shardId, e); } } + } finally { + store.decRef(); } } catch (FlushFailedEngineException ex) { maybeFailEngine("flush", ex); @@ -688,58 +709,59 @@ public class InternalEngine extends Engine { lastDeleteVersionPruneTimeMSec = timeMSec; } - // TODO: can we please remove this method?! - private void waitForMerges(boolean flushAfter, boolean upgrade) { - try { - Method method = IndexWriter.class.getDeclaredMethod("waitForMerges"); - method.setAccessible(true); - method.invoke(indexWriter); - } catch (ReflectiveOperationException e) { - throw new OptimizeFailedEngineException(shardId, e); - } - if (flushAfter) { - flush(true, true, true); - } - if (upgrade) { - logger.info("Finished upgrade of " + shardId); - } - } - @Override public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, final boolean upgrade) throws EngineException { - if (optimizeMutex.compareAndSet(false, true)) { - try (ReleasableLock _ = readLock.acquire()) { - ensureOpen(); - /* - * The way we implement upgrades is a bit hackish in the sense that we set an instance - * variable and that this setting will thus apply to the next forced merge that will be run. - * This is ok because (1) this is the only place we call forceMerge, (2) we have a single - * thread for optimize, and the 'optimizeMutex' guarding this code, and (3) ConcurrentMergeScheduler - * syncs calls to findForcedMerges. - */ - MergePolicy mp = indexWriter.getConfig().getMergePolicy(); - assert mp instanceof ElasticsearchMergePolicy : "MergePolicy is " + mp.getClass().getName(); - if (upgrade) { - logger.info("Starting upgrade of " + shardId); - ((ElasticsearchMergePolicy) mp).setUpgradeInProgress(true); - } - + /* + * We do NOT acquire the readlock here since we are waiting on the merges to finish + * that's fine since the IW.rollback should stop all the threads and trigger an IOException + * causing us to fail the forceMerge + * + * The way we implement upgrades is a bit hackish in the sense that we set an instance + * variable and that this setting will thus apply to the next forced merge that will be run. + * This is ok because (1) this is the only place we call forceMerge, (2) we have a single + * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler + * syncs calls to findForcedMerges. 
+ */ + assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " + indexWriter.getConfig().getMergePolicy().getClass().getName(); + ElasticsearchMergePolicy mp = (ElasticsearchMergePolicy) indexWriter.getConfig().getMergePolicy(); + optimizeLock.lock(); + try { + ensureOpen(); + if (upgrade) { + logger.info("starting segment upgrade"); + mp.setUpgradeInProgress(true); + } + store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize + try { if (onlyExpungeDeletes) { - indexWriter.forceMergeDeletes(false); + assert upgrade == false; + indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/); } else if (maxNumSegments <= 0) { + assert upgrade == false; indexWriter.maybeMerge(); } else { - indexWriter.forceMerge(maxNumSegments, false); + indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); + } + if (flush) { + flush(true, true, true); + } + if (upgrade) { + logger.info("finished segment upgrade"); } - } catch (Throwable t) { - maybeFailEngine("optimize", t); - throw new OptimizeFailedEngineException(shardId, t); } finally { - optimizeMutex.set(false); + store.decRef(); + } + } catch (Throwable t) { + ForceMergeFailedEngineException ex = new ForceMergeFailedEngineException(shardId, t); + maybeFailEngine("force merge", ex); + throw ex; + } finally { + try { + mp.setUpgradeInProgress(false); // reset it just to make sure we reset it in a case of an error + } finally { + optimizeLock.unlock(); } } - - waitForMerges(flush, upgrade); } @Override @@ -747,7 +769,7 @@ public class InternalEngine extends Engine { // we have to flush outside of the readlock otherwise we might have a problem upgrading // the to a write lock when we fail the engine in this operation flush(false, false, true); - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); return deletionPolicy.snapshot(); } catch (IOException e) { @@ -759,7 +781,7 @@ public class InternalEngine extends Engine { public void recover(RecoveryHandler recoveryHandler) throws EngineException { // take a write lock here so it won't happen while a flush is in progress // this means that next commits will not be allowed once the lock is released - try (ReleasableLock _ = writeLock.acquire()) { + try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); onGoingRecoveries.startRecovery(); } @@ -848,7 +870,7 @@ public class InternalEngine extends Engine { @Override public List segments(boolean verbose) { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); // fill in the merges flag @@ -1070,12 +1092,8 @@ public class InternalEngine extends Engine { @Override public void onFailedMerge(MergePolicy.MergeException e) { if (Lucene.isCorruptionException(e)) { - if (engineConfig.isFailEngineOnCorruption()) { - failEngine("corrupt file detected source: [merge]", e); - } else { - logger.warn("corrupt file detected source: [merge] but [{}] is set to [{}]", e, EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, engineConfig.isFailEngineOnCorruption()); - } - } else if (engineConfig.isFailOnMergeFailure()) { + failEngine("corrupt file detected source: [merge]", e); + } else { failEngine("merge exception", e); } } diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 
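[review note] The force-merge path above and the `ShadowEngine` flush below both bracket their work with `store.incRef()` / `store.decRef()`. A self-contained sketch of that reference-counting discipline (illustrative class, not the actual `Store`):

```java
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedResourceSketch {
    private final AtomicInteger refCount = new AtomicInteger(1); // the owner holds one reference

    void incRef() {
        int count;
        do {
            count = refCount.get();
            if (count == 0) {
                throw new IllegalStateException("resource is already closed");
            }
        } while (refCount.compareAndSet(count, count + 1) == false);
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            System.out.println("last reference released, closing underlying files");
        }
    }

    void readSegmentInfos() {
        incRef(); // a concurrent close can now only release, never reclaim, the resource
        try {
            System.out.println("reading last committed segment infos");
        } finally {
            decRef();
        }
    }

    public static void main(String[] args) {
        RefCountedResourceSketch store = new RefCountedResourceSketch();
        store.readSegmentInfos();
        store.decRef(); // owner closes; files are released once all references are gone
    }
}
```

The CAS loop in `incRef` is what makes the guard safe: once the count reaches zero the resource can never be revived, so a caller either gets a valid reference or an exception, never a half-closed store.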
0fd4accf166..33140671829 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -123,7 +123,15 @@ public class ShadowEngine extends Engine { logger.trace("skipping FLUSH on shadow engine"); // reread the last committed segment infos refresh("flush"); - try (ReleasableLock _ = readLock.acquire()) { + /* + * we have to inc-ref the store here since if the engine is closed by a tragic event + * we don't acquire the write lock and wait until we have exclusive access. This might also + * dec the store reference which can essentially close the store and unless we can inc the reference + * we can't use it. + */ + store.incRef(); + try (ReleasableLock lock = readLock.acquire()) { + // reread the last committed segment infos lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); } catch (Throwable e) { if (isClosed.get() == false) { @@ -132,6 +140,8 @@ public class ShadowEngine extends Engine { throw new FlushFailedEngineException(shardId, e); } } + } finally { + store.decRef(); } } @@ -149,7 +159,7 @@ public class ShadowEngine extends Engine { @Override public List segments(boolean verbose) { - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); for (int i = 0; i < segmentsArr.length; i++) { // hard code all segments as committed, because they are in @@ -164,7 +174,7 @@ public class ShadowEngine extends Engine { public void refresh(String source) throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - try (ReleasableLock _ = readLock.acquire()) { + try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); searcherManager.maybeRefreshBlocking(); } catch (AlreadyClosedException e) { diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java index a6787fb3537..0fc6a082acc 100644 --- a/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java +++ b/src/main/java/org/elasticsearch/index/gateway/IndexShardGateway.java @@ -191,6 +191,8 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl if (recoveringTranslogFile == null || Files.exists(recoveringTranslogFile) == false) { // no translog files, bail + recoveryState.getTranslog().totalOperations(0); + recoveryState.getTranslog().totalOperationsOnStart(0); indexShard.finalizeRecovery(); indexShard.postRecovery("post recovery from gateway, no translog"); // no index, just start the shard and bail @@ -236,7 +238,7 @@ public class IndexShardGateway extends AbstractIndexShardComponent implements Cl typesToUpdate.add(potentialIndexOperation.docMapper().type()); } } - recoveryState.getTranslog().addTranslogOperations(1); + recoveryState.getTranslog().incrementRecoveredOperations(); } catch (ElasticsearchException e) { if (e.status() == RestStatus.BAD_REQUEST) { // mainly for MapperParsingException and Failure to detect xcontent diff --git a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java index 75e7cb3e880..ff170b64ba5 100644 --- a/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java +++ 
b/src/main/java/org/elasticsearch/index/gateway/IndexShardGatewayService.java @@ -114,13 +114,10 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem shardGateway.recover(indexShouldExists, recoveryState); } - // Check that the gateway have set the shard to POST_RECOVERY. Note that if a shard - // is in POST_RECOVERY, it may have been started as well if: - // 1) master sent a new cluster state indicating shard is initializing - // 2) IndicesClusterStateService#applyInitializingShard will send a shard started event - // 3) Master will mark shard as started and this will be processed locally. + // Check that the gateway didn't leave the shard in init or recovering stage. it is up to the gateway + // to call post recovery. IndexShardState shardState = indexShard.state(); - assert shardState == IndexShardState.POST_RECOVERY || shardState == IndexShardState.STARTED : "recovery process didn't call post_recovery. shardState [" + shardState + "]"; + assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); @@ -135,7 +132,7 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem .append(new ByteSizeValue(index.reusedBytes())).append("]\n"); sb.append(" start : took [").append(TimeValue.timeValueMillis(recoveryState.getStart().time())).append("], check_index [") .append(timeValueMillis(recoveryState.getStart().checkIndexTime())).append("]\n"); - sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().currentTranslogOperations()) + sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()) .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]"); logger.trace(sb.toString()); } else if (logger.isDebugEnabled()) { diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index a4471e95a8b..2614e60f98e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; @@ -235,7 +236,7 @@ public abstract class NumberFieldMapper extends AbstractFieldM RuntimeException e = null; try { innerParseCreateField(context, fields); - } catch (IllegalArgumentException e1) { + } catch (IllegalArgumentException | ElasticsearchIllegalArgumentException e1) { e = e1; } catch (MapperParsingException e2) { e = e2; diff --git a/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java b/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java index a7db60e14fb..009ba0b59ca 100644 --- a/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/scheduler/ConcurrentMergeSchedulerProvider.java @@ -139,9 +139,11 @@ public class 
ConcurrentMergeSchedulerProvider extends MergeSchedulerProvider { @Override protected void handleMergeException(Directory dir, Throwable exc) { - logger.warn("failed to merge", exc); + logger.error("failed to merge", exc); provider.failedMerge(new MergePolicy.MergeException(exc, dir)); - super.handleMergeException(dir, exc); + // NOTE: do not call super.handleMergeException here, which would just re-throw the exception + // and let Java's thread exc handler see it / log it to stderr, but we already 1) logged it + // and 2) handled the exception by failing the engine } @Override diff --git a/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index 6d4e7fa5ede..ca12ff5a927 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -86,7 +86,7 @@ public class BoolQueryBuilder extends BaseQueryBuilder implements BoostableQuery } /** - * Disables Similarity#coord(int,int) in scoring. Defualts to false. + * Disables Similarity#coord(int,int) in scoring. Defaults to false. */ public BoolQueryBuilder disableCoord(boolean disableCoord) { this.disableCoord = disableCoord; diff --git a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java index 0d85a35e034..bdd5e463caa 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -149,8 +148,7 @@ public class NestedQueryParser implements QueryParser { } if (innerHits != null) { - ObjectMapper parentObjectMapper = childDocumentMapper.findParentObjectMapper(nestedObjectMapper); - InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(innerHits.v2(), getInnerQuery(), null, parentObjectMapper, nestedObjectMapper); + InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(innerHits.v2(), getInnerQuery(), null, getParentObjectMapper(), nestedObjectMapper); String name = innerHits.v1() != null ? innerHits.v1() : path; parseContext.addInnerHits(name, nestedInnerHits); } diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index 7e1dc0d2981..fca599a8cd0 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -69,7 +69,7 @@ public class ScriptFilterParser implements FilterParser { HashedBytesRef cacheKey = null; // also, when caching, since its isCacheable is false, will result in loading all bit set... 
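[review note] For context on the `disableCoord` javadoc fix above, a hypothetical usage of that builder with the 1.x-era Java API (`QueryBuilders.boolQuery()` and `termQuery` are existing factory methods; the field and value are made up):

```java
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class DisableCoordExample {
    public static void main(String[] args) {
        // Build a bool query that skips the Similarity#coord(int,int) factor when scoring.
        BoolQueryBuilder query = QueryBuilders.boolQuery()
                .must(QueryBuilders.termQuery("user", "kimchy"))
                .disableCoord(true);
        System.out.println(query); // prints the query's JSON representation
    }
}
```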
String script = null; - String scriptLang = null; + String scriptLang; Map params = null; String filterName = null; @@ -130,12 +130,9 @@ public class ScriptFilterParser implements FilterParser { private final SearchScript searchScript; - private final ScriptService.ScriptType scriptType; - public ScriptFilter(String scriptLang, String script, ScriptService.ScriptType scriptType, Map params, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; this.params = params; - this.scriptType = scriptType; this.searchScript = scriptService.search(searchLookup, scriptLang, script, scriptType, newHashMap(params)); } diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java index a0d471e91b6..176f97f4fff 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.query; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; import java.util.Map; diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java index 175473d5a96..6dbb5ec7824 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java @@ -19,17 +19,15 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import java.io.IOException; import java.util.HashMap; @@ -52,9 +50,9 @@ public class TemplateQueryParser implements QueryParser { private final static Map parametersToTypes = new HashMap<>(); static { - parametersToTypes.put("query",ScriptService.ScriptType.INLINE); - parametersToTypes.put("file",ScriptService.ScriptType.FILE); - parametersToTypes.put("id",ScriptService.ScriptType.INDEXED); + parametersToTypes.put("query", ScriptService.ScriptType.INLINE); + parametersToTypes.put("file", ScriptService.ScriptType.FILE); + parametersToTypes.put("id", ScriptService.ScriptType.INDEXED); } @Inject @@ -78,15 +76,14 @@ public class TemplateQueryParser implements QueryParser { public Query parse(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); TemplateContext templateContext = parse(parser, PARAMS, parametersToTypes); - ExecutableScript executable = this.scriptService.executable("mustache", templateContext.template(), templateContext.scriptType(), templateContext.params()); + ExecutableScript executable = this.scriptService.executable(MustacheScriptEngineService.NAME, 
templateContext.template(), templateContext.scriptType(), templateContext.params()); BytesReference querySource = (BytesReference) executable.run(); try (XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource)) { final QueryParseContext context = new QueryParseContext(parseContext.index(), parseContext.indexQueryParserService()); context.reset(qSourceParser); - Query result = context.parseInnerQuery(); - return result; + return context.parseInnerQuery(); } } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java index d6d2de0fc77..f09dc8774c4 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java @@ -56,7 +56,6 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException { ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); String script = null; - String scriptLang = null; Map<String, Object> vars = null; ScriptService.ScriptType scriptType = null; String currentFieldName = null; @@ -82,15 +81,13 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { script = scriptValue.script(); scriptType = scriptValue.scriptType(); } - scriptLang = scriptParameterParser.lang(); - if (script == null) { throw new QueryParsingException(parseContext.index(), NAMES[0] + " requires 'script' field"); } SearchScript searchScript; try { - searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptLang, script, scriptType, vars); + searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptParameterParser.lang(), script, scriptType, vars); return new ScriptScoreFunction(script, vars, searchScript); } catch (Exception e) { throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e); diff --git a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 76bfa4c906c..07142e5e1e6 100644 --- a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -60,6 +60,7 @@ public class NestedInnerQueryParseSupport { protected DocumentMapper childDocumentMapper; protected ObjectMapper nestedObjectMapper; + private ObjectMapper parentObjectMapper; public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) { parseContext = searchContext.queryParserService().getParseContext(); @@ -187,6 +188,10 @@ public class NestedInnerQueryParseSupport { return filterFound; } + public ObjectMapper getParentObjectMapper() { + return parentObjectMapper; + } + private void setPathLevel() { ObjectMapper objectMapper = parseContext.nestedScope().getObjectMapper(); if (objectMapper == null) { @@ -195,7 +200,7 @@ public class NestedInnerQueryParseSupport { parentFilter = parseContext.bitsetFilter(objectMapper.nestedTypeFilter()); } childFilter = parseContext.bitsetFilter(nestedObjectMapper.nestedTypeFilter()); - parseContext.nestedScope().nextLevel(nestedObjectMapper); + parentObjectMapper =
parseContext.nestedScope().nextLevel(nestedObjectMapper); } private void resetPathLevel() { diff --git a/src/main/java/org/elasticsearch/index/query/support/NestedScope.java b/src/main/java/org/elasticsearch/index/query/support/NestedScope.java index e06cb7b5050..8a7383d4cc5 100644 --- a/src/main/java/org/elasticsearch/index/query/support/NestedScope.java +++ b/src/main/java/org/elasticsearch/index/query/support/NestedScope.java @@ -39,17 +39,19 @@ public final class NestedScope { } /** - * Sets the new current nested level and moves old current nested level down + * Sets the new current nested level and pushes the old current nested level down the stack, returning that level. */ - public void nextLevel(ObjectMapper level) { + public ObjectMapper nextLevel(ObjectMapper level) { + ObjectMapper previous = levelStack.peek(); levelStack.push(level); + return previous; } /** - * Sets the previous nested level as current nested level and removes the current nested level. + * Sets the previous nested level as the current nested level, removing and returning the old current level. */ - public void previousLevel() { - ObjectMapper level = levelStack.pop(); + public ObjectMapper previousLevel() { + return levelStack.pop(); } } diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index 26d7e00da2c..91f8ac4f3bf 100644 --- a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -39,9 +39,9 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.support.AbstractIndexStore; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; +import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.ttl.IndicesTTLService; -import org.elasticsearch.indices.IndicesWarmer; /** */ @@ -85,9 +85,8 @@ public class IndexDynamicSettingsModule extends AbstractModule { indexDynamicSettings.addDynamicSetting(LogDocMergePolicyProvider.INDEX_COMPOUND_FORMAT); indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_COMPOUND_ON_FLUSH, Validator.BOOLEAN); indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_GC_DELETES_SETTING, Validator.TIME); - indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, Validator.BOOLEAN); - indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, Validator.BOOLEAN); indexDynamicSettings.addDynamicSetting(IndexShard.INDEX_FLUSH_ON_CLOSE, Validator.BOOLEAN); + indexDynamicSettings.addDynamicSetting(EngineConfig.INDEX_VERSION_MAP_SIZE, Validator.BYTES_SIZE_OR_PERCENTAGE); indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME); indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME); indexDynamicSettings.addDynamicSetting(ShardSlowLogIndexingService.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME); diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d8e9a332013..57e3f3df52d 100644 +++ 
b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -46,7 +46,6 @@ import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; @@ -222,7 +221,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.codecService = codecService; this.shardSuggestService = shardSuggestService; this.shardBitsetFilterCache = shardBitsetFilterCache; - assert clusterService.lifecycleState() == Lifecycle.State.STARTED; // otherwise localNode is still none; + assert clusterService.localNode() != null : "Local node is null, lifecycle state is: " + clusterService.lifecycleState(); this.localNode = clusterService.localNode(); state = IndexShardState.CREATED; this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL); @@ -960,7 +959,8 @@ public class IndexShard extends AbstractIndexShardComponent { public void updateBufferSize(ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) { ByteSizeValue preValue = config.getIndexingBufferSize(); config.setIndexingBufferSize(shardIndexingBufferSize); - if (preValue.bytes() != shardIndexingBufferSize.bytes()) { + // update engine if it is already started. + if (preValue.bytes() != shardIndexingBufferSize.bytes() && engineUnsafe() != null) { // its inactive, make sure we do a refresh / full IW flush in this case, since the memory // changes only after a "data" change has happened to the writer // the index writer lazily allocates memory and a refresh will clean it all up. 
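Editor's note: the `engineUnsafe() != null` guard above lets the buffer-size update simply skip work while no engine is running, instead of hitting the `EngineClosedException` that `engine()` throws; the accessor pair itself is added in the next hunk. A minimal standalone sketch of the pattern follows; the class and field names are illustrative stand-ins, not the actual `IndexShard` code:

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicReference;

class ShardLike {
    // Holds the engine once started; null before start and after close.
    private final AtomicReference<Object> currentEngineReference = new AtomicReference<>();

    // Throwing accessor: for callers that require a started engine.
    Object engine() {
        Object engine = engineUnsafe();
        if (engine == null) {
            throw new IllegalStateException("engine is closed or not started yet");
        }
        return engine;
    }

    // Nullable accessor: for callers, such as settings updates, that can
    // safely do nothing while the engine is not started.
    Object engineUnsafe() {
        return currentEngineReference.get();
    }
}
---------------------------------------------------------------------------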
@@ -1029,18 +1029,9 @@ public class IndexShard extends AbstractIndexShardComponent { config.setCompoundOnFlush(compoundOnFlush); change = true; } - - final boolean failEngineOnCorruption = settings.getAsBoolean(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption()); - if (failEngineOnCorruption != config.isFailEngineOnCorruption()) { - logger.info("updating {} from [{}] to [{}]", EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, config.isFailEngineOnCorruption(), failEngineOnCorruption); - config.setFailEngineOnCorruption(failEngineOnCorruption); - change = true; - } - final boolean failOnMergeFailure = settings.getAsBoolean(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure()); - if (failOnMergeFailure != config.isFailOnMergeFailure()) { - logger.info("updating {} from [{}] to [{}]", EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, config.isFailOnMergeFailure(), failOnMergeFailure); - config.setFailOnMergeFailure(failOnMergeFailure); - change = true; + final String versionMapSize = settings.get(EngineConfig.INDEX_VERSION_MAP_SIZE, config.getVersionMapSizeSetting()); + if (config.getVersionMapSizeSetting().equals(versionMapSize) == false) { + config.setVersionMapSizeSetting(versionMapSize); } } if (change) { @@ -1175,13 +1166,17 @@ public class IndexShard extends AbstractIndexShardComponent { } public Engine engine() { - Engine engine = this.currentEngineReference.get(); + Engine engine = engineUnsafe(); if (engine == null) { throw new EngineClosedException(shardId); } return engine; } + protected Engine engineUnsafe() { + return this.currentEngineReference.get(); + } + class ShardEngineFailListener implements Engine.FailedEngineListener { private final CopyOnWriteArrayList<Engine.FailedEngineListener> delegates = new CopyOnWriteArrayList<>(); diff --git a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java index d5ac11a704e..ce0ee16f932 100644 --- a/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java +++ b/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotAndRestoreService.java @@ -115,6 +115,8 @@ public class IndexShardSnapshotAndRestoreService extends AbstractIndexShardCompo logger.trace("[{}] restoring shard [{}]", restoreSource.snapshotId(), shardId); } try { + recoveryState.getTranslog().totalOperations(0); + recoveryState.getTranslog().totalOperationsOnStart(0); indexShard.prepareForIndexRecovery(); IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository()); ShardId snapshotShardId = shardId; diff --git a/src/main/java/org/elasticsearch/index/translog/TranslogStats.java b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java index ea917ea4331..d0160ba18d0 100644 --- a/src/main/java/org/elasticsearch/index/translog/TranslogStats.java +++ b/src/main/java/org/elasticsearch/index/translog/TranslogStats.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.translog; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; @@ -35,9 +36,12 @@ public class TranslogStats 
implements ToXContent, Streamable { private long translogSizeInBytes = 0; private int estimatedNumberOfOperations = 0; - public TranslogStats() {} + public TranslogStats() { + } public TranslogStats(int estimatedNumberOfOperations, long translogSizeInBytes) { + assert estimatedNumberOfOperations >= 0 : "estimatedNumberOfOperations must be >=0, got [" + estimatedNumberOfOperations + "]"; + assert translogSizeInBytes >= 0 : "translogSizeInBytes must be >=0, got [" + translogSizeInBytes + "]"; this.estimatedNumberOfOperations = estimatedNumberOfOperations; this.translogSizeInBytes = translogSizeInBytes; } @@ -48,7 +52,15 @@ public class TranslogStats implements ToXContent, Streamable { } this.estimatedNumberOfOperations += translogStats.estimatedNumberOfOperations; - this.translogSizeInBytes =+ translogStats.translogSizeInBytes; + this.translogSizeInBytes += translogStats.translogSizeInBytes; + } + + public ByteSizeValue translogSizeInBytes() { + return new ByteSizeValue(translogSizeInBytes); + } + + public long estimatedNumberOfOperations() { + return estimatedNumberOfOperations; } @Override @@ -70,10 +82,12 @@ public class TranslogStats implements ToXContent, Streamable { @Override public void readFrom(StreamInput in) throws IOException { estimatedNumberOfOperations = in.readVInt(); + translogSizeInBytes = in.readVLong(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(estimatedNumberOfOperations); + out.writeVLong(translogSizeInBytes); } } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 5346994bbdd..bb6b8ba537f 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -439,7 +439,12 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public TranslogStats stats() { - return new TranslogStats(estimatedNumberOfOperations(), translogSizeInBytes()); + FsTranslogFile current = this.current; + if (current == null) { + return new TranslogStats(0, 0); + } + + return new TranslogStats(current.estimatedNumberOfOperations(), current.translogSizeInBytes()); } @Override diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index ca19c142d95..551e738a54a 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -118,7 +118,6 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; - private final ClusterService clusterService; private volatile Map<String, Tuple<IndexService, Injector>> indices = ImmutableMap.of(); private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>(); @@ -126,10 +125,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i private final OldShardsStats oldShardsStats = new OldShardsStats(); @Inject - public IndicesService(Settings settings, IndicesLifecycle indicesLifecycle, IndicesAnalysisService indicesAnalysisService, Injector injector, NodeEnvironment nodeEnv, ClusterService clusterService) { + public IndicesService(Settings settings, IndicesLifecycle indicesLifecycle, IndicesAnalysisService indicesAnalysisService, Injector injector, NodeEnvironment nodeEnv) { super(settings); this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle; - this.clusterService = 
clusterService; this.indicesAnalysisService = indicesAnalysisService; this.injector = injector; this.pluginsService = injector.getInstance(PluginsService.class); @@ -447,16 +445,15 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i removeIndex(index, reason, true); } - public void deleteClosedIndex(String reason, IndexMetaData metaData) { + public void deleteClosedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) { if (nodeEnv.hasNodeFile()) { String indexName = metaData.getIndex(); try { - ClusterState clusterState = clusterService.state(); if (clusterState.metaData().hasIndex(indexName)) { final IndexMetaData index = clusterState.metaData().index(indexName); throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } - deleteIndexStore(reason, metaData); + deleteIndexStore(reason, metaData, clusterState); } catch (IOException e) { logger.warn("[{}] failed to delete closed index", e, metaData.index()); } @@ -467,16 +464,17 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i * Deletes the index store trying to acquire all shards locks for this index. * This method will delete the metadata for the index even if the actual shards can't be locked. */ - public void deleteIndexStore(String reason, IndexMetaData metaData) throws IOException { + public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { String indexName = metaData.index(); - if (indices.containsKey(metaData.index())) { - String localUUid = indices.get(metaData.index()).v1().indexUUID(); - throw new ElasticsearchIllegalStateException("Can't delete index store for [" + metaData.getIndex() + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); + if (indices.containsKey(indexName)) { + String localUUID = indices.get(indexName).v1().indexUUID(); + throw new ElasticsearchIllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUID + "] [" + metaData.getUUID() + "]"); } - ClusterState clusterState = clusterService.state(); - if (clusterState.metaData().hasIndex(indexName)) { + if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { + // we do not delete the store if it is a master-eligible node and the index is still in the cluster state + // because we want to keep the metadata for indices around even if no shards are left here final IndexMetaData index = clusterState.metaData().index(indexName); throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } diff --git a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 3ca1ab55905..61f9d381f74 100644 --- a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -244,7 +244,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent - private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of(IndexShardState.POST_RECOVERY, IndexShardState.STARTED, 
IndexShardState.RELOCATED); + private static final EnumSet<IndexShardState> CAN_UPDATE_INDEX_BUFFER_STATES = EnumSet.of( + IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); @Inject public IndexingMemoryController(Settings settings, ThreadPool threadPool, IndicesService indicesService) { diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java index 819ae984ae9..0ff00d7c008 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.recovery; -import com.google.common.collect.Sets; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -27,7 +26,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -import java.util.Set; /** * @@ -37,15 +35,17 @@ class RecoveryCleanFilesRequest extends TransportRequest { private long recoveryId; private ShardId shardId; - private Store.MetadataSnapshot snapshotFiles; + private Store.MetadataSnapshot snapshotFiles; + private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; RecoveryCleanFilesRequest() { } - RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles) { + RecoveryCleanFilesRequest(long recoveryId, ShardId shardId, Store.MetadataSnapshot snapshotFiles, int totalTranslogOps) { this.recoveryId = recoveryId; this.shardId = shardId; this.snapshotFiles = snapshotFiles; + this.totalTranslogOps = totalTranslogOps; } public long recoveryId() { @@ -62,6 +62,7 @@ class RecoveryCleanFilesRequest extends TransportRequest { recoveryId = in.readLong(); shardId = ShardId.readShardId(in); snapshotFiles = Store.MetadataSnapshot.read(in); + totalTranslogOps = in.readVInt(); } @Override @@ -70,9 +71,14 @@ class RecoveryCleanFilesRequest extends TransportRequest { out.writeLong(recoveryId); shardId.writeTo(out); snapshotFiles.writeTo(out); + out.writeVInt(totalTranslogOps); } public Store.MetadataSnapshot sourceMetaSnapshot() { return snapshotFiles; } + + public int totalTranslogOps() { + return totalTranslogOps; + } } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java index d27f94d9a93..5d06772069b 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java @@ -42,16 +42,20 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi private BytesReference content; private StoreFileMetaData metaData; + private int totalTranslogOps; + RecoveryFileChunkRequest() { } - public RecoveryFileChunkRequest(long recoveryId, ShardId shardId, StoreFileMetaData metaData, long position, BytesReference content, boolean lastChunk) { + public RecoveryFileChunkRequest(long recoveryId, ShardId shardId, StoreFileMetaData metaData, long position, BytesReference content, + boolean lastChunk, int totalTranslogOps) { this.recoveryId = recoveryId; this.shardId = shardId; this.metaData = metaData; this.position = position; this.content = content; this.lastChunk = lastChunk; + 
this.totalTranslogOps = totalTranslogOps; } public long recoveryId() { @@ -83,6 +87,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi return content; } + public int totalTranslogOps() { + return totalTranslogOps; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -98,6 +106,7 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi writtenBy = Lucene.parseVersionLenient(versionString, null); metaData = new StoreFileMetaData(name, length, checksum, writtenBy); lastChunk = in.readBoolean(); + totalTranslogOps = in.readVInt(); } @Override @@ -112,6 +121,7 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi out.writeBytesReference(content); out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString()); out.writeBoolean(lastChunk); + out.writeVInt(totalTranslogOps); } @Override diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java index 2d9fb2b0653..d28ae270f9e 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryFilesInfoRequest.java @@ -41,17 +41,20 @@ class RecoveryFilesInfoRequest extends TransportRequest { List<String> phase1ExistingFileNames; List<Long> phase1ExistingFileSizes; + int totalTranslogOps; + RecoveryFilesInfoRequest() { } RecoveryFilesInfoRequest(long recoveryId, ShardId shardId, List<String> phase1FileNames, List<Long> phase1FileSizes, - List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes) { + List<String> phase1ExistingFileNames, List<Long> phase1ExistingFileSizes, int totalTranslogOps) { this.recoveryId = recoveryId; this.shardId = shardId; this.phase1FileNames = phase1FileNames; this.phase1FileSizes = phase1FileSizes; this.phase1ExistingFileNames = phase1ExistingFileNames; this.phase1ExistingFileSizes = phase1ExistingFileSizes; + this.totalTranslogOps = totalTranslogOps; } public long recoveryId() { @@ -90,6 +93,7 @@ class RecoveryFilesInfoRequest extends TransportRequest { for (int i = 0; i < size; i++) { phase1ExistingFileSizes.add(in.readVLong()); } + totalTranslogOps = in.readVInt(); } @Override @@ -117,5 +121,6 @@ class RecoveryFilesInfoRequest extends TransportRequest { for (Long phase1ExistingFileSize : phase1ExistingFileSizes) { out.writeVLong(phase1ExistingFileSize); } + out.writeVInt(totalTranslogOps); } } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index e5a131843e9..dbc4a1503c1 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -33,13 +33,15 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { private long recoveryId; private ShardId shardId; + private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; RecoveryPrepareForTranslogOperationsRequest() { } - RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId) { + RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) { this.recoveryId = recoveryId; this.shardId = shardId; + this.totalTranslogOps = totalTranslogOps; } public long 
recoveryId() { @@ -50,11 +52,16 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { return shardId; } + public int totalTranslogOps() { + return totalTranslogOps; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); recoveryId = in.readLong(); shardId = ShardId.readShardId(in); + totalTranslogOps = in.readVInt(); } @Override @@ -62,5 +69,6 @@ class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest { super.writeTo(out); out.writeLong(recoveryId); shardId.writeTo(out); + out.writeVInt(totalTranslogOps); } } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 3f6702aa036..39b8c49f8f3 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -199,7 +199,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { @Override public void run() throws InterruptedException { RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(request.recoveryId(), request.shardId(), - response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes); + response.phase1FileNames, response.phase1FileSizes, response.phase1ExistingFileNames, response.phase1ExistingFileSizes, + shard.translog().estimatedNumberOfOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); @@ -288,7 +289,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { public void run() throws InterruptedException { // Actually send the file chunk to the target node, waiting for it to complete transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FILE_CHUNK, - new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, lastChunk), + new RecoveryFileChunkRequest(request.recoveryId(), request.shardId(), md, position, content, + lastChunk, shard.translog().estimatedNumberOfOperations()), requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } }); @@ -350,7 +352,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { // are deleted try { transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.CLEAN_FILES, - new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata), + new RecoveryCleanFilesRequest(request.recoveryId(), shard.shardId(), recoverySourceMetadata, shard.translog().estimatedNumberOfOperations()), TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } catch (RemoteTransportException remoteException) { @@ -427,7 +429,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { // operations. This ensures the shard engine is started and disables // garbage collection (not the JVM's GC!) 
of tombstone deletes transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG, - new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId()), + new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), shard.translog().estimatedNumberOfOperations()), TransportRequestOptions.options().withTimeout(recoverySettings.internalActionTimeout()), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } }); @@ -435,7 +437,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { stopWatch.stop(); response.startTime = stopWatch.totalTime().millis(); logger.trace("{} recovery [phase2] to {}: start took [{}]", - request.shardId(), request.targetNode(), request.targetNode(), stopWatch.totalTime()); + request.shardId(), request.targetNode(), stopWatch.totalTime()); logger.trace("{} recovery [phase2] to {}: updating current mapping to master", request.shardId(), request.targetNode()); @@ -616,7 +618,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { cancellableThreads.execute(new Interruptable() { @Override public void run() throws InterruptedException { - final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations); + final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( + request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } @@ -633,7 +636,8 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler { cancellableThreads.execute(new Interruptable() { @Override public void run() throws InterruptedException { - RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations); + RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest( + request.recoveryId(), request.shardId(), operations, shard.translog().estimatedNumberOfOperations()); transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index 4e6a747530a..36289d9ef53 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -39,7 +39,6 @@ import java.io.IOException; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; /** * Keeps track of state related to shard recovery. 
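Editor's note: the hunks below rework `RecoveryState.Translog` from a single `AtomicInteger` counter into synchronized recovered/total/total-on-start bookkeeping, so that progress can be reported as a percentage even when the total is unknown or zero. A standalone sketch of that accounting, with shortened names rather than the actual class:

---------------------------------------------------------------------------
// Progress over a total that may be unknown (-1) or zero.
final class TranslogProgress {
    static final int UNKNOWN = -1;

    private int recovered;
    private int total = UNKNOWN;

    synchronized void incrementRecovered() {
        recovered++;
    }

    synchronized void total(int total) {
        this.total = total; // may keep growing while recovery runs
    }

    synchronized float recoveredPercent() {
        if (total == UNKNOWN) {
            return -1.0f;   // caller renders this as "unknown"
        }
        if (total == 0) {
            return 100.0f;  // nothing to recover counts as complete
        }
        return recovered * 100.0f / total;
    }
}
---------------------------------------------------------------------------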
@@ -359,6 +358,7 @@ public class RecoveryState implements ToXContent, Streamable { static final XContentBuilderString TARGET = new XContentBuilderString("target"); static final XContentBuilderString INDEX = new XContentBuilderString("index"); static final XContentBuilderString TRANSLOG = new XContentBuilderString("translog"); + static final XContentBuilderString TOTAL_ON_START = new XContentBuilderString("total_on_start"); static final XContentBuilderString START = new XContentBuilderString("start"); static final XContentBuilderString RECOVERED = new XContentBuilderString("recovered"); static final XContentBuilderString RECOVERED_IN_BYTES = new XContentBuilderString("recovered_in_bytes"); @@ -473,40 +473,90 @@ public class RecoveryState implements ToXContent, Streamable { } public static class Translog extends Timer implements ToXContent, Streamable { - private final AtomicInteger currentTranslogOperations = new AtomicInteger(); + public static final int UNKNOWN = -1; - public void reset() { + private int recovered; + private int total = UNKNOWN; + private int totalOnStart = UNKNOWN; + + public synchronized void reset() { super.reset(); - currentTranslogOperations.set(0); + recovered = 0; + total = UNKNOWN; + totalOnStart = UNKNOWN; } - public void addTranslogOperations(int count) { - this.currentTranslogOperations.addAndGet(count); + public synchronized void incrementRecoveredOperations() { + recovered++; + assert total == UNKNOWN || total >= recovered : "total, if known, should be >= recovered. total [" + total + "], recovered [" + recovered + "]"; } - public void incrementTranslogOperations() { - this.currentTranslogOperations.incrementAndGet(); + /** returns the total number of translog operations recovered so far */ + public synchronized int recoveredOperations() { + return recovered; } - public int currentTranslogOperations() { - return this.currentTranslogOperations.get(); + /** + * returns the total number of translog operations needed to be recovered at this moment. + * Note that this can change as the number of operations grows during recovery. + *
<p/>
+ * A value of -1 ({@link RecoveryState.Translog#UNKNOWN}) is returned if this is unknown (typically a gateway recovery) + */ + public synchronized int totalOperations() { + return total; + } + + public synchronized void totalOperations(int total) { + this.total = total; + assert total == UNKNOWN || total >= recovered : "total, if known, should be >= recovered. total [" + total + "], recovered [" + recovered + "]"; + } + + /** + * returns the total number of translog operations to be recovered, as of the start of the recovery. Unlike {@link #totalOperations} + * this does not change during recovery. + *
<p/>
+ * A value of -1 ({@link RecoveryState.Translog#UNKNOWN}) is returned if this is unknown (typically a gateway recovery) + */ + public synchronized int totalOperationsOnStart() { + return this.totalOnStart; + } + + public synchronized void totalOperationsOnStart(int total) { + this.totalOnStart = total; + } + + public synchronized float recoveredPercent() { + if (total == UNKNOWN) { + return -1.f; + } + if (total == 0) { + return 100.f; + } + return recovered * 100.0f / total; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - currentTranslogOperations.set(in.readVInt()); + recovered = in.readVInt(); + total = in.readVInt(); + totalOnStart = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(currentTranslogOperations.get()); + out.writeVInt(recovered); + out.writeVInt(total); + out.writeVInt(totalOnStart); } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.RECOVERED, currentTranslogOperations.get()); + public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Fields.RECOVERED, recovered); + builder.field(Fields.TOTAL, total); + builder.field(Fields.PERCENT, String.format(Locale.ROOT, "%1.1f%%", recoveredPercent())); + builder.field(Fields.TOTAL_ON_START, totalOnStart); builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, time()); return builder; } diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 3b72a3059d2..437560c16d8 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -277,6 +277,7 @@ public class RecoveryTarget extends AbstractComponent { public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { final RecoveryStatus recoveryStatus = statusRef.status(); + recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); recoveryStatus.indexShard().prepareForTranslogRecovery(); } channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -322,9 +323,11 @@ public class RecoveryTarget extends AbstractComponent { public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { final RecoveryStatus recoveryStatus = statusRef.status(); + final RecoveryState.Translog translog = recoveryStatus.state().getTranslog(); + translog.totalOperations(request.totalTranslogOps()); for (Translog.Operation operation : request.operations()) { recoveryStatus.indexShard().performRecoveryOperation(operation); - recoveryStatus.state().getTranslog().incrementTranslogOperations(); + translog.incrementRecoveredOperations(); } } channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -355,6 +358,8 @@ public class RecoveryTarget extends AbstractComponent { for (int i = 0; i < request.phase1FileNames.size(); i++) { index.addFileDetail(request.phase1FileNames.get(i), request.phase1FileSizes.get(i), false); } + 
recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps); + recoveryStatus.state().getTranslog().totalOperationsOnStart(request.totalTranslogOps); // recoveryBytesCount / recoveryFileCount will be set as we go... channel.sendResponse(TransportResponse.Empty.INSTANCE); } @@ -377,6 +382,7 @@ public class RecoveryTarget extends AbstractComponent { public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { final RecoveryStatus recoveryStatus = statusRef.status(); + recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); // first, we go and move files that were created with the recovery id suffix to // the actual names, its ok if we have a corrupted index here, since we have replicas // to recover from in case of a full cluster shutdown just when this code executes... @@ -425,6 +431,7 @@ public class RecoveryTarget extends AbstractComponent { try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) { final RecoveryStatus recoveryStatus = statusRef.status(); final Store store = recoveryStatus.store(); + recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps()); IndexOutput indexOutput; if (request.position() == 0) { indexOutput = recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store); diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index f5729fce9db..30c693d827e 100644 --- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -20,12 +20,11 @@ package org.elasticsearch.indices.recovery; import com.google.common.collect.Lists; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.TranslogStreams; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStreams; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; @@ -39,14 +38,16 @@ class RecoveryTranslogOperationsRequest extends TransportRequest { private long recoveryId; private ShardId shardId; private List<Translog.Operation> operations; + private int totalTranslogOps = RecoveryState.Translog.UNKNOWN; RecoveryTranslogOperationsRequest() { } - RecoveryTranslogOperationsRequest(long recoveryId, ShardId shardId, List<Translog.Operation> operations) { + RecoveryTranslogOperationsRequest(long recoveryId, ShardId shardId, List<Translog.Operation> operations, int totalTranslogOps) { this.recoveryId = recoveryId; this.shardId = shardId; this.operations = operations; + this.totalTranslogOps = totalTranslogOps; } public long recoveryId() { @@ -61,6 +62,10 @@ class RecoveryTranslogOperationsRequest extends TransportRequest { return operations; } + public int totalTranslogOps() { + return totalTranslogOps; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -71,6 +76,7 @@ class RecoveryTranslogOperationsRequest extends TransportRequest { for (int i = 0; i < size; i++) { 
operations.add(TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.read(in)); } + totalTranslogOps = in.readVInt(); } @Override @@ -82,5 +88,6 @@ class RecoveryTranslogOperationsRequest extends TransportRequest { for (Translog.Operation operation : operations) { TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.write(out, operation); } + out.writeVInt(totalTranslogOps); } } diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 4cdebbfbb90..a42dc1758ce 100644 --- a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -296,9 +296,18 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe IndexMetaData indexMeta = clusterState.getMetaData().indices().get(shardId.getIndex()); try { indicesService.deleteShardStore("no longer used", shardId, indexMeta); - } catch (Exception ex) { + } catch (Throwable ex) { logger.debug("{} failed to delete unallocated shard, ignoring", ex, shardId); } + // if the index doesn't exist anymore, delete its store as well, but only if it's a non-master node, since master + // nodes keep the index metadata around + if (indicesService.hasIndex(shardId.getIndex()) == false && currentState.nodes().localNode().masterNode() == false) { + try { + indicesService.deleteIndexStore("no longer used", indexMeta, currentState); + } catch (Throwable ex) { + logger.debug("{} failed to delete unallocated index, ignoring", ex, shardId.getIndex()); + } + } return currentState; } diff --git a/src/main/java/org/elasticsearch/node/Node.java b/src/main/java/org/elasticsearch/node/Node.java index b407eb5039c..17b6ab1503a 100644 --- a/src/main/java/org/elasticsearch/node/Node.java +++ b/src/main/java/org/elasticsearch/node/Node.java @@ -380,7 +380,11 @@ public class Node implements Releasable { } stopWatch.stop().start("script"); - injector.getInstance(ScriptService.class).close(); + try { + injector.getInstance(ScriptService.class).close(); + } catch (IOException e) { + logger.warn("ScriptService close failed", e); + } stopWatch.stop().start("thread_pool"); // TODO this should really use ThreadPool.terminate() diff --git a/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 471e58c13e1..da239114c7e 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -90,6 +90,7 @@ public class PercolateContext extends SearchContext { private final ScriptService scriptService; private final ConcurrentMap percolateQueries; private final int numberOfShards; + private final Filter aliasFilter; private String[] types; private Engine.Searcher docSearcher; @@ -109,7 +110,7 @@ public class PercolateContext extends SearchContext { public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard, IndexService indexService, PageCacheRecycler pageCacheRecycler, - BigArrays bigArrays, ScriptService scriptService) { + BigArrays bigArrays, ScriptService scriptService, Filter aliasFilter) { this.indexShard = indexShard; this.indexService = indexService; this.fieldDataService = indexService.fieldData(); @@ -123,6 +124,7 @@ public class PercolateContext extends SearchContext { this.searcher = new ContextIndexSearcher(this, engineSearcher); this.scriptService = scriptService; this.numberOfShards = 
request.getNumberOfShards(); + this.aliasFilter = aliasFilter; } public IndexSearcher docSearcher() { @@ -277,7 +279,7 @@ public class PercolateContext extends SearchContext { @Override public Filter searchFilter(String[] types) { - throw new UnsupportedOperationException(); + return aliasFilter(); } @Override @@ -509,7 +511,7 @@ public class PercolateContext extends SearchContext { @Override public Filter aliasFilter() { - throw new UnsupportedOperationException(); + return aliasFilter; } @Override diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java index ac29b5d754f..f19b3b076e7 100644 --- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -23,13 +23,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.*; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; @@ -48,6 +42,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.XBooleanFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.BytesText; import org.elasticsearch.common.text.StringText; @@ -63,22 +58,14 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.percolator.QueryCollector.Count; -import org.elasticsearch.percolator.QueryCollector.Match; -import org.elasticsearch.percolator.QueryCollector.MatchAndScore; -import org.elasticsearch.percolator.QueryCollector.MatchAndSort; +import org.elasticsearch.percolator.QueryCollector.*; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchShardTarget; @@ -96,9 +83,7 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static 
org.elasticsearch.percolator.QueryCollector.count; -import static org.elasticsearch.percolator.QueryCollector.match; -import static org.elasticsearch.percolator.QueryCollector.matchAndScore; +import static org.elasticsearch.percolator.QueryCollector.*; public class PercolatorService extends AbstractComponent { @@ -174,9 +159,15 @@ public class PercolatorService extends AbstractComponent { shardPercolateService.prePercolate(); long startTime = System.nanoTime(); + String[] filteringAliases = clusterService.state().getMetaData().filteringAliases( + indexShard.shardId().index().name(), + request.indices() + ); + Filter aliasFilter = percolateIndexService.aliasesService().aliasFilter(filteringAliases); + SearchShardTarget searchShardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id()); final PercolateContext context = new PercolateContext( - request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService + request, searchShardTarget, indexShard, percolateIndexService, pageCacheRecycler, bigArrays, scriptService, aliasFilter ); try { ParsedDocument parsedDocument = parseRequest(percolateIndexService, request, context); @@ -190,7 +181,7 @@ public class PercolatorService extends AbstractComponent { throw new ElasticsearchIllegalArgumentException("Nothing to percolate"); } - if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null)) { + if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null || context.aliasFilter() != null)) { context.percolateQuery(new MatchAllDocsQuery()); } @@ -779,8 +770,19 @@ public class PercolatorService extends AbstractComponent { private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException { Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter(); - percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.indexService().queryParserService().autoFilterCachePolicy()); - FilteredQuery query = new FilteredQuery(context.percolateQuery(), percolatorTypeFilter); + percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.queryParserService().autoFilterCachePolicy()); + + final Filter filter; + if (context.aliasFilter() != null) { + XBooleanFilter booleanFilter = new XBooleanFilter(); + booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST); + booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST); + filter = booleanFilter; + } else { + filter = percolatorTypeFilter; + } + + FilteredQuery query = new FilteredQuery(context.percolateQuery(), filter); percolatorSearcher.searcher().search(query, percolateCollector); percolateCollector.aggregatorCollector.postCollection(); if (context.aggregations() != null) { diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index daa193bd954..b712fd945fb 100644 --- a/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -35,10 +35,32 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.index.cache.filter.FilterCacheStats; +import org.elasticsearch.index.cache.id.IdCacheStats; +import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.fielddata.FieldDataStats; +import org.elasticsearch.index.flush.FlushStats; +import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.indexing.IndexingStats; +import org.elasticsearch.index.merge.MergeStats; +import org.elasticsearch.index.percolator.stats.PercolateStats; +import org.elasticsearch.index.refresh.RefreshStats; +import org.elasticsearch.index.search.stats.SearchStats; +import org.elasticsearch.index.suggest.stats.SuggestStats; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.monitor.fs.FsStats; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.monitor.jvm.JvmStats; +import org.elasticsearch.monitor.os.OsInfo; +import org.elasticsearch.monitor.os.OsStats; +import org.elasticsearch.monitor.process.ProcessInfo; +import org.elasticsearch.monitor.process.ProcessStats; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestActionListener; import org.elasticsearch.rest.action.support.RestResponseListener; import org.elasticsearch.rest.action.support.RestTable; +import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.Locale; @@ -200,6 +222,16 @@ public class RestNodesAction extends AbstractCatAction { NodeInfo info = nodesInfo.getNodesMap().get(node.id()); NodeStats stats = nodesStats.getNodesMap().get(node.id()); + JvmInfo jvmInfo = info == null ? null : info.getJvm(); + OsInfo osInfo = info == null ? null : info.getOs(); + ProcessInfo processInfo = info == null ? null : info.getProcess(); + + JvmStats jvmStats = stats == null ? null : stats.getJvm(); + FsStats fsStats = stats == null ? null : stats.getFs(); + OsStats osStats = stats == null ? null : stats.getOs(); + ProcessStats processStats = stats == null ? null : stats.getProcess(); + NodeIndicesStats indicesStats = stats == null ? null : stats.getIndices(); + table.startRow(); table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4)); @@ -214,93 +246,107 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(node.getVersion().number()); table.addCell(info == null ? null : info.getBuild().hashShort()); - table.addCell(info == null ? null : info.getJvm().version()); - table.addCell(stats == null ? null : stats.getFs() == null ? null : stats.getFs().total().getAvailable()); - table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsed()); - table.addCell(stats == null ? null : stats.getJvm().getMem().getHeapUsedPrecent()); - table.addCell(info == null ? null : info.getJvm().getMem().getHeapMax()); - table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().used()); - table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().usedPercent()); - table.addCell(info == null ? null : info.getOs().mem() == null ? null : info.getOs().mem().total()); // sigar fails to load in IntelliJ - table.addCell(stats == null ? null : stats.getProcess().getOpenFileDescriptors()); - table.addCell(stats == null || info == null ? null : - calculatePercentage(stats.getProcess().getOpenFileDescriptors(), info.getProcess().getMaxFileDescriptors())); - table.addCell(info == null ? 
null : info.getProcess().getMaxFileDescriptors()); + table.addCell(jvmInfo == null ? null : jvmInfo.version()); + table.addCell(fsStats == null ? null : fsStats.getTotal().getAvailable()); + table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed()); + table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPrecent()); + table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax()); + table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().used()); + table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().usedPercent()); + table.addCell(osInfo == null ? null : osInfo.getMem() == null ? null : osInfo.getMem().total()); // sigar fails to load in IntelliJ + table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors()); + table.addCell(processStats == null || processInfo == null ? null : + calculatePercentage(processStats.getOpenFileDescriptors(), processInfo.getMaxFileDescriptors())); + table.addCell(processInfo == null ? null : processInfo.getMaxFileDescriptors()); - table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", stats.getOs().getLoadAverage()[0])); - table.addCell(stats == null ? null : stats.getJvm().uptime()); + table.addCell(osStats == null ? null : osStats.getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, "%.2f", osStats.getLoadAverage()[0])); + table.addCell(jvmStats == null ? null : jvmStats.uptime()); table.addCell(node.clientNode() ? "c" : node.dataNode() ? "d" : "-"); table.addCell(masterId == null ? "x" : masterId.equals(node.id()) ? "*" : node.masterNode() ? "m" : "-"); table.addCell(node.name()); - table.addCell(stats == null ? null : stats.getIndices().getCompletion().getSize()); + CompletionStats completionStats = indicesStats == null ? null : indicesStats.getCompletion(); + table.addCell(completionStats == null ? null : completionStats.getSize()); - table.addCell(stats == null ? null : stats.getIndices().getFieldData().getMemorySize()); - table.addCell(stats == null ? null : stats.getIndices().getFieldData().getEvictions()); + FieldDataStats fdStats = indicesStats == null ? null : indicesStats.getFieldData(); + table.addCell(fdStats == null ? null : fdStats.getMemorySize()); + table.addCell(fdStats == null ? null : fdStats.getEvictions()); - table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getMemorySize()); - table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getEvictions()); + FilterCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache(); + table.addCell(fcStats == null ? null : fcStats.getMemorySize()); + table.addCell(fcStats == null ? null : fcStats.getEvictions()); - table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMemorySize()); - table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getEvictions()); - table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getHitCount()); - table.addCell(stats == null ? null : stats.getIndices().getQueryCache().getMissCount()); + QueryCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache(); + table.addCell(qcStats == null ? null : qcStats.getMemorySize()); + table.addCell(qcStats == null ? null : qcStats.getEvictions()); + table.addCell(qcStats == null ? null : qcStats.getHitCount()); + table.addCell(qcStats == null ? 
null : qcStats.getMissCount()); - table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotal()); - table.addCell(stats == null ? null : stats.getIndices().getFlush().getTotalTime()); + FlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush(); + table.addCell(flushStats == null ? null : flushStats.getTotal()); + table.addCell(flushStats == null ? null : flushStats.getTotalTime()); - table.addCell(stats == null ? null : stats.getIndices().getGet().current()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getTime()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getCount()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsTime()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getExistsCount()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingTime()); - table.addCell(stats == null ? null : stats.getIndices().getGet().getMissingCount()); + GetStats getStats = indicesStats == null ? null : indicesStats.getGet(); + table.addCell(getStats == null ? null : getStats.current()); + table.addCell(getStats == null ? null : getStats.getTime()); + table.addCell(getStats == null ? null : getStats.getCount()); + table.addCell(getStats == null ? null : getStats.getExistsTime()); + table.addCell(getStats == null ? null : getStats.getExistsCount()); + table.addCell(getStats == null ? null : getStats.getMissingTime()); + table.addCell(getStats == null ? null : getStats.getMissingCount()); - table.addCell(stats == null ? null : stats.getIndices().getIdCache().getMemorySize()); + IdCacheStats idCacheStats = indicesStats == null ? null : indicesStats.getIdCache(); + table.addCell(idCacheStats == null ? null : idCacheStats.getMemorySize()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteTime()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getDeleteCount()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexTime()); - table.addCell(stats == null ? null : stats.getIndices().getIndexing().getTotal().getIndexCount()); + IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing(); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime()); + table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentNumDocs()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getCurrentSize()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotal()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalNumDocs()); - table.addCell(stats == null ? 
null : stats.getIndices().getMerge().getTotalSize()); - table.addCell(stats == null ? null : stats.getIndices().getMerge().getTotalTime()); + MergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge(); + table.addCell(mergeStats == null ? null : mergeStats.getCurrent()); + table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs()); + table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize()); + table.addCell(mergeStats == null ? null : mergeStats.getTotal()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalSize()); + table.addCell(mergeStats == null ? null : mergeStats.getTotalTime()); - table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getPercolate().getMemorySize()); - table.addCell(stats == null ? null : stats.getIndices().getPercolate().getNumQueries()); - table.addCell(stats == null ? null : stats.getIndices().getPercolate().getTime()); - table.addCell(stats == null ? null : stats.getIndices().getPercolate().getCount()); + PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate(); + table.addCell(percolateStats == null ? null : percolateStats.getCurrent()); + table.addCell(percolateStats == null ? null : percolateStats.getMemorySize()); + table.addCell(percolateStats == null ? null : percolateStats.getNumQueries()); + table.addCell(percolateStats == null ? null : percolateStats.getTime()); + table.addCell(percolateStats == null ? null : percolateStats.getCount()); - table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotal()); - table.addCell(stats == null ? null : stats.getIndices().getRefresh().getTotalTime()); + RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); + table.addCell(refreshStats == null ? null : refreshStats.getTotal()); + table.addCell(refreshStats == null ? null : refreshStats.getTotalTime()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchTime()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getFetchCount()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getOpenContexts()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryTime()); - table.addCell(stats == null ? null : stats.getIndices().getSearch().getTotal().getQueryCount()); + SearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch(); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount()); + table.addCell(searchStats == null ? null : searchStats.getOpenContexts()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); - table.addCell(stats == null ? null : stats.getIndices().getSegments().getCount()); - table.addCell(stats == null ? 
null : stats.getIndices().getSegments().getMemory()); - table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMemory()); - table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMaxMemory()); - table.addCell(stats == null ? null : stats.getIndices().getSegments().getVersionMapMemory()); - table.addCell(stats == null ? null : stats.getIndices().getSegments().getBitsetMemory()); + SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments(); + table.addCell(segmentsStats == null ? null : segmentsStats.getCount()); + table.addCell(segmentsStats == null ? null : segmentsStats.getMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMaxMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory()); + table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory()); - table.addCell(stats == null ? null : stats.getIndices().getSuggest().getCurrent()); - table.addCell(stats == null ? null : stats.getIndices().getSuggest().getTime()); - table.addCell(stats == null ? null : stats.getIndices().getSuggest().getCount()); + SuggestStats suggestStats = indicesStats == null ? null : indicesStats.getSuggest(); + table.addCell(suggestStats == null ? null : suggestStats.getCurrent()); + table.addCell(suggestStats == null ? null : suggestStats.getTime()); + table.addCell(suggestStats == null ? null : suggestStats.getCount()); table.endRow(); } diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 87053d72f4a..a7a7eb53e85 100644 --- a/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -98,6 +98,9 @@ public class RestRecoveryAction extends AbstractCatAction { .addCell("bytes_percent", "alias:bp;desc:percent of bytes recovered") .addCell("total_files", "alias:tf;desc:total number of files") .addCell("total_bytes", "alias:tb;desc:total number of bytes") + .addCell("translog", "alias:tr;desc:translog operations recovered") + .addCell("translog_percent", "alias:trp;desc:percent of translog recovery") + .addCell("total_translog", "alias:trt;desc:current total translog operations") .endHeaders(); return t; } @@ -156,6 +159,9 @@ public class RestRecoveryAction extends AbstractCatAction { t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalFileCount()); t.addCell(state.getIndex().totalBytes()); + t.addCell(state.getTranslog().recoveredOperations()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent())); + t.addCell(state.getTranslog().totalOperations()); t.endRow(); } } diff --git a/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index d0bdba767bd..975416c70db 100644 --- a/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import 
org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; @@ -58,7 +59,7 @@ public class RestShardsAction extends AbstractCatAction { final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); - clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); + clusterStateRequest.clear().nodes(true).metaData(true).routingTable(true).indices(indices); client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override public void processResponse(final ClusterStateResponse clusterStateResponse) { @@ -165,7 +166,21 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shard.index()); table.addCell(shard.id()); - table.addCell(shard.primary() ? "p" : "r"); + + IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index()); + boolean usesShadowReplicas = false; + if (indexMeta != null) { + usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.settings()); + } + if (shard.primary()) { + table.addCell("p"); + } else { + if (usesShadowReplicas) { + table.addCell("s"); + } else { + table.addCell("r"); + } + } table.addCell(shard.state()); table.addCell(shardStats == null ? null : shardStats.getDocs().getCount()); table.addCell(shardStats == null ? null : shardStats.getStore().getSize()); diff --git a/src/main/java/org/elasticsearch/rest/action/template/RestDeleteSearchTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/template/RestDeleteSearchTemplateAction.java index ebb217187a3..9b205a8070f 100644 --- a/src/main/java/org/elasticsearch/rest/action/template/RestDeleteSearchTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/template/RestDeleteSearchTemplateAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import static org.elasticsearch.rest.RestRequest.Method.DELETE; @@ -37,6 +38,6 @@ public class RestDeleteSearchTemplateAction extends RestDeleteIndexedScriptActio @Override protected String getScriptLang(RestRequest request) { - return "mustache"; + return MustacheScriptEngineService.NAME; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/rest/action/template/RestGetSearchTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/template/RestGetSearchTemplateAction.java index 4c505c13360..4ffca8a1744 100644 --- a/src/main/java/org/elasticsearch/rest/action/template/RestGetSearchTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/template/RestGetSearchTemplateAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -40,7 +41,7 @@ public class 
RestGetSearchTemplateAction extends RestGetIndexedScriptAction { @Override protected String getScriptLang(RestRequest request) { - return "mustache"; + return MustacheScriptEngineService.NAME; } @Override diff --git a/src/main/java/org/elasticsearch/rest/action/template/RestPutSearchTemplateAction.java b/src/main/java/org/elasticsearch/rest/action/template/RestPutSearchTemplateAction.java index 304f8d117f8..a734ce37ca2 100644 --- a/src/main/java/org/elasticsearch/rest/action/template/RestPutSearchTemplateAction.java +++ b/src/main/java/org/elasticsearch/rest/action/template/RestPutSearchTemplateAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; @@ -58,6 +59,6 @@ public class RestPutSearchTemplateAction extends RestPutIndexedScriptAction { @Override protected String getScriptLang(RestRequest request) { - return "mustache"; + return MustacheScriptEngineService.NAME; } } diff --git a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 9fe4e02a7db..a91825d3e38 100644 --- a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -34,6 +34,8 @@ import java.util.Map; */ public class NativeScriptEngineService extends AbstractComponent implements ScriptEngineService { + public static final String NAME = "native"; + private final ImmutableMap scripts; @Inject @@ -44,7 +46,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri @Override public String[] types() { - return new String[]{"native"}; + return new String[]{NAME}; } @Override diff --git a/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 25a1a817db3..7b78427ebc3 100644 --- a/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -22,12 +22,13 @@ package org.elasticsearch.script; import org.elasticsearch.common.Nullable; import org.elasticsearch.search.lookup.SearchLookup; +import java.io.Closeable; import java.util.Map; /** * */ -public interface ScriptEngineService { +public interface ScriptEngineService extends Closeable { String[] types(); @@ -45,8 +46,6 @@ public interface ScriptEngineService { Object unwrap(Object value); - void close(); - /** * Handler method called when a script is removed from the Guava cache. 
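Making ScriptEngineService extend java.io.Closeable, instead of declaring its own close(), lets script engines participate in standard resource management. A minimal consumer-side sketch, assuming a Set<ScriptEngineService> is in scope the way the patch collects one by injection:

---------------------------------------------------------------------------
import org.apache.lucene.util.IOUtils;

import java.io.IOException;
import java.util.Set;

class EngineCloser {
    // IOUtils.close(Iterable<? extends Closeable>) closes every element and
    // rethrows the first IOException it encounters, so one call replaces a
    // hand-rolled loop over the engines.
    static void closeAll(Set<ScriptEngineService> engines) throws IOException {
        IOUtils.close(engines);
    }
}
---------------------------------------------------------------------------

This is exactly what allows the ScriptService changes below to collapse the old explicit loop over engines into a single IOUtils.close(scriptEngines) call.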
* diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index 6859bca2c2a..e7754368229 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -25,7 +25,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; - +import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; @@ -60,11 +60,13 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.TemplateQueryParser; import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.groovy.GroovyScriptEngineService; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; import org.elasticsearch.watcher.FileWatcher; import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.Closeable; import java.io.IOException; import java.io.InputStreamReader; import java.nio.file.Files; @@ -78,7 +80,7 @@ import java.util.concurrent.TimeUnit; /** * */ -public class ScriptService extends AbstractComponent { +public class ScriptService extends AbstractComponent implements Closeable { public static final String DEFAULT_SCRIPTING_LANGUAGE_SETTING = "script.default_lang"; public static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; @@ -91,9 +93,11 @@ public class ScriptService extends AbstractComponent { private final String defaultLang; - private final ImmutableMap scriptEngines; + private final Set scriptEngines; + private final ImmutableMap scriptEnginesByLang; + private final ImmutableMap scriptEnginesByExt; - private final ConcurrentMap staticCache = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap staticCache = ConcurrentCollections.newConcurrentMap(); private final Cache cache; private final Path scriptsDirectory; @@ -139,99 +143,12 @@ public class ScriptService extends AbstractComponent { public static final ParseField SCRIPT_ID = new ParseField("script_id"); public static final ParseField SCRIPT_INLINE = new ParseField("script"); - public static enum ScriptType { - - INLINE, - INDEXED, - FILE; - - private static final int INLINE_VAL = 0; - private static final int INDEXED_VAL = 1; - private static final int FILE_VAL = 2; - - public static ScriptType readFrom(StreamInput in) throws IOException { - int scriptTypeVal = in.readVInt(); - switch (scriptTypeVal) { - case INDEXED_VAL: - return INDEXED; - case INLINE_VAL: - return INLINE; - case FILE_VAL: - return FILE; - default: - throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + - "] expected one of ["+INLINE_VAL +"," + INDEXED_VAL + "," + FILE_VAL+"]"); - } - } - - public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{ - if (scriptType != null) { - switch (scriptType){ - case INDEXED: - out.writeVInt(INDEXED_VAL); - return; - case INLINE: - out.writeVInt(INLINE_VAL); - return; - case FILE: - out.writeVInt(FILE_VAL); - return; - default: - throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType); - } - } else { - 
out.writeVInt(INLINE_VAL); //Default to inline - } - } - } - - static class IndexedScript { - private final String lang; - private final String id; - - IndexedScript(String lang, String script) { - this.lang = lang; - final String[] parts = script.split("/"); - if (parts.length == 1) { - this.id = script; - } else { - if (parts.length != 3) { - throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" + - " should be /lang/id"); - } else { - if (!parts[1].equals(this.lang)) { - throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]"); - } - this.id = parts[2]; - } - } - } - } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEngines.get("groovy"); - if (engine != null) { - String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY); - boolean blacklistChanged = engine.addToBlacklist(patches); - if (blacklistChanged) { - logger.info("adding {} to [{}], new blacklisted methods: {}", patches, - GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions()); - engine.reloadConfig(); - // Because the GroovyScriptEngineService knows nothing about the - // cache, we need to clear it here if the setting changes - ScriptService.this.clearCache(); - } - } - } - } - @Inject public ScriptService(Settings settings, Environment env, Set scriptEngines, ResourceWatcherService resourceWatcherService, NodeSettingsService nodeSettingsService) throws IOException { super(settings); + this.scriptEngines = scriptEngines; int cacheMaxSize = settings.getAsInt(SCRIPT_CACHE_SIZE_SETTING, 100); TimeValue cacheExpire = settings.getAsTime(SCRIPT_CACHE_EXPIRE_SETTING, null); logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire); @@ -249,13 +166,18 @@ public class ScriptService extends AbstractComponent { cacheBuilder.removalListener(new ScriptCacheRemovalListener()); this.cache = cacheBuilder.build(); - ImmutableMap.Builder builder = ImmutableMap.builder(); + ImmutableMap.Builder enginesByLangBuilder = ImmutableMap.builder(); + ImmutableMap.Builder enginesByExtBuilder = ImmutableMap.builder(); for (ScriptEngineService scriptEngine : scriptEngines) { for (String type : scriptEngine.types()) { - builder.put(type, scriptEngine); + enginesByLangBuilder.put(type, scriptEngine); + } + for (String ext : scriptEngine.extensions()) { + enginesByExtBuilder.put(ext, scriptEngine); } } - this.scriptEngines = builder.build(); + this.scriptEnginesByLang = enginesByLangBuilder.build(); + this.scriptEnginesByExt = enginesByExtBuilder.build(); // add file watcher for static scripts scriptsDirectory = env.configFile().resolve("scripts"); @@ -281,18 +203,9 @@ public class ScriptService extends AbstractComponent { this.client = client; } - public void close() { - for (ScriptEngineService engineService : scriptEngines.values()) { - engineService.close(); - } - } - - public CompiledScript compile(String script) { - return compile(defaultLang, script); - } - - public CompiledScript compile(String lang, String script) { - return compile(lang, script, ScriptType.INLINE); + @Override + public void close() throws IOException { + IOUtils.close(scriptEngines); } /** @@ -310,77 +223,68 @@ public class ScriptService extends AbstractComponent { 
this.fileWatcher.clearState(); } + private ScriptEngineService getScriptEngineServiceForLang(String lang) { + ScriptEngineService scriptEngineService = scriptEnginesByLang.get(lang); + if (scriptEngineService == null) { + throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]"); + } + return scriptEngineService; + } + + private ScriptEngineService getScriptEngineServiceForFileExt(String fileExtension) { + ScriptEngineService scriptEngineService = scriptEnginesByExt.get(fileExtension); + if (scriptEngineService == null) { + throw new ElasticsearchIllegalArgumentException("script file extension not supported [" + fileExtension + "]"); + } + return scriptEngineService; + } + + /** + * Compiles a script straight-away, or returns the previously compiled and cached script, without checking if it can be executed based on settings. + */ public CompiledScript compile(String lang, String script, ScriptType scriptType) { + if (lang == null) { + lang = defaultLang; + } if (logger.isTraceEnabled()) { logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, scriptType, script); } - CacheKey cacheKey; - CompiledScript compiled; + ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); + CacheKey cacheKey = newCacheKey(scriptEngineService, script); - if (lang == null) { - lang = defaultLang; - } - - if(scriptType == ScriptType.INDEXED) { - if (client == null) { - throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered."); - } - - final IndexedScript indexedScript = new IndexedScript(lang, script); - - verifyDynamicScripting(indexedScript.lang); //Since anyone can index a script, disable indexed scripting - // if dynamic scripting is disabled, perhaps its own setting ? - - script = getScriptFromIndex(client, indexedScript.lang, indexedScript.id); - } else if (scriptType == ScriptType.FILE) { - - compiled = staticCache.get(script); //On disk scripts will be loaded into the staticCache by the listener - - if (compiled != null) { - return compiled; - } else { + if (scriptType == ScriptType.FILE) { + CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener + if (compiled == null) { throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + script); } - } - - //This is an inline script check to see if we have it in the cache - verifyDynamicScripting(lang); - - cacheKey = new CacheKey(lang, script); - - compiled = cache.getIfPresent(cacheKey); - if (compiled != null) { return compiled; } - //Either an un-cached inline script or an indexed script + verifyDynamicScripting(lang, scriptEngineService); - if (!dynamicScriptEnabled(lang)) { - throw new ScriptException("dynamic scripting for [" + lang + "] disabled"); + if (scriptType == ScriptType.INDEXED) { + if (client == null) { + throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered."); + } + final IndexedScript indexedScript = new IndexedScript(lang, script); + script = getScriptFromIndex(client, indexedScript.lang, indexedScript.id); } - // not the end of the world if we compile it twice... - compiled = getCompiledScript(lang, script); - //Since the cache key is the script content itself we don't need to - //invalidate/check the cache if an indexed script changes. 
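Stripped of the diff noise, the rewritten compile() flow is: resolve the engine by language, derive the cache key from the engine's canonical type plus the script source, and compile only on a cache miss. A condensed sketch in the patch's own vocabulary (not a verbatim excerpt; FILE scripts short-circuit via the static cache before this point):

---------------------------------------------------------------------------
ScriptEngineService engine = getScriptEngineServiceForLang(lang); // throws on unknown lang
CacheKey cacheKey = newCacheKey(engine, script);                  // (engine.types()[0], script)

CompiledScript compiled = cache.getIfPresent(cacheKey);
if (compiled == null) {
    // Benign race: two threads may compile the same source concurrently;
    // that wastes one compilation but never corrupts the cache, and since
    // the key embeds the script content itself there is nothing to
    // invalidate when an indexed script changes.
    compiled = new CompiledScript(lang, engine.compile(script));
    cache.put(cacheKey, compiled);
}
---------------------------------------------------------------------------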
- cache.put(cacheKey, compiled); - + CompiledScript compiled = cache.getIfPresent(cacheKey); + if (compiled == null) { + //Either an un-cached inline script or an indexed script + // not the end of the world if we compile it twice... + compiled = new CompiledScript(lang, scriptEngineService.compile(script)); + //Since the cache key is the script content itself we don't need to + //invalidate/check the cache if an indexed script changes. + cache.put(cacheKey, compiled); + } return compiled; } - private CompiledScript getCompiledScript(String lang, String script) { - CompiledScript compiled;ScriptEngineService service = scriptEngines.get(lang); - if (service == null) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]"); - } - - compiled = new CompiledScript(lang, service.compile(script)); - return compiled; - } - - private void verifyDynamicScripting(String lang) { - if (!dynamicScriptEnabled(lang)) { + private void verifyDynamicScripting(String lang, ScriptEngineService scriptEngineService) { + if (!dynamicScriptEnabled(lang, scriptEngineService)) { throw new ScriptException("dynamic scripting for [" + lang + "] disabled"); } } @@ -396,8 +300,8 @@ public class ScriptService extends AbstractComponent { private String validateScriptLanguage(String scriptLang) { if (scriptLang == null) { scriptLang = defaultLang; - } else if (!scriptEngines.containsKey(scriptLang)) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported ["+scriptLang+"]"); + } else if (scriptEnginesByLang.containsKey(scriptLang) == false) { + throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + scriptLang + "]"); } return scriptLang; } @@ -421,7 +325,7 @@ public class ScriptService extends AbstractComponent { //Just try and compile it //This will have the benefit of also adding the script to the cache if it compiles try { - CompiledScript compiledScript = compile(scriptLang, context.template(), ScriptService.ScriptType.INLINE); + CompiledScript compiledScript = compile(scriptLang, context.template(), ScriptType.INLINE); if (compiledScript == null) { throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); @@ -456,6 +360,7 @@ public class ScriptService extends AbstractComponent { client.delete(deleteRequest, listener); } + @SuppressWarnings("unchecked") public static String getScriptFromResponse(GetResponse responseFields) { Map source = responseFields.getSourceAsMap(); if (source.containsKey("template")) { @@ -484,37 +389,39 @@ public class ScriptService extends AbstractComponent { } } - public ExecutableScript executable(String lang, String script, ScriptType scriptType, Map vars) { + /** + * Compiles (or retrieves from cache) and executes the provided script + */ + public ExecutableScript executable(String lang, String script, ScriptType scriptType, Map vars) { return executable(compile(lang, script, scriptType), vars); } - public ExecutableScript executable(CompiledScript compiledScript, Map vars) { - return scriptEngines.get(compiledScript.lang()).executable(compiledScript.compiled(), vars); - } - - public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map vars) { - return scriptEngines.get(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars); + /** + * Executes a previously compiled script provided as an argument + */ + public ExecutableScript executable(CompiledScript 
compiledScript, Map vars) { + return getScriptEngineServiceForLang(compiledScript.lang()).executable(compiledScript.compiled(), vars); } + /** + * Compiles (or retrieves from cache) and executes the provided search script + */ public SearchScript search(SearchLookup lookup, String lang, String script, ScriptType scriptType, @Nullable Map vars) { - return search(compile(lang, script, scriptType), lookup, vars); + CompiledScript compiledScript = compile(lang, script, scriptType); + return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars); } - private boolean dynamicScriptEnabled(String lang) { - ScriptEngineService service = scriptEngines.get(lang); - if (service == null) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]"); - } - + private boolean dynamicScriptEnabled(String lang, ScriptEngineService scriptEngineService) { // Templating languages (mustache) and native scripts are always // allowed, "native" executions are registered through plugins - if (this.dynamicScriptingDisabled == DynamicScriptDisabling.EVERYTHING_ALLOWED || "native".equals(lang) || "mustache".equals(lang)) { + if (this.dynamicScriptingDisabled == DynamicScriptDisabling.EVERYTHING_ALLOWED || + NativeScriptEngineService.NAME.equals(lang) || MustacheScriptEngineService.NAME.equals(lang)) { return true; - } else if (this.dynamicScriptingDisabled == DynamicScriptDisabling.ONLY_DISK_ALLOWED) { - return false; - } else { - return service.sandboxed(); } + if (this.dynamicScriptingDisabled == DynamicScriptDisabling.ONLY_DISK_ALLOWED) { + return false; + } + return scriptEngineService.sandboxed(); } /** @@ -529,7 +436,7 @@ public class ScriptService extends AbstractComponent { if (logger.isDebugEnabled()) { logger.debug("notifying script services of script removal due to: [{}]", notification.getCause()); } - for (ScriptEngineService service : scriptEngines.values()) { + for (ScriptEngineService service : scriptEngines) { try { service.scriptRemoved(notification.getValue()); } catch (Exception e) { @@ -562,27 +469,20 @@ public class ScriptService extends AbstractComponent { } Tuple scriptNameExt = scriptNameExt(file); if (scriptNameExt != null) { - boolean found = false; - for (ScriptEngineService engineService : scriptEngines.values()) { - for (String s : engineService.extensions()) { - if (s.equals(scriptNameExt.v2())) { - found = true; - try { - logger.info("compiling script file [{}]", file.toAbsolutePath()); - String script = Streams.copyToString(new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8)); - staticCache.put(scriptNameExt.v1(), new CompiledScript(engineService.types()[0], engineService.compile(script))); - } catch (Throwable e) { - logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); - } - break; - } - } - if (found) { - break; - } - } - if (!found) { + ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); + if (engineService == null) { logger.warn("no script engine found for [{}]", scriptNameExt.v2()); + } else { + try { + logger.info("compiling script file [{}]", file.toAbsolutePath()); + try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), Charsets.UTF_8)) { + String script = Streams.copyToString(reader); + CacheKey cacheKey = newCacheKey(engineService, scriptNameExt.v1()); + staticCache.put(cacheKey, new CompiledScript(engineService.types()[0], engineService.compile(script))); + } + } catch (Throwable e) { + 
logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1()); + } } } } @@ -596,8 +496,10 @@ public class ScriptService extends AbstractComponent { public void onFileDeleted(Path file) { Tuple scriptNameExt = scriptNameExt(file); if (scriptNameExt != null) { + ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); + assert engineService != null; logger.info("removing script file [{}]", file.toAbsolutePath()); - staticCache.remove(scriptNameExt.v1()); + staticCache.remove(newCacheKey(engineService, scriptNameExt.v1())); } } @@ -608,7 +510,63 @@ public class ScriptService extends AbstractComponent { } - public final static class CacheKey { + /** + * The type of a script, more specifically where it gets loaded from: + * - provided dynamically at request time + * - loaded from an index + * - loaded from file + */ + public static enum ScriptType { + + INLINE, + INDEXED, + FILE; + + private static final int INLINE_VAL = 0; + private static final int INDEXED_VAL = 1; + private static final int FILE_VAL = 2; + + public static ScriptType readFrom(StreamInput in) throws IOException { + int scriptTypeVal = in.readVInt(); + switch (scriptTypeVal) { + case INDEXED_VAL: + return INDEXED; + case INLINE_VAL: + return INLINE; + case FILE_VAL: + return FILE; + default: + throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + + "] expected one of [" + INLINE_VAL + "," + INDEXED_VAL + "," + FILE_VAL + "]"); + } + } + + public static void writeTo(ScriptType scriptType, StreamOutput out) throws IOException{ + if (scriptType != null) { + switch (scriptType){ + case INDEXED: + out.writeVInt(INDEXED_VAL); + return; + case INLINE: + out.writeVInt(INLINE_VAL); + return; + case FILE: + out.writeVInt(FILE_VAL); + return; + default: + throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType); + } + } else { + out.writeVInt(INLINE_VAL); //Default to inline + } + } + } + + private static CacheKey newCacheKey(ScriptEngineService engineService, String script) { + return new CacheKey(engineService.types()[0], script); + } + + private static class CacheKey { public final String lang; public final String script; @@ -631,4 +589,46 @@ public class ScriptService extends AbstractComponent { return lang.hashCode() + 31 * script.hashCode(); } } + + private static class IndexedScript { + private final String lang; + private final String id; + + IndexedScript(String lang, String script) { + this.lang = lang; + final String[] parts = script.split("/"); + if (parts.length == 1) { + this.id = script; + } else { + if (parts.length != 3) { + throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" + + " should be /lang/id"); + } else { + if (!parts[1].equals(this.lang)) { + throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]"); + } + this.id = parts[2]; + } + } + } + } + + private class ApplySettings implements NodeSettingsService.Listener { + @Override + public void onRefreshSettings(Settings settings) { + GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEnginesByLang.get(GroovyScriptEngineService.NAME); + if (engine != null) { + String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY); + boolean blacklistChanged = engine.addToBlacklist(patches); + if (blacklistChanged) { + logger.info("adding {} to 
[{}], new blacklisted methods: {}", patches, + GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions()); + engine.reloadConfig(); + // Because the GroovyScriptEngineService knows nothing about the + // cache, we need to clear it here if the setting changes + ScriptService.this.clearCache(); + } + } + } + } } diff --git a/src/main/java/org/elasticsearch/script/SearchScript.java b/src/main/java/org/elasticsearch/script/SearchScript.java index 15955d98d99..62744870931 100644 --- a/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/src/main/java/org/elasticsearch/script/SearchScript.java @@ -20,8 +20,6 @@ package org.elasticsearch.script; import org.elasticsearch.common.lucene.ReaderContextAware; import org.elasticsearch.common.lucene.ScorerAware; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.lookup.SearchLookup; import java.util.Map; @@ -39,36 +37,4 @@ public interface SearchScript extends ExecutableScript, ReaderContextAware, Scor long runAsLong(); double runAsDouble(); - - public static class Builder { - - private String script; - private ScriptService.ScriptType scriptType; - private String lang; - private Map params; - - public Builder script(String script, ScriptService.ScriptType scriptType) { - this.script = script; - this.scriptType = scriptType; - return this; - } - - public Builder lang(String lang) { - this.lang = lang; - return this; - } - - public Builder params(Map params) { - this.params = params; - return this; - } - - public SearchScript build(SearchContext context) { - return build(context.scriptService(), context.lookup()); - } - - public SearchScript build(ScriptService service, SearchLookup lookup) { - return service.search(lookup, lang, script, scriptType, params); - } - } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index dcf83df6680..23841942104 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -48,6 +48,8 @@ import java.util.Map; */ public class ExpressionScriptEngineService extends AbstractComponent implements ScriptEngineService { + public static final String NAME = "expression"; + @Inject public ExpressionScriptEngineService(Settings settings) { super(settings); @@ -55,12 +57,12 @@ public class ExpressionScriptEngineService extends AbstractComponent implements @Override public String[] types() { - return new String[]{"expression"}; + return new String[]{NAME}; } @Override public String[] extensions() { - return new String[]{"expression"}; + return new String[]{NAME}; } @Override diff --git a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 912e6c18784..2dedab8d41d 100644 --- a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -48,10 +48,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.math.BigDecimal; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.concurrent.atomic.AtomicLong; /** @@ -59,6 +56,7 @@ import 
java.util.concurrent.atomic.AtomicLong; */ public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService { + public static final String NAME = "groovy"; public static String GROOVY_SCRIPT_SANDBOX_ENABLED = "script.groovy.sandbox.enabled"; public static String GROOVY_SCRIPT_BLACKLIST_PATCH = "script.groovy.sandbox.method_blacklist_patch"; @@ -85,9 +83,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri */ public boolean addToBlacklist(String... additions) { Set<String> newBlackList = new HashSet<>(blacklistAdditions); - for (String addition : additions) { - newBlackList.add(addition); - } + Collections.addAll(newBlackList, additions); boolean changed = this.blacklistAdditions.equals(newBlackList) == false; this.blacklistAdditions = ImmutableSet.copyOf(newBlackList); return changed; @@ -120,7 +116,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public void scriptRemoved(@Nullable CompiledScript script) { // script could be null, meaning the script has already been garbage collected - if (script == null || "groovy".equals(script.lang())) { + if (script == null || NAME.equals(script.lang())) { // Clear the cache, this removes old script versions from the // cache to prevent running out of PermGen space loader.clearCache(); @@ -129,12 +125,12 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public String[] types() { - return new String[]{"groovy"}; + return new String[]{NAME}; } @Override public String[] extensions() { - return new String[]{"groovy"}; + return new String[]{NAME}; } @Override @@ -157,6 +153,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri /** * Return a script object with the given vars from the compiled script object */ + @SuppressWarnings("unchecked") private Script createScript(Object compiledScript, Map<String, Object> vars) throws InstantiationException, IllegalAccessException { Class scriptClass = (Class) compiledScript; Script scriptObject = (Script) scriptClass.newInstance(); @@ -225,12 +222,12 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri private final SearchLookup lookup; private final Map<String, Object> variables; private final ESLogger logger; - private Scorer scorer; public GroovyScript(Script script, ESLogger logger) { this(script, null, logger); } + @SuppressWarnings("unchecked") public GroovyScript(Script script, @Nullable SearchLookup lookup, ESLogger logger) { this.script = script; this.lookup = lookup; @@ -240,7 +237,6 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public void setScorer(Scorer scorer) { - this.scorer = scorer; this.variables.put("_score", new ScoreAccessor(scorer)); } diff --git a/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 23f5a65cf75..c40523750bb 100644 --- a/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -47,6 +47,8 @@ import java.util.Map; */ public class MustacheScriptEngineService extends AbstractComponent implements ScriptEngineService { + public static final String NAME = "mustache"; + /** Thread local UTF8StreamWriter to store template execution results in, thread local to save object creation.*/ private static ThreadLocal<SoftReference<UTF8StreamWriter>> utf8StreamWriter = new
ThreadLocal<>(); @@ -116,12 +118,12 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc @Override public String[] types() { - return new String[] {"mustache"}; + return new String[] {NAME}; } @Override public String[] extensions() { - return new String[] {"mustache"}; + return new String[] {NAME}; } @Override @@ -172,7 +174,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc public MustacheExecutableScript(Mustache mustache, Map vars) { this.mustache = mustache; - this.vars = vars == null ? Collections.EMPTY_MAP : vars; + this.vars = vars == null ? Collections.emptyMap() : vars; } @Override @@ -184,7 +186,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc public Object run() { BytesStreamOutput result = new BytesStreamOutput(); UTF8StreamWriter writer = utf8StreamWriter().setOutput(result); - ((Mustache) mustache).execute(writer, vars); + mustache.execute(writer, vars); try { writer.flush(); } catch (IOException e) { diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 066d618b300..ddeb0575d48 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableMap; - import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -73,6 +72,7 @@ import org.elasticsearch.indices.IndicesWarmer.WarmerContext; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -85,7 +85,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -629,7 +628,7 @@ public class SearchService extends AbstractLifecycleComponent { final ExecutableScript executable; if (hasLength(request.templateName())) { - executable = this.scriptService.executable("mustache", request.templateName(), request.templateType(), request.templateParams()); + executable = this.scriptService.executable(MustacheScriptEngineService.NAME, request.templateName(), request.templateType(), request.templateParams()); } else { if (!hasLength(request.templateSource())) { return; @@ -641,7 +640,7 @@ public class SearchService extends AbstractLifecycleComponent { parser = XContentFactory.xContent(request.templateSource()).createParser(request.templateSource()); templateContext = TemplateQueryParser.parse(parser, "params", "template"); - if (templateContext.scriptType().equals(ScriptService.ScriptType.INLINE)) { + if (templateContext.scriptType() == ScriptService.ScriptType.INLINE) { //Try to double parse for nested template id/file parser = null; try { @@ -666,10 +665,10 @@ public class SearchService extends AbstractLifecycleComponent { Releasables.closeWhileHandlingException(parser); } - if (templateContext == null || 
!hasLength(templateContext.template())) { + if (!hasLength(templateContext.template())) { throw new ElasticsearchParseException("Template must have [template] field configured"); } - executable = this.scriptService.executable("mustache", templateContext.template(), templateContext.scriptType(), templateContext.params()); + executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params()); } BytesReference processedQuery = (BytesReference) executable.run(); @@ -811,8 +810,8 @@ public class SearchService extends AbstractLifecycleComponent { @Override public void run() { try { - for (Iterator> it = warmUp.iterator(); it.hasNext(); ) { - final String indexName = it.next().value; + for (ObjectCursor stringObjectCursor : warmUp) { + final String indexName = stringObjectCursor.value; final long start = System.nanoTime(); for (final LeafReaderContext ctx : context.searcher().reader().leaves()) { final NumericDocValues values = ctx.reader().getNormValues(indexName); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index 91ad85364e7..a48fc850b90 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -185,6 +185,7 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggreg } } + significanceHeuristic.initialize(reduceContext); final int size = Math.min(requiredSize, buckets.size()); BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue(size); for (Map.Entry> entry : buckets.entrySet()) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index bfb0b70458b..85ae983ef18 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.significant; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -97,6 +98,11 @@ public class SignificantLongTerms extends InternalSignificantTerms { this.term = term; } + public Bucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, long term, InternalAggregations aggregations, double score) { + this(subsetDf, subsetSize, supersetDf, supersetSize, term, aggregations, null); + this.score = score; + } + @Override public Object getKey() { return term; @@ -127,7 +133,9 @@ public class SignificantLongTerms extends InternalSignificantTerms { subsetDf = in.readVLong(); supersetDf = in.readVLong(); term = in.readLong(); + score = in.readDouble(); aggregations = InternalAggregations.readAggregations(in); + } @Override @@ -135,6 +143,7 @@ public class SignificantLongTerms extends InternalSignificantTerms { out.writeVLong(subsetDf); out.writeVLong(supersetDf); out.writeLong(term); + out.writeDouble(getSignificanceScore()); aggregations.writeTo(out); } @@ -194,8 +203,8 @@ public class 
SignificantLongTerms extends InternalSignificantTerms { for (int i = 0; i < size; i++) { Bucket bucket = new Bucket(subsetSize, supersetSize, formatter); bucket.readFrom(in); - bucket.updateScore(significanceHeuristic); buckets.add(bucket); + } this.buckets = buckets; this.bucketMap = null; @@ -211,6 +220,7 @@ public class SignificantLongTerms extends InternalSignificantTerms { significanceHeuristic.writeTo(out); out.writeVInt(buckets.size()); for (InternalSignificantTerms.Bucket bucket : buckets) { + bucket.writeTo(out); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java index 295fadd41b9..d8fc74c9bc5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.significant; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -95,6 +96,11 @@ public class SignificantStringTerms extends InternalSignificantTerms { this.termBytes = term; } + public Bucket(BytesRef term, long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations, double score) { + this(term, subsetDf, subsetSize, supersetDf, supersetSize, aggregations); + this.score = score; + } + @Override public Number getKeyAsNumber() { // this method is needed for scripted numeric aggregations @@ -126,6 +132,7 @@ public class SignificantStringTerms extends InternalSignificantTerms { termBytes = in.readBytesRef(); subsetDf = in.readVLong(); supersetDf = in.readVLong(); + score = in.readDouble(); aggregations = InternalAggregations.readAggregations(in); } @@ -134,6 +141,7 @@ public class SignificantStringTerms extends InternalSignificantTerms { out.writeBytesRef(termBytes); out.writeVLong(subsetDf); out.writeVLong(supersetDf); + out.writeDouble(getSignificanceScore()); aggregations.writeTo(out); } @@ -183,7 +191,6 @@ public class SignificantStringTerms extends InternalSignificantTerms { for (int i = 0; i < size; i++) { Bucket bucket = new Bucket(subsetSize, supersetSize); bucket.readFrom(in); - bucket.updateScore(significanceHeuristic); buckets.add(bucket); } this.buckets = buckets; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java new file mode 100644 index 00000000000..ddc672723a3 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -0,0 +1,266 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
+
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.ScriptParameterParser;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class ScriptHeuristic extends SignificanceHeuristic {
+
+    protected static final ParseField NAMES_FIELD = new ParseField("script_heuristic");
+    private final LongAccessor subsetSizeHolder;
+    private final LongAccessor supersetSizeHolder;
+    private final LongAccessor subsetDfHolder;
+    private final LongAccessor supersetDfHolder;
+    ExecutableScript script = null;
+    String scriptLang;
+    String scriptString;
+    ScriptService.ScriptType scriptType;
+    Map<String, Object> params;
+
+    public static final SignificanceHeuristicStreams.Stream STREAM = new SignificanceHeuristicStreams.Stream() {
+        @Override
+        public SignificanceHeuristic readResult(StreamInput in) throws IOException {
+            return new ScriptHeuristic(null, in.readOptionalString(), in.readString(), ScriptService.ScriptType.readFrom(in), in.readMap());
+        }
+
+        @Override
+        public String getName() {
+            return NAMES_FIELD.getPreferredName();
+        }
+    };
+
+    public ScriptHeuristic(ExecutableScript searchScript, String scriptLang, String scriptString, ScriptService.ScriptType scriptType, Map<String, Object> params) {
+        subsetSizeHolder = new LongAccessor();
+        supersetSizeHolder = new LongAccessor();
+        subsetDfHolder = new LongAccessor();
+        supersetDfHolder = new LongAccessor();
+        this.script = searchScript;
+        if (script != null) {
+            script.setNextVar("_subset_freq", subsetDfHolder);
+            script.setNextVar("_subset_size", subsetSizeHolder);
+            script.setNextVar("_superset_freq", supersetDfHolder);
+            script.setNextVar("_superset_size", supersetSizeHolder);
+        }
+        this.scriptLang = scriptLang;
+        this.scriptString = scriptString;
+        this.scriptType = scriptType;
+        this.params = params;
+    }
+
+    public void initialize(InternalAggregation.ReduceContext context) {
+        script = context.scriptService().executable(scriptLang, scriptString, scriptType, params);
+        script.setNextVar("_subset_freq", subsetDfHolder);
+        script.setNextVar("_subset_size", subsetSizeHolder);
+        script.setNextVar("_superset_freq", supersetDfHolder);
+        script.setNextVar("_superset_size", supersetSizeHolder);
+    }
+
+    /**
+     * Calculates score with a script
+     *
+     * @param subsetFreq   The frequency of the term in the selected sample
+     * @param subsetSize   The size of the selected sample (typically number of docs)
+     * @param supersetFreq The frequency of the term in the superset from which the sample was taken
+     * @param supersetSize The size of the superset from which the sample was taken (typically number of docs)
+     * @return a "significance" score
+     */
+    @Override
+    public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+        if (script == null) {
+            // In tests, when calling assertSearchResponse(..) the response is streamed one additional time with an arbitrary version, see assertVersionSerializable(..).
+            // Now, for versions before 1.5.0 the score is computed after streaming the response, but for scripts the script does not exist yet.
+            // assertSearchResponse() might therefore fail although there is no problem.
+            // This should be replaced by an exception in 2.0.
+            ESLoggerFactory.getLogger("script heuristic").warn("cannot compute score - script has not been initialized yet.");
+            return 0;
+        }
+        subsetSizeHolder.value = subsetSize;
+        supersetSizeHolder.value = supersetSize;
+        subsetDfHolder.value = subsetFreq;
+        supersetDfHolder.value = supersetFreq;
+        return ((Number) script.run()).doubleValue();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(STREAM.getName());
+        out.writeOptionalString(scriptLang);
+        out.writeString(scriptString);
+        ScriptService.ScriptType.writeTo(scriptType, out);
+        out.writeMap(params);
+    }
+
+    public static class ScriptHeuristicParser implements SignificanceHeuristicParser {
+        private final ScriptService scriptService;
+
+        @Inject
+        public ScriptHeuristicParser(ScriptService scriptService) {
+            this.scriptService = scriptService;
+        }
+
+        @Override
+        public SignificanceHeuristic parse(XContentParser parser) throws IOException, QueryParsingException {
+            NAMES_FIELD.match(parser.currentName(), ParseField.EMPTY_FLAGS);
+            String script = null;
+            String scriptLang;
+            XContentParser.Token token;
+            Map<String, Object> params = new HashMap<>();
+            String currentFieldName = null;
+            ScriptService.ScriptType scriptType = ScriptService.ScriptType.INLINE;
+            ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token.equals(XContentParser.Token.FIELD_NAME)) {
+                    currentFieldName = parser.currentName();
+                } else if (token == XContentParser.Token.START_OBJECT) {
+                    if ("params".equals(currentFieldName)) {
+                        params = parser.map();
+                    } else {
+                        throw new ElasticsearchParseException("unknown object " + currentFieldName + " in script_heuristic");
+                    }
+                } else if (!scriptParameterParser.token(currentFieldName, token, parser)) {
+                    throw new ElasticsearchParseException("unknown field " + currentFieldName + " in script_heuristic");
+                }
+            }
+
+            ScriptParameterParser.ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue();
+            if (scriptValue != null) {
+                script = scriptValue.script();
+                scriptType = scriptValue.scriptType();
+            }
+            scriptLang = scriptParameterParser.lang();
+
+            if (script == null) {
+                throw new ElasticsearchParseException("No script found in script_heuristic");
+            }
+            ExecutableScript searchScript;
+            try {
+                searchScript = scriptService.executable(scriptLang, script, scriptType, params);
+            } catch (Exception e) {
+                throw new ElasticsearchParseException("The script [" + script + "] could not be loaded", e);
+            }
+            return new ScriptHeuristic(searchScript, scriptLang, script, scriptType, params);
+        }
+
+        @Override
+        public String[] getNames() {
+            return NAMES_FIELD.getAllNamesIncludedDeprecated();
+        }
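+
+        // Editorial sketch, not part of the original patch: the request-side JSON this
+        // parser accepts inside a significant_terms aggregation looks roughly like
+        //
+        //   "significant_terms": {
+        //     "field": "text",                                  <- field name illustrative
+        //     "script_heuristic": {
+        //       "script": "_subset_freq / _superset_freq",
+        //       "params": {}
+        //     }
+        //   }
+        //
+        // The inline script is compiled eagerly via scriptService.executable(...) above,
+        // and is recreated on the reduce side through ScriptHeuristic#initialize(...),
+        // which binds the four _subset/_superset variables before getScore(...) runs it.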
+    }
+
+    public static class ScriptHeuristicBuilder implements SignificanceHeuristicBuilder {
+
+        private String script = null;
+        private String lang = null;
+        private Map<String, Object> params = null;
+        private String scriptId;
+        private String scriptFile;
+
+        public ScriptHeuristicBuilder setScript(String script) {
+            this.script = script;
+            return this;
+        }
+
+        public ScriptHeuristicBuilder setScriptFile(String script) {
+            this.scriptFile = script;
+            return this;
+        }
+
+        public ScriptHeuristicBuilder setLang(String lang) {
+            this.lang = lang;
+            return this;
+        }
+
+        public ScriptHeuristicBuilder setParams(Map<String, Object> params) {
+            this.params = params;
+            return this;
+        }
+
+        public ScriptHeuristicBuilder setScriptId(String scriptId) {
+            this.scriptId = scriptId;
+            return this;
+        }
+
+        @Override
+        public void toXContent(XContentBuilder builder) throws IOException {
+            builder.startObject(STREAM.getName());
+            if (script != null) {
+                builder.field("script", script);
+            }
+            if (lang != null) {
+                builder.field("lang", lang);
+            }
+            if (params != null) {
+                builder.field("params", params);
+            }
+            if (scriptId != null) {
+                builder.field("script_id", scriptId);
+            }
+            if (scriptFile != null) {
+                builder.field("script_file", scriptFile);
+            }
+            builder.endObject();
+        }
+
+    }
+
+    public final class LongAccessor extends Number {
+        public long value;
+
+        public int intValue() {
+            return (int)value;
+        }
+
+        public long longValue() {
+            return value;
+        }
+
+        @Override
+        public float floatValue() {
+            return (float)value;
+        }
+
+        @Override
+        public double doubleValue() {
+            return (double)value;
+        }
+
+        @Override
+        public String toString() {
+            return Long.toString(value);
+        }
+    }
+}
+
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
index 61e29e83e90..b3da709323b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
@@ -22,6 +22,7 @@
 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;
 
 import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 
 import java.io.IOException;
 
@@ -48,4 +49,8 @@ public abstract class SignificanceHeuristic {
             throw new ElasticsearchIllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName);
         }
     }
+
+    public void initialize(InternalAggregation.ReduceContext reduceContext) {
+
+    }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificantTermsHeuristicModule.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificantTermsHeuristicModule.java
index aa0048ed1cf..45143d73016 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificantTermsHeuristicModule.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificantTermsHeuristicModule.java
@@ -33,11 +33,13 @@ public class SignificantTermsHeuristicModule extends AbstractModule {
     private List<Class<? extends SignificanceHeuristicParser>> parsers = Lists.newArrayList();
 
     public SignificantTermsHeuristicModule() {
+        registerParser(JLHScore.JLHScoreParser.class);
registerParser(PercentageScore.PercentageScoreParser.class); registerParser(MutualInformation.MutualInformationParser.class); registerParser(GND.GNDParser.class); registerParser(ChiSquare.ChiSquareParser.class); + registerParser(ScriptHeuristic.ScriptHeuristicParser.class); } public void registerParser(Class parser) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/TransportSignificantTermsHeuristicModule.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/TransportSignificantTermsHeuristicModule.java index d8de1fabb70..1be51f51d66 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/TransportSignificantTermsHeuristicModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/TransportSignificantTermsHeuristicModule.java @@ -37,6 +37,7 @@ public class TransportSignificantTermsHeuristicModule extends AbstractModule { registerStream(MutualInformation.STREAM); registerStream(GND.STREAM); registerStream(ChiSquare.STREAM); + registerStream(ScriptHeuristic.STREAM); } public void registerStream(SignificanceHeuristicStreams.Stream stream) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index 3939528ba3b..6549672f346 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -93,9 +93,9 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement if (firstAggregation.reduceScript != null) { Map params; if (firstAggregation.reduceParams != null) { - params = new HashMap(firstAggregation.reduceParams); + params = new HashMap<>(firstAggregation.reduceParams); } else { - params = new HashMap(); + params = new HashMap<>(); } params.put("_aggs", aggregationObjects); ExecutableScript script = reduceContext.scriptService().executable(firstAggregation.scriptLang, firstAggregation.reduceScript, diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 22781a18612..0fba054d8ae 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -53,7 +53,6 @@ public class ScriptedMetricAggregator extends MetricsAggregator { private final Map params; // initial parameters for {reduce} private final Map reduceParams; - private final ScriptService scriptService; private final ScriptType reduceScriptType; protected ScriptedMetricAggregator(String name, String scriptLang, ScriptType initScriptType, String initScript, @@ -61,7 +60,6 @@ public class ScriptedMetricAggregator extends MetricsAggregator { String reduceScript, Map params, Map reduceParams, AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { super(name, context, parent, reducers, metaData); - this.scriptService = context.searchContext().scriptService(); this.scriptLang = scriptLang; this.reduceScriptType = reduceScriptType; if (params == null) { @@ -75,6 +73,7 @@ public class ScriptedMetricAggregator 
extends MetricsAggregator { } else { this.reduceParams = reduceParams; } + ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { scriptService.executable(scriptLang, initScript, initScriptType, this.params).run(); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java index 7cb2a6eb315..1b0b5aa3290 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java @@ -51,7 +51,7 @@ public class ScriptedMetricParser implements Aggregator.Parser { @Override public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException { - String scriptLang = null; + String scriptLang; Map params = null; Map reduceParams = null; XContentParser.Token token; diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java index 2d6e4c2520e..f282439b733 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java @@ -163,12 +163,10 @@ public class InnerHitsParseElement implements SearchParseElement { if (!childObjectMapper.nested().isNested()) { throw new ElasticsearchIllegalArgumentException("path [" + nestedPath +"] isn't nested"); } - DocumentMapper childDocumentMapper = smartNameObjectMapper.docMapper(); - parseContext.nestedScope().nextLevel(childObjectMapper); + ObjectMapper parentObjectMapper = parseContext.nestedScope().nextLevel(childObjectMapper); ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser); parseContext.nestedScope().previousLevel(); - ObjectMapper parentObjectMapper = childDocumentMapper.findParentObjectMapper(childObjectMapper); return new InnerHitsContext.NestedInnerHits(parseResult.context(), parseResult.query(), parseResult.childInnerHits(), parentObjectMapper, childObjectMapper); } diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index afacd802154..ae507f54e2a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -56,7 +56,6 @@ public class ScriptFieldsParseElement implements SearchParseElement { String fieldName = currentFieldName; ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); String script = null; - String scriptLang = null; ScriptService.ScriptType scriptType = null; Map params = null; boolean ignoreException = false; @@ -79,9 +78,7 @@ public class ScriptFieldsParseElement implements SearchParseElement { script = scriptValue.script(); scriptType = scriptValue.scriptType(); } - scriptLang = scriptParameterParser.lang(); - - SearchScript searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptType, params); + SearchScript searchScript = context.scriptService().search(context.lookup(), scriptParameterParser.lang(), script, scriptType, params); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, 
searchScript, ignoreException)); } } diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index 27c37ea82d8..dafff7e9697 100644 --- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -32,7 +32,6 @@ import org.elasticsearch.index.fielddata.*; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; -import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.script.ScriptService; @@ -128,7 +127,6 @@ public class ScriptSortParser implements SortParser { } // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` - ObjectMapper objectMapper; final Nested nested; if (nestedHelper != null && nestedHelper.getPath() != null) { BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 6e796f5e61d..6d54cdeaefb 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -30,6 +30,8 @@ import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.script.CompiledScript; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.suggest.SuggestContextParser; import org.elasticsearch.search.suggest.SuggestUtils; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -150,7 +152,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (suggestion.getCollateQueryScript() != null) { throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } - CompiledScript compiledScript = suggester.scriptService().compile("mustache", templateNameOrTemplateContent); + CompiledScript compiledScript = suggester.scriptService().compile(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptService.ScriptType.INLINE); if ("query".equals(fieldName)) { suggestion.setCollateQueryScript(compiledScript); } else { diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 0c5d9b9b114..a8b2cf92a68 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.search.ShardSearchFailure; import 
org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.*;
@@ -747,8 +746,7 @@ public class SnapshotsService extends AbstractLifecycleComponent shardEntry : snapshot.shards().entrySet()) {
-            ShardSnapshotStatus shardStatus = shardEntry.getValue();
+        for (ShardSnapshotStatus shardStatus : snapshot.shards().values()) {
             if (!shardStatus.state().completed() && node.getId().equals(shardStatus.nodeId())) {
                 // At least one shard was running on the removed node - we need to fail it
                 return true;
@@ -1121,9 +1119,25 @@ public class SnapshotsService extends AbstractLifecycleComponent implem
     @Override
     protected void doStart() throws ElasticsearchException {
-        clientBootstrap = createClientBootstrap();
+        boolean success = false;
+        try {
+            clientBootstrap = createClientBootstrap();
+            if (settings.getAsBoolean("network.server", true)) {
+                final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);
+                this.serverOpenChannels = openChannels;
-        if (!settings.getAsBoolean("network.server", true)) {
-            return;
-        }
-
-        final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);
-        this.serverOpenChannels = openChannels;
-
-        // extract default profile first and create standard bootstrap
-        Map<String, Settings> profiles = settings.getGroups("transport.profiles", true);
-        if (!profiles.containsKey(DEFAULT_PROFILE)) {
-            profiles = Maps.newHashMap(profiles);
-            profiles.put(DEFAULT_PROFILE, ImmutableSettings.EMPTY);
-        }
-
-        Settings fallbackSettings = createFallbackSettings();
-        Settings defaultSettings = profiles.get(DEFAULT_PROFILE);
-
-        // loop through all profiles and strart them app, special handling for default one
-        for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
-            Settings profileSettings = entry.getValue();
-            String name = entry.getKey();
-
-            if (DEFAULT_PROFILE.equals(name)) {
-                profileSettings = settingsBuilder()
-                        .put(profileSettings)
-                        .put("port", profileSettings.get("port", settings.get("port", this.settings.get("transport.tcp.port", DEFAULT_PORT_RANGE))))
-                        .build();
-            } else {
-                // if profile does not have a port, skip it
-                if (profileSettings.get("port") == null) {
-                    logger.info("No port configured for profile [{}], not binding", name);
-                    continue;
+                // extract default profile first and create standard bootstrap
+                Map<String, Settings> profiles = settings.getGroups("transport.profiles", true);
+                if (!profiles.containsKey(DEFAULT_PROFILE)) {
+                    profiles = Maps.newHashMap(profiles);
+                    profiles.put(DEFAULT_PROFILE, ImmutableSettings.EMPTY);
                 }
+
+                Settings fallbackSettings = createFallbackSettings();
+                Settings defaultSettings = profiles.get(DEFAULT_PROFILE);
+
+                // loop through all profiles and start them up, special handling for default one
+                for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
+                    Settings profileSettings = entry.getValue();
+                    String name = entry.getKey();
+
+                    if (DEFAULT_PROFILE.equals(name)) {
+                        profileSettings = settingsBuilder()
+                                .put(profileSettings)
+                                .put("port", profileSettings.get("port", this.settings.get("transport.tcp.port", DEFAULT_PORT_RANGE)))
+                                .build();
+                    } else {
+                        // if profile does not have a port, skip it
+                        if (profileSettings.get("port") == null) {
+                            logger.info("No port configured for profile [{}], not binding", name);
+                            continue;
+                        }
+                    }
+
+                    // merge fallback settings with default settings with profile settings so we have complete settings with default values
+                    Settings mergedSettings = settingsBuilder()
+                            .put(fallbackSettings)
+                            .put(defaultSettings)
+                            .put(profileSettings)
+                            .build();
+
+                    createServerBootstrap(name, mergedSettings);
+                    bindServerBootstrap(name, mergedSettings);
+                }
+
+                InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(DEFAULT_PROFILE).getLocalAddress();
+                int publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", boundAddress.getPort()));
+                String publishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
+                InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
+                this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
+            }
+            success = true;
+        } finally {
+            if (success == false) {
+                doStop();
             }
-
-            // merge fallback settings with default settings with profile settings so we have complete settings with default values
-            Settings mergedSettings = settingsBuilder()
-                    .put(fallbackSettings)
-                    .put(defaultSettings)
-                    .put(profileSettings)
-                    .build();
-
-            createServerBootstrap(name, mergedSettings);
-            bindServerBootstrap(name, mergedSettings);
         }
-
-        InetSocketAddress boundAddress = (InetSocketAddress) serverChannels.get(DEFAULT_PROFILE).getLocalAddress();
-        int publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", boundAddress.getPort()));
-        String publishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
-        InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
-        this.boundAddress = new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress));
     }
 
     @Override
@@ -604,6 +610,18 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
             // close the channel as safe measure, which will cause a node to be disconnected if relevant
             ctx.getChannel().close();
             disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+        } else if (e.getCause() instanceof SizeHeaderFrameDecoder.HttpOnTransportException) {
+            // in case we are able to return data, serialize the exception content and send it back to the client
+            if (ctx.getChannel().isOpen()) {
+                ChannelBuffer buffer = ChannelBuffers.wrappedBuffer(e.getCause().getMessage().getBytes(Charsets.UTF_8));
+                ChannelFuture channelFuture = ctx.getChannel().write(buffer);
+                channelFuture.addListener(new ChannelFutureListener() {
+                    @Override
+                    public void operationComplete(ChannelFuture future) throws Exception {
+                        future.getChannel().close();
+                    }
+                });
+            }
         } else {
             logger.warn("exception caught on transport layer [{}], closing connection", e.getCause(), ctx.getChannel());
             // close the channel, which will cause a node to be disconnected if relevant
diff --git a/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java b/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java
index 85c6b898bde..d3fd096ffb8 100644
--- a/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java
+++ b/src/main/java/org/elasticsearch/transport/netty/SizeHeaderFrameDecoder.java
@@ -19,8 +19,11 @@
 package org.elasticsearch.transport.netty;
 
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.rest.RestStatus;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.Channel;
 import 
org.jboss.netty.channel.ChannelHandlerContext; @@ -43,6 +46,19 @@ public class SizeHeaderFrameDecoder extends FrameDecoder { int readerIndex = buffer.readerIndex(); if (buffer.getByte(readerIndex) != 'E' || buffer.getByte(readerIndex + 1) != 'S') { + // special handling for what is probably HTTP + if (bufferStartsWith(buffer, readerIndex, "GET ") || + bufferStartsWith(buffer, readerIndex, "POST ") || + bufferStartsWith(buffer, readerIndex, "PUT ") || + bufferStartsWith(buffer, readerIndex, "HEAD ") || + bufferStartsWith(buffer, readerIndex, "DELETE ") || + bufferStartsWith(buffer, readerIndex, "OPTIONS ") || + bufferStartsWith(buffer, readerIndex, "PATCH ") || + bufferStartsWith(buffer, readerIndex, "TRACE ")) { + + throw new HttpOnTransportException("This is not a HTTP port"); + } + // we have 6 readable bytes, show 4 (should be enough) throw new StreamCorruptedException("invalid internal transport message format, got (" + Integer.toHexString(buffer.getByte(readerIndex) & 0xFF) + "," @@ -67,4 +83,31 @@ public class SizeHeaderFrameDecoder extends FrameDecoder { buffer.skipBytes(6); return buffer; } + + private boolean bufferStartsWith(ChannelBuffer buffer, int readerIndex, String method) { + char[] chars = method.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (buffer.getByte(readerIndex + i) != chars[i]) { + return false; + } + } + + return true; + } + + /** + * A helper exception to mark an incoming connection as potentially being HTTP + * so an appropriate error code can be returned + */ + public class HttpOnTransportException extends ElasticsearchException { + + public HttpOnTransportException(String msg) { + super(msg); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java index 6e810f6e522..90989561eff 100644 --- a/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -128,10 +128,11 @@ public class TribeService extends AbstractLifecycleComponent { ImmutableSettings.Builder sb = ImmutableSettings.builder().put(entry.getValue()); sb.put("node.name", settings.get("name") + "/" + entry.getKey()); sb.put(TRIBE_NAME, entry.getKey()); + sb.put("config.ignore_system_properties", true); if (sb.get("http.enabled") == null) { sb.put("http.enabled", false); } - nodes.add(NodeBuilder.nodeBuilder().settings(sb).client(true).build()); + nodes.add(NodeBuilder.nodeBuilder().settings(sb).client(true).loadConfigSettings(false).build()); } String[] blockIndicesWrite = Strings.EMPTY_ARRAY; diff --git a/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java b/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java index 7c4fa859c43..85383dd46b5 100644 --- a/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java @@ -132,7 +132,7 @@ public class ReplicaRecoveryBenchmark { long translogOps; long bytes; if (indexRecoveries.size() > 0) { - translogOps = indexRecoveries.get(0).recoveryState().getTranslog().currentTranslogOperations(); + translogOps = indexRecoveries.get(0).recoveryState().getTranslog().recoveredOperations(); bytes = recoveryResponse.shardResponses().get(INDEX_NAME).get(0).recoveryState().getIndex().recoveredBytes(); } else { bytes = lastBytes = 0; @@ -161,6 
+161,7 @@ public class ReplicaRecoveryBenchmark { long totalRecoveryTime = 0; long startTime = System.currentTimeMillis(); + long[] recoveryTimes = new long[3]; for (int iteration = 0; iteration < 3; iteration++) { logger.info("--> removing replicas"); client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 0").get(); @@ -170,7 +171,9 @@ public class ReplicaRecoveryBenchmark { client1.admin().cluster().prepareHealth(INDEX_NAME).setWaitForGreenStatus().setTimeout("15m").get(); long recoveryTime = System.currentTimeMillis() - recoveryStart; totalRecoveryTime += recoveryTime; + recoveryTimes[iteration] = recoveryTime; logger.info("--> recovery done in [{}]", new TimeValue(recoveryTime)); + // sleep some to let things clean up Thread.sleep(10000); } @@ -185,7 +188,9 @@ public class ReplicaRecoveryBenchmark { backgroundLogger.join(); - logger.info("average doc/s [{}], average relocation time [{}]", (endDocIndexed - startDocIndexed) * 1000.0 / totalTime, new TimeValue(totalRecoveryTime / 3)); + logger.info("average doc/s [{}], average relocation time [{}], taking [{}], [{}], [{}]", (endDocIndexed - startDocIndexed) * 1000.0 / totalTime, new TimeValue(totalRecoveryTime / 3), + TimeValue.timeValueMillis(recoveryTimes[0]), TimeValue.timeValueMillis(recoveryTimes[1]), TimeValue.timeValueMillis(recoveryTimes[2]) + ); client1.close(); node1.close(); diff --git a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java index 9a449b0dd4e..af419cf68a9 100644 --- a/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java +++ b/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.HashMap; diff --git a/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java index da376703543..bf55ca2d6e5 100644 --- a/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java +++ b/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java @@ -22,9 +22,7 @@ package org.elasticsearch.cluster.settings; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; /** * @@ -83,6 +81,24 @@ public class SettingsValidatorTests extends ElasticsearchTestCase { assertThat(Validator.POSITIVE_INTEGER.validate("", "0"), notNullValue()); assertThat(Validator.POSITIVE_INTEGER.validate("", "-1"), notNullValue()); assertThat(Validator.POSITIVE_INTEGER.validate("", "10.2"), notNullValue()); + + assertThat(Validator.PERCENTAGE.validate("", "asdasd"), notNullValue()); + assertThat(Validator.PERCENTAGE.validate("", "-1"), notNullValue()); + assertThat(Validator.PERCENTAGE.validate("", "20"), notNullValue()); // we expect 20% + assertThat(Validator.PERCENTAGE.validate("", "-1%"), notNullValue()); + assertThat(Validator.PERCENTAGE.validate("", "101%"), notNullValue()); + assertThat(Validator.PERCENTAGE.validate("", "100%"), 
nullValue()); + assertThat(Validator.PERCENTAGE.validate("", "99%"), nullValue()); + assertThat(Validator.PERCENTAGE.validate("", "0%"), nullValue()); + + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "asdasd"), notNullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20"), nullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20mb"), nullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "-1%"), notNullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "101%"), notNullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "100%"), nullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "99%"), nullValue()); + assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "0%"), nullValue()); } @Test diff --git a/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index 95223cccb96..0a6d8f980d3 100644 --- a/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -280,6 +280,23 @@ public class TimeZoneRoundingTests extends ElasticsearchTestCase { equalTo(tzRounding.round(time("2014-08-11T17:00:00", JERUSALEM_TIMEZONE)))); } + /** + * test for #10025, strict local to UTC conversion can cause joda exceptions + * on DST start + */ + @Test + public void testLenientConversionDST() { + DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); + long start = time("2014-10-18T20:50:00.000", tz); + long end = time("2014-10-19T01:00:00.000", tz); + Rounding tzRounding = new TimeZoneRounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); + Rounding dayTzRounding = new TimeZoneRounding.TimeIntervalRounding(60000, tz); + for (long time = start; time < end; time = time + 60000) { + assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); + assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); + } + } + private DateTimeUnit randomTimeUnit() { byte id = (byte) randomIntBetween(1, 8); return DateTimeUnit.resolve(id); diff --git a/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index 6ca424a27a1..371c5b8e231 100644 --- a/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -19,13 +19,15 @@ package org.elasticsearch.common.unit; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.test.ElasticsearchTestCase; import org.joda.time.PeriodType; import org.junit.Test; +import java.io.IOException; import java.util.concurrent.TimeUnit; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; @@ -66,4 +68,22 @@ public class TimeValueTests extends ElasticsearchTestCase { public void testMinusOne() { assertThat(new TimeValue(-1).nanos(), lessThan(0l)); } + + private void assertEqualityAfterSerialize(TimeValue value) throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + value.writeTo(out); + + BytesStreamInput in = new BytesStreamInput(out.bytes()); + TimeValue inValue = TimeValue.readTimeValue(in); + + assertThat(inValue, equalTo(value)); + } + + @Test + public void testSerialize() throws Exception { 
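+        // Editorial note, not part of the original patch: each case below writes the
+        // TimeValue to a BytesStreamOutput, reads it back with TimeValue.readTimeValue
+        // from a BytesStreamInput, and asserts the round-tripped value is equal,
+        // covering a multi-unit value (days), the -1 sentinel, and a nanosecond value.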
+        assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS));
+        assertEqualityAfterSerialize(new TimeValue(-1));
+        assertEqualityAfterSerialize(new TimeValue(1, TimeUnit.NANOSECONDS));
+
+    }
 }
diff --git a/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java b/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java
index 094d3bf70d1..b0e5aa8cec8 100644
--- a/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java
+++ b/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java
@@ -624,6 +624,7 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati
                     DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
                     DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
                     if (!Objects.equals(previousMaster, currentMaster)) {
+                        logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState());
                         String previousMasterNodeName = previousMaster != null ? previousMaster.name() : null;
                         String currentMasterNodeName = currentMaster != null ? currentMaster.name() : null;
                         masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
@@ -647,8 +648,9 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati
         masterNodeDisruption.startDisrupting();
 
         // Wait for the majority side to get stable
-        ensureStableCluster(2, majoritySide.get(0));
-        ensureStableCluster(2, majoritySide.get(1));
+        assertDifferentMaster(majoritySide.get(0), oldMasterNode);
+        assertDifferentMaster(majoritySide.get(1), oldMasterNode);
+        assertDiscoveryCompleted(majoritySide);
 
         // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed,
         // but will be queued and once the old master node un-freezes it gets executed.
@@ -884,7 +886,7 @@ public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrati
     public void testClusterFormingWithASlowNode() throws Exception {
         configureCluster(3, 2);
 
-        SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(getRandom(), 0, 0, 5000, 6000);
+        SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(getRandom(), 0, 0, 1000, 2000);
 
         // don't wait for initial state, we want to add the disruption while the cluster is forming..
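         // (Editorial note, not part of the original patch: SlowClusterStateProcessing
         // injects artificial delays into cluster state application on a node; the
         // shorter 1000-2000ms delays above presumably still exercise the slow-node
         // path without stalling cluster formation entirely.)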
internalCluster().startNodesAsync(3, diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index 2230c5281ed..469da2078e0 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -19,26 +19,40 @@ package org.elasticsearch.discovery.zen; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.fd.FaultDetection; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.*; import org.hamcrest.Matchers; import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.*; @@ -154,4 +168,58 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed assertThat(statesFound, Matchers.hasSize(2)); } + + @Test + public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception { + Settings settings = ImmutableSettings.builder() + .put("discovery.type", "zen") + .build(); + List nodeNames = internalCluster().startNodesAsync(2, settings).get(); + client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); + + List nonMasterNodes = new ArrayList<>(nodeNames); + nonMasterNodes.remove(internalCluster().getMasterName()); + String noneMasterNode = nonMasterNodes.get(0); + + ClusterState state = internalCluster().getInstance(ClusterService.class).state(); + DiscoveryNode node = null; + for (DiscoveryNode discoveryNode : state.nodes()) { + if (discoveryNode.name().equals(noneMasterNode)) { + node = discoveryNode; + } + } + assert node != null; + + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes()) + .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc"); + ClusterState.Builder builder = ClusterState.builder(state); + builder.nodes(nodes); + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = 
CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(node.version()); + ClusterState.Builder.writeTo(builder.build(), stream); + stream.close(); + BytesReference bytes = bStream.bytes(); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference reference = new AtomicReference<>(); + internalCluster().getInstance(TransportService.class, noneMasterNode).sendRequest(node, PublishClusterStateAction.ACTION_NAME, new BytesTransportRequest(bytes, Version.CURRENT), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + + @Override + public void handleResponse(TransportResponse.Empty response) { + super.handleResponse(response); + latch.countDown(); + } + + @Override + public void handleException(TransportException exp) { + super.handleException(exp); + reference.set(exp); + latch.countDown(); + } + }); + latch.await(); + assertThat(reference.get(), notNullValue()); + assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master then the current one, rejecting ")); + } } diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index 79cf07f5e07..4d7811fa023 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen; +import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -29,7 +30,7 @@ import java.util.LinkedList; import java.util.Queue; import static org.elasticsearch.discovery.zen.ZenDiscovery.ProcessClusterState; -import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreNewClusterState; +import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState; import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.nullValue; @@ -52,13 +53,13 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { currentState.version(2); newState.version(1); - assertTrue("should ignore, because new state's version is lower to current state's version", shouldIgnoreNewClusterState(logger, currentState.build(), newState.build())); + assertTrue("should ignore, because new state's version is lower to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentState.version(1); newState.version(1); - assertFalse("should not ignore, because new state's version is equal to current state's version", shouldIgnoreNewClusterState(logger, currentState.build(), newState.build())); + assertFalse("should not ignore, because new state's version is equal to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentState.version(1); newState.version(2); - assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreNewClusterState(logger, currentState.build(), newState.build())); + assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentNodes = DiscoveryNodes.builder(); currentNodes.masterNodeId("b"); @@ -71,7 +72,12 @@ public class 
ZenDiscoveryUnitTest extends ElasticsearchTestCase { newState.version(2); } currentState.nodes(currentNodes); - assertTrue("should ignore, because current state's master is not equal to new state's master", shouldIgnoreNewClusterState(logger, currentState.build(), newState.build())); + try { + shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()); + fail("should ignore, because current state's master is not equal to new state's master"); + } catch (ElasticsearchIllegalStateException e) { + assertThat(e.getMessage(), containsString("cluster state from a different master then the current one, rejecting")); + } currentNodes = DiscoveryNodes.builder(); currentNodes.masterNodeId(null); @@ -84,7 +90,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { currentState.version(1); newState.version(2); } - assertFalse("should not ignore, because current state doesn't have a master", shouldIgnoreNewClusterState(logger, currentState.build(), newState.build())); + assertFalse("should not ignore, because current state doesn't have a master", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); } public void testSelectNextStateToProcess_empty() { diff --git a/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java new file mode 100644 index 00000000000..4dd8726a96f --- /dev/null +++ b/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gateway; + +import com.google.common.collect.ImmutableList; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + */ +public class DanglingIndicesStateTests extends ElasticsearchTestCase { + + private static Settings indexSettings = ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + @Test + public void testCleanupWhenEmpty() throws Exception { + try (NodeEnvironment env = newNodeEnvironment()) { + MetaStateService metaStateService = new MetaStateService(ImmutableSettings.EMPTY, env); + DanglingIndicesState danglingState = new DanglingIndicesState(ImmutableSettings.EMPTY, env, metaStateService, null); + + assertTrue(danglingState.getDanglingIndices().isEmpty()); + MetaData metaData = MetaData.builder().build(); + danglingState.cleanupAllocatedDangledIndices(metaData); + assertTrue(danglingState.getDanglingIndices().isEmpty()); + } + } + + @Test + public void testDanglingProcessing() throws Exception { + try (NodeEnvironment env = newNodeEnvironment()) { + MetaStateService metaStateService = new MetaStateService(ImmutableSettings.EMPTY, env); + DanglingIndicesState danglingState = new DanglingIndicesState(ImmutableSettings.EMPTY, env, metaStateService, null); + + MetaData metaData = MetaData.builder().build(); + + IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build(); + metaStateService.writeIndex("test_write", dangledIndex, null); + + // check that several runs when not in the metadata still keep the dangled index around + int numberOfChecks = randomIntBetween(1, 10); + for (int i = 0; i < numberOfChecks; i++) { + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + assertThat(newDanglingIndices.size(), equalTo(1)); + assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1")); + assertTrue(danglingState.getDanglingIndices().isEmpty()); + } + + for (int i = 0; i < numberOfChecks; i++) { + danglingState.findNewAndAddDanglingIndices(metaData); + + assertThat(danglingState.getDanglingIndices().size(), equalTo(1)); + assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1")); + } + + // simulate allocation to the metadata + metaData = MetaData.builder(metaData).put(dangledIndex, true).build(); + + // check that several runs when in the metadata, but not cleaned yet, still keeps dangled + for (int i = 0; i < numberOfChecks; i++) { + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + assertTrue(newDanglingIndices.isEmpty()); + + assertThat(danglingState.getDanglingIndices().size(), equalTo(1)); + assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1")); + } + + danglingState.cleanupAllocatedDangledIndices(metaData); + assertTrue(danglingState.getDanglingIndices().isEmpty()); + } + } + + @Test + 
public void testRenameOfIndexState() throws Exception { + try (NodeEnvironment env = newNodeEnvironment()) { + MetaStateService metaStateService = new MetaStateService(ImmutableSettings.EMPTY, env); + DanglingIndicesState danglingState = new DanglingIndicesState(ImmutableSettings.EMPTY, env, metaStateService, null); + + MetaData metaData = MetaData.builder().build(); + + IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build(); + metaStateService.writeIndex("test_write", dangledIndex, null); + + for (Path path : env.indexPaths(new Index("test1"))) { + Files.move(path, path.getParent().resolve("test1_renamed")); + } + + Map newDanglingIndices = danglingState.findNewDanglingIndices(metaData); + assertThat(newDanglingIndices.size(), equalTo(1)); + assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1_renamed")); + } + } +} diff --git a/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java b/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java index 9268eeac28f..0cee6e9588c 100644 --- a/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java +++ b/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java @@ -107,6 +107,10 @@ public class GatewayIndexStateTests extends ElasticsearchIntegrationTest { logger.info("--> indexing a simple document"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet(); + // we need this until we have https://github.com/elasticsearch/elasticsearch/issues/8688 + // the test rarely fails else because the master does not apply the new mapping quick enough and it is lost + waitForConcreteMappingsOnAll("test", "type1", "field1"); + logger.info("--> closing test index..."); client().admin().indices().prepareClose("test").execute().actionGet(); @@ -234,8 +238,7 @@ public class GatewayIndexStateTests extends ElasticsearchIntegrationTest { logger.info("--> cleaning nodes"); logger.info("--> starting 2 nodes"); - internalCluster().startNode(); - internalCluster().startNode(); + internalCluster().startNodesAsync(2).get(); logger.info("--> indexing a simple document"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet(); @@ -328,19 +331,13 @@ public class GatewayIndexStateTests extends ElasticsearchIntegrationTest { logger.info("--> verifying dangling index contains doc"); assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - } @Test - public void testDanglingIndicesAutoImportYes() throws Exception { - Settings settings = settingsBuilder() - .put(GatewayMetaState.GATEWAY_AUTO_IMPORT_DANGLED, "yes") - .put(GatewayMetaState.GATEWAY_DANGLING_TIMEOUT, randomIntBetween(0, 120)) - .build(); + public void testDanglingIndices() throws Exception { logger.info("--> starting two nodes"); - final String node_1 = internalCluster().startNode(settings); - internalCluster().startNode(settings); + final String node_1 = internalCluster().startNodesAsync(2).get().get(0); logger.info("--> indexing a simple document"); client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet(); @@ -386,187 +383,4 @@ public class GatewayIndexStateTests extends ElasticsearchIntegrationTest { logger.info("--> verify the doc is there"); assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); } - - @Test - public void testDanglingIndicesAutoImportClose() throws Exception { 
- Settings settings = settingsBuilder() - .put("gateway.local.auto_import_dangled", "closed") - .build(); - - - logger.info("--> starting two nodes"); - final String node_1 = internalCluster().startNode(settings); - internalCluster().startNode(settings); - - logger.info("--> indexing a simple document"); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet(); - - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify 1 doc in the index"); - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l); - } - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - - logger.info("--> restarting the nodes"); - final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - if (node_1.equals(nodeName)) { - logger.info("--> deleting the data for the first node"); - gateway1.reset(); - } - return null; - } - }); - - logger.info("--> waiting for green status"); - ensureGreen(); - - // spin a bit waiting for the index to exists - long time = System.currentTimeMillis(); - while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) { - if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) { - break; - } - } - - logger.info("--> verify that the dangling index exists"); - assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true)); - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify the index state is closed"); - assertThat(client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE)); - logger.info("--> open the index"); - assertAcked(client().admin().indices().prepareOpen("test").get()); - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify the doc is there"); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - } - - @Test - public void testDanglingIndicesNoAutoImport() throws Exception { - Settings settings = settingsBuilder() - .put("gateway.local.auto_import_dangled", "no") - .build(); - logger.info("--> starting two nodes"); - final String node_1 = internalCluster().startNodesAsync(2, settings).get().get(0); - internalCluster().startNode(settings); - - logger.info("--> indexing a simple document"); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet(); - - logger.info("--> waiting for green status"); - ensureGreen(); - - logger.info("--> verify 1 doc in the index"); - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 1l); - } - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true)); - - logger.info("--> restarting the nodes"); - final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1); - internalCluster().fullRestart(new RestartCallback() { - - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - if (node_1.equals(nodeName)) { - logger.info("--> deleting the data for the first node"); - gateway1.reset(); - } - return null; - } - }); 
-
-        logger.info("--> waiting for green status");
-        ensureGreen();
-
-        // we need to wait for the allocate dangled to kick in (even though in this case its disabled)
-        // just to make sure
-        Thread.sleep(500);
-
-        logger.info("--> verify that the dangling index does not exists");
-        assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
-
-        logger.info("--> restart start the nodes, but make sure we do recovery only after we have 2 nodes in the cluster");
-        internalCluster().fullRestart(new RestartCallback() {
-            @Override
-            public Settings onNodeStopped(String nodeName) throws Exception {
-                return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
-            }
-        });
-
-        logger.info("--> waiting for green status");
-        ensureGreen();
-
-        logger.info("--> verify that the dangling index does exists now!");
-        assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
-        logger.info("--> verify the doc is there");
-        assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
-    }
-
-    @Test
-    public void testDanglingIndicesNoAutoImportStillDanglingAndCreatingSameIndex() throws Exception {
-        Settings settings = settingsBuilder()
-                .put("gateway.local.auto_import_dangled", "no")
-                .build();
-
-        logger.info("--> starting two nodes");
-        final String node_1 = internalCluster().startNode(settings);
-        internalCluster().startNode(settings);
-
-        logger.info("--> indexing a simple document");
-        client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
-
-        logger.info("--> waiting for green status");
-        ensureGreen();
-
-        logger.info("--> verify 1 doc in the index");
-        for (int i = 0; i < 10; i++) {
-            assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
-        }
-
-        logger.info("--> restarting the nodes");
-        final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
-        internalCluster().fullRestart(new RestartCallback() {
-
-            @Override
-            public Settings onNodeStopped(String nodeName) throws Exception {
-                if (node_1.equals(nodeName)) {
-                    logger.info("--> deleting the data for the first node");
-                    gateway1.reset();
-                }
-                return null;
-            }
-        });
-
-        logger.info("--> waiting for green status");
-        ensureGreen();
-
-        logger.info("--> verify that the dangling index does not exists");
-        assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(false));
-
-        logger.info("--> close the first node, so we remain with the second that has the dangling index");
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_1));
-
-        logger.info("--> index a different doc");
-        client().prepareIndex("test", "type1", "2").setSource("field1", "value2").setRefresh(true).execute().actionGet();
-
-        logger.info("--> verify that doc 2 does exist");
-        assertThat(client().prepareGet("test", "type1", "2").execute().actionGet().isExists(), equalTo(true));
-
-        // Need an ensure yellow here, since the index gets created (again) when we index doc2, so the shard that doc
-        // with id 1 is assigned to might not be in a started state. We don't need to do this when verifying if doc 2
-        // exists, because we index into the shard that doc gets assigned to.
- ensureYellow("test"); - logger.info("--> verify that doc 1 doesn't exist"); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); - } } diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index 1ffc0df60ad..e572a2f4f8c 100644 --- a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -258,7 +258,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { // If the latest version doesn't use the legacy format while previous versions do, then fail hard public void testLatestVersionDoesNotUseLegacy() throws IOException { final ToXContent.Params params = ToXContent.EMPTY_PARAMS; - MetaDataStateFormat format = GatewayMetaState.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean()); + MetaDataStateFormat format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean()); final Path[] dirs = new Path[2]; dirs[0] = newTempDirPath(LifecycleScope.TEST); dirs[1] = newTempDirPath(LifecycleScope.TEST); @@ -268,14 +268,14 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { final Path dir1 = randomFrom(dirs); final int v1 = randomInt(10); // write a first state file in the new format - format.write(randomMeta(), GatewayMetaState.GLOBAL_STATE_FILE_PREFIX, v1, dir1); + format.write(randomMeta(), MetaStateService.GLOBAL_STATE_FILE_PREFIX, v1, dir1); // write older state files in the old format but with a newer version final int numLegacyFiles = randomIntBetween(1, 5); for (int i = 0; i < numLegacyFiles; ++i) { final Path dir2 = randomFrom(dirs); final int v2 = v1 + 1 + randomInt(10); - try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(GatewayMetaState.GLOBAL_STATE_FILE_PREFIX + v2)))) { + try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v2)))) { xcontentBuilder.startObject(); MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params); xcontentBuilder.endObject(); @@ -283,7 +283,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { } try { - MetaDataStateFormat.loadLatestState(logger, format, GatewayMetaState.GLOBAL_STATE_FILE_PATTERN, "foobar", dirs); + MetaDataStateFormat.loadLatestState(logger, format, MetaStateService.GLOBAL_STATE_FILE_PATTERN, "foobar", dirs); fail("latest version can not be read"); } catch (ElasticsearchIllegalStateException ex) { assertThat(ex.getMessage(), startsWith("Could not find a state file to recover from among ")); @@ -293,7 +293,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { // If both the legacy and the new format are available for the latest version, prefer the new format public void testPrefersNewerFormat() throws IOException { final ToXContent.Params params = ToXContent.EMPTY_PARAMS; - MetaDataStateFormat format = GatewayMetaState.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean()); + MetaDataStateFormat format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean()); final Path[] dirs = new Path[2]; dirs[0] = 
        dirs[1] = newTempDirPath(LifecycleScope.TEST);
@@ -310,16 +310,16 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         final Path dir2 = randomFrom(dirs);
         MetaData meta2 = randomMeta();
         assertFalse(meta2.uuid().equals(uuid));
-        try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(GatewayMetaState.GLOBAL_STATE_FILE_PREFIX + v)))) {
+        try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v)))) {
             xcontentBuilder.startObject();
             MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params);
             xcontentBuilder.endObject();
         }
 
         // write a second state file in the new format but with the same version
-        format.write(meta, GatewayMetaState.GLOBAL_STATE_FILE_PREFIX, v, dir1);
+        format.write(meta, MetaStateService.GLOBAL_STATE_FILE_PREFIX, v, dir1);
 
-        MetaData state = MetaDataStateFormat.loadLatestState(logger, format, GatewayMetaState.GLOBAL_STATE_FILE_PATTERN, "foobar", dirs);
+        MetaData state = MetaDataStateFormat.loadLatestState(logger, format, MetaStateService.GLOBAL_STATE_FILE_PATTERN, "foobar", dirs);
         assertThat(state.uuid(), equalTo(uuid));
     }
@@ -334,7 +334,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
             meta.add(randomMeta());
         }
         Set<Path> corruptedFiles = new HashSet<>();
-        MetaDataStateFormat format = GatewayMetaState.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean());
+        MetaDataStateFormat format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params, randomBoolean());
         for (int i = 0; i < dirs.length; i++) {
             dirs[i] = newTempDirPath(LifecycleScope.TEST);
             Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME));
@@ -352,7 +352,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
                 }
             }
             for (int j = numLegacy; j < numStates; j++) {
-                format.write(meta.get(j), GatewayMetaState.GLOBAL_STATE_FILE_PREFIX, j, dirs[i]);
+                format.write(meta.get(j), MetaStateService.GLOBAL_STATE_FILE_PREFIX, j, dirs[i]);
                 if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily need here....
                     Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j + ".st");
                     corruptedFiles.add(file);
@@ -363,7 +363,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         }
         List<Path> dirList = Arrays.asList(dirs);
         Collections.shuffle(dirList, getRandom());
-        MetaData loadedMetaData = MetaDataStateFormat.loadLatestState(logger, format, GatewayMetaState.GLOBAL_STATE_FILE_PATTERN, "foobar", dirList.toArray(new Path[0]));
+        MetaData loadedMetaData = MetaDataStateFormat.loadLatestState(logger, format, MetaStateService.GLOBAL_STATE_FILE_PATTERN, "foobar", dirList.toArray(new Path[0]));
         MetaData latestMetaData = meta.get(numStates-1);
         assertThat(loadedMetaData.uuid(), not(equalTo("_na_")));
         assertThat(loadedMetaData.uuid(), equalTo(latestMetaData.uuid()));
@@ -387,7 +387,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
             MetaDataStateFormatTest.corruptFile(file, logger);
         }
         try {
-            MetaDataStateFormat.loadLatestState(logger, format, GatewayMetaState.GLOBAL_STATE_FILE_PATTERN, "foobar", dirList.toArray(new Path[0]));
+            MetaDataStateFormat.loadLatestState(logger, format, MetaStateService.GLOBAL_STATE_FILE_PATTERN, "foobar", dirList.toArray(new Path[0]));
             fail("latest version can not be read");
         } catch (ElasticsearchException ex) {
             assertThat(ex.getCause(), instanceOf(CorruptStateException.class));
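Taken together, the hunks above are a mechanical rename: the global-state helpers this test relies on moved from GatewayMetaState to MetaStateService. A minimal sketch of the resulting call pattern, with params, metaData, version, stateDir, and logger standing in for values a caller would already have (all method and constant names are taken verbatim from the hunks above):

    MetaDataStateFormat format = MetaStateService.globalStateFormat(XContentType.SMILE, params, false);
    format.write(metaData, MetaStateService.GLOBAL_STATE_FILE_PREFIX, version, stateDir);
    MetaData loaded = MetaDataStateFormat.loadLatestState(logger, format,
            MetaStateService.GLOBAL_STATE_FILE_PATTERN, "foobar", stateDir);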
diff --git a/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
new file mode 100644
index 00000000000..86296541f12
--- /dev/null
+++ b/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class MetaStateServiceTests extends ElasticsearchTestCase {
+
+    private static Settings indexSettings = ImmutableSettings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .build();
+
+    @Test
+    public void testWriteLoadIndex() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            metaStateService.writeIndex("test_write", index, null);
+            assertThat(metaStateService.loadIndexState("test1"), equalTo(index));
+        }
+    }
+
+    @Test
+    public void testLoadMissingIndex() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+            assertThat(metaStateService.loadIndexState("test1"), nullValue());
+        }
+    }
+
+    @Test
+    public void testWriteLoadGlobal() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(ImmutableSettings.builder().put("test1", "value1").build())
+                    .build();
+            metaStateService.writeGlobalState("test_write", metaData);
+            assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metaData.persistentSettings()));
+        }
+    }
+
+    @Test
+    public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(ImmutableSettings.builder().put("test1", "value1").build())
+                    .build();
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            MetaData metaDataWithIndex = MetaData.builder(metaData).put(index, true).build();
+
+            metaStateService.writeGlobalState("test_write", metaDataWithIndex);
+            assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metaData.persistentSettings()));
+            assertThat(metaStateService.loadGlobalState().hasIndex("test1"), equalTo(false));
+        }
+    }
+
+    @Test
+    public void testLoadGlobal() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(ImmutableSettings.builder().put("test1", "value1").build())
+                    .put(index, true)
+                    .build();
+
+            metaStateService.writeGlobalState("test_write", metaData);
+            metaStateService.writeIndex("test_write", index, null);
+
+            MetaData loadedState = metaStateService.loadFullState();
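+            // the merged state should contain both the global settings and the index that was written separately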
+            assertThat(loadedState.persistentSettings(), equalTo(metaData.persistentSettings()));
+            assertThat(loadedState.hasIndex("test1"), equalTo(true));
+            assertThat(loadedState.index("test1"), equalTo(index));
+        }
+    }
+
+    private Settings randomSettings() {
+        ImmutableSettings.Builder builder = ImmutableSettings.builder();
+        if (randomBoolean()) {
+            builder.put(MetaStateService.FORMAT_SETTING, randomXContentType().shortName());
+        }
+        return builder.build();
+    }
+}
diff --git a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
index 32c96f3d09e..fe11f3459ec 100644
--- a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
+++ b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
@@ -296,6 +296,9 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {
         if (numNodes == 1) {
             logger.info("--> one node is closed - start indexing data into the second one");
             client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
+            // TODO: remove once refresh doesn't fail immediately if there is a master block:
+            // https://github.com/elasticsearch/elasticsearch/issues/9997
+            client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
             client.admin().indices().prepareRefresh().execute().actionGet();
 
             for (int i = 0; i < 10; i++) {
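The guard added above works around elasticsearch/elasticsearch#9997 by blocking on cluster health before refreshing. The same two-step pattern, sketched in isolation (the index name is illustrative, and the calls are the ones used in the hunk itself):

    // wait until the index is at least yellow, so the refresh is not rejected while a master block is in place
    client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
    client.admin().indices().prepareRefresh().execute().actionGet();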
diff --git a/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java b/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java
index c9bdc514396..db23b5aeec5 100644
--- a/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java
+++ b/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java
@@ -19,6 +19,8 @@
 package org.elasticsearch.index;
 
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -28,19 +30,26 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShadowIndexShard;
+import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.snapshots.SnapshotState;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.elasticsearch.test.InternalTestCluster;
 import org.junit.Test;
 
 import java.nio.file.Path;
 import java.util.List;
+import java.util.concurrent.ExecutionException;
 
 import static com.google.common.collect.Lists.newArrayList;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
 /**
@@ -49,7 +58,69 @@
 @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
 public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest {
 
-    @Test
+    /**
+     * Tests the case where we create an index without shadow replicas, snapshot it and then restore into
+     * an index with shadow replicas enabled.
+     */
+    public void testRestoreToShadow() throws ExecutionException, InterruptedException {
+        Settings nodeSettings = ImmutableSettings.builder()
+                .put("node.add_id_to_custom_path", false)
+                .put("node.enable_custom_paths", true)
+                .build();
+
+        internalCluster().startNodesAsync(3, nodeSettings).get();
+        final Path dataPath = newTempDirPath();
+        Settings idxSettings = ImmutableSettings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
+        assertAcked(prepareCreate("foo").setSettings(idxSettings));
+        ensureGreen();
+        final int numDocs = randomIntBetween(10, 100);
+        for (int i = 0; i < numDocs; i++) {
+            client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get();
+        }
+        assertNoFailures(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(ImmutableSettings.settingsBuilder()
+                        .put("location", newTempDirPath())));
+        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+        assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+        Settings shadowSettings = ImmutableSettings.builder()
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2).build();
+
+        logger.info("--> restore the index into shadow replica index");
+        RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+                .setIndexSettings(shadowSettings).setWaitForCompletion(true)
+                .setRenamePattern("(.+)").setRenameReplacement("$1-copy")
+                .execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+        ensureGreen();
+        refresh();
+
+        for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
+            if (service.hasIndex("foo-copy")) {
+                IndexShard shard = service.indexServiceSafe("foo-copy").shard(0);
+                if (shard.routingEntry().primary()) {
+                    assertFalse(shard instanceof ShadowIndexShard);
+                } else {
+                    assertTrue(shard instanceof ShadowIndexShard);
+                }
+            }
+        }
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch("foo-copy").setQuery(matchAllQuery()).get();
+        assertHitCount(resp, numDocs);
+
+    }
+
+    @Test
     public void testIndexWithFewDocuments() throws Exception {
         Settings nodeSettings = ImmutableSettings.builder()
                 .put("node.add_id_to_custom_path", false)
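testRestoreToShadow restores a snapshot into an index whose settings flip on shadow replicas. Creating such an index directly uses the same three settings; a hedged sketch, where the data path and index name are purely illustrative and the setting constants come from the hunk above:

    Settings shadowSettings = ImmutableSettings.builder()
            .put(IndexMetaData.SETTING_DATA_PATH, "/mnt/shared/idx")  // assumed shared-filesystem mount
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
            .build();
    assertAcked(prepareCreate("foo-shadow").setSettings(shadowSettings));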
diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
index ff0c025ca09..8d3e7280c26 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
@@ -19,13 +19,13 @@
 package org.elasticsearch.index.engine;
 
 import org.apache.lucene.index.LiveIndexWriterConfig;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.EngineConfig;
-import org.elasticsearch.index.engine.InternalEngine;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 
 public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
@@ -41,18 +41,24 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
 
         assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true));
 
+        // VERSION MAP SIZE
+        long indexBufferSize = engine.config().getIndexingBufferSize().bytes();
+        long versionMapSize = engine.config().getVersionMapSize().bytes();
+        assertThat(versionMapSize, equalTo((long) (indexBufferSize * 0.25)));
+
         final int iters = between(1, 20);
         for (int i = 0; i < iters; i++) {
             boolean compoundOnFlush = randomBoolean();
-            boolean failOnCorruption = randomBoolean();
-            boolean failOnMerge = randomBoolean();
             long gcDeletes = Math.max(0, randomLong());
+            boolean versionMapAsPercent = randomBoolean();
+            double versionMapPercent = randomIntBetween(0, 100);
+            long versionMapSizeInMB = randomIntBetween(10, 20);
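+            // the setting accepts either a percentage of the indexing buffer ("25%") or an absolute size ("15mb")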
versionMapPercent + "%" : versionMapSizeInMB + "mb"; Settings build = ImmutableSettings.builder() - .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, failOnCorruption) .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush) .put(EngineConfig.INDEX_GC_DELETES_SETTING, gcDeletes) - .put(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING, failOnMerge) + .put(EngineConfig.INDEX_VERSION_MAP_SIZE, versionMapString) .build(); client().admin().indices().prepareUpdateSettings("foo").setSettings(build).get(); @@ -63,9 +69,14 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest { assertEquals(engine.config().getGcDeletesInMillis(), gcDeletes); assertEquals(engine.getGcDeletesInMillis(), gcDeletes); - assertEquals(engine.config().isFailEngineOnCorruption(), failOnCorruption); - assertEquals(engine.config().isFailOnMergeFailure(), failOnMerge); // only on the holder + indexBufferSize = engine.config().getIndexingBufferSize().bytes(); + versionMapSize = engine.config().getVersionMapSize().bytes(); + if (versionMapAsPercent) { + assertThat(versionMapSize, equalTo((long) (indexBufferSize * (versionMapPercent / 100)))); + } else { + assertThat(versionMapSize, equalTo(1024 * 1024 * versionMapSizeInMB)); + } } Settings settings = ImmutableSettings.builder() @@ -91,6 +102,35 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest { assertEquals(engine.getGcDeletesInMillis(), 1000); assertTrue(engine.config().isEnableGcDeletes()); + settings = ImmutableSettings.builder() + .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "sdfasfd") + .build(); + try { + client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); + fail("settings update didn't fail, but should have"); + } catch (ElasticsearchIllegalArgumentException e) { + // good + } + + settings = ImmutableSettings.builder() + .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "-12%") + .build(); + try { + client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); + fail("settings update didn't fail, but should have"); + } catch (ElasticsearchIllegalArgumentException e) { + // good + } + + settings = ImmutableSettings.builder() + .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "130%") + .build(); + try { + client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get(); + fail("settings update didn't fail, but should have"); + } catch (ElasticsearchIllegalArgumentException e) { + // good + } } diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 30503c52ea0..1a5bf489cfa 100644 --- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; @@ -73,7 +74,6 @@ import org.elasticsearch.index.translog.TranslogSizeMatcher; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ElasticsearchLuceneTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import 
diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 30503c52ea0..1a5bf489cfa 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -32,6 +32,7 @@ import org.apache.lucene.index.*;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
@@ -73,7 +74,6 @@ import org.elasticsearch.index.translog.TranslogSizeMatcher;
 import org.elasticsearch.index.translog.fs.FsTranslog;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ElasticsearchLuceneTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.hamcrest.MatcherAssert;
 import org.junit.After;
@@ -84,9 +84,11 @@ import java.io.IOException;
 import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.*;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
 import static org.apache.lucene.util.AbstractRandomizedTest.CHILD_JVM_ID;
 import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
 import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
@@ -95,7 +97,6 @@ import static org.elasticsearch.test.ElasticsearchTestCase.assertBusy;
 import static org.elasticsearch.test.ElasticsearchTestCase.terminate;
 import static org.hamcrest.Matchers.*;
 
-@TestLogging("index.translog:TRACE")
 public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
     public static final String TRANSLOG_PRIMARY_LOCATION = "work/fs-translog/JVM_" + CHILD_JVM_ID + "/primary";
@@ -147,7 +148,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
         defaultSettings = ImmutableSettings.builder()
                 .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
                 .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, randomBoolean())
                 .put(EngineConfig.INDEX_CODEC_SETTING, codecName)
                 .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency)
                 .build(); // TODO randomize more settings
@@ -681,7 +681,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, false);
         engine.create(new Engine.Create(null, newUid("1"), doc));
         engine.flush();
-        final boolean failEngine = defaultSettings.getAsBoolean(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, false);
         final int failInPhase = randomIntBetween(1, 3);
         try {
             engine.recover(new Engine.RecoveryHandler() {
@@ -724,9 +723,9 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
             MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 2));
             MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(2));
             searchResult.close();
-            assertThat(failEngine, is(false));
+            fail("engine should have failed");
         } catch (EngineClosedException ex) {
-            assertThat(failEngine, is(true));
+            // expected
         }
     }
@@ -1007,6 +1006,97 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
         }
     }
 
+    public void testForceMerge() {
+        int numDocs = randomIntBetween(10, 100);
+        for (int i=0; i < numDocs; i++) {
+            ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, false);
+            Engine.Index index = new Engine.Index(null, newUid(Integer.toString(i)), doc);
+            engine.index(index);
+            engine.refresh("test");
+        }
+        try (Engine.Searcher test = engine.acquireSearcher("test")) {
+            assertEquals(numDocs, test.reader().numDocs());
+        }
+        engine.forceMerge(true, 1, false, false);
+        assertEquals(engine.segments(true).size(), 1);
+
+        ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), B_1, false);
+        Engine.Index index = new Engine.Index(null, newUid(Integer.toString(0)), doc);
+        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
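+        // after force-merging with expunge-deletes, the deleted doc should be gone from the merged segment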
+        engine.forceMerge(true, 10, true, false); //expunge deletes
+
+        assertEquals(engine.segments(true).size(), 1);
+        try (Engine.Searcher test = engine.acquireSearcher("test")) {
+            assertEquals(numDocs-1, test.reader().numDocs());
+            assertEquals(numDocs-1, test.reader().maxDoc());
+        }
+
+        doc = testParsedDocument(Integer.toString(1), Integer.toString(1), "test", null, -1, -1, testDocument(), B_1, false);
+        index = new Engine.Index(null, newUid(Integer.toString(1)), doc);
+        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
+        engine.forceMerge(true, 10, false, false); //expunge deletes
+
+        assertEquals(engine.segments(true).size(), 1);
+        try (Engine.Searcher test = engine.acquireSearcher("test")) {
+            assertEquals(numDocs-2, test.reader().numDocs());
+            assertEquals(numDocs-1, test.reader().maxDoc());
+        }
+    }
+
+    public void testForceMergeAndClose() throws IOException, InterruptedException {
+        int numIters = randomIntBetween(2, 10);
+        for (int j = 0; j < numIters; j++) {
+            try (Store store = createStore()) {
+                final Translog translog = createTranslog();
+                final InternalEngine engine = createEngine(store, translog);
+                final CountDownLatch startGun = new CountDownLatch(1);
+                final CountDownLatch indexed = new CountDownLatch(1);
+
+                Thread thread = new Thread() {
+                    public void run() {
+                        try {
+                            try {
+                                startGun.await();
+                            } catch (InterruptedException e) {
+                                throw new RuntimeException(e);
+                            }
+                            int i = 0;
+                            while (true) {
+                                int numDocs = randomIntBetween(1, 20);
+                                for (int j = 0; j < numDocs; j++) {
+                                    i++;
+                                    ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, false);
+                                    Engine.Index index = new Engine.Index(null, newUid(Integer.toString(i)), doc);
+                                    engine.index(index);
+                                }
+                                engine.refresh("test");
+                                indexed.countDown();
+                                try {
+                                    engine.forceMerge(randomBoolean(), 1, false, randomBoolean());
+                                } catch (ForceMergeFailedEngineException ex) {
+                                    // ok
+                                    return;
+                                }
+                            }
+                        } catch (AlreadyClosedException | EngineClosedException ex) {
+                            // fine
+                        }
+                    }
+                };
+
+                thread.start();
+                startGun.countDown();
+                int someIters = randomIntBetween(1, 10);
+                for (int i = 0; i < someIters; i++) {
+                    engine.forceMerge(randomBoolean(), 1, false, randomBoolean());
+                }
+                indexed.await();
+                IOUtils.close(engine, translog);
+            }
+        }
+
+    }
+
     @Test
     public void testVersioningDeleteConflict() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, false);
@@ -1466,7 +1556,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
                 assertEquals(store.refCount(), refCount);
                 continue;
             }
-            holder.config().setFailEngineOnCorruption(true);
             assertEquals(store.refCount(), refCount + 1);
             final int numStarts = scaledRandomIntBetween(1, 5);
             for (int j = 0; j < numStarts; j++) {
@@ -1474,7 +1563,6 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
                     assertEquals(store.refCount(), refCount + 1);
                     holder.close();
                    holder = createEngine(store, translog);
-                    holder.config().setFailEngineOnCorruption(true);
                     assertEquals(store.refCount(), refCount + 1);
                 } catch (EngineCreationFailureException ex) {
                     // all is fine
@@ -1502,10 +1590,8 @@ public class InternalEngineTests extends ElasticsearchLuceneTestCase {
 
         IndexDynamicSettingsModule settings = new IndexDynamicSettingsModule();
 
-        assertTrue(settings.containsSetting(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING));
         assertTrue(settings.containsSetting(EngineConfig.INDEX_COMPOUND_ON_FLUSH));
         assertTrue(settings.containsSetting(EngineConfig.INDEX_GC_DELETES_SETTING));
-        assertTrue(settings.containsSetting(EngineConfig.INDEX_FAIL_ON_MERGE_FAILURE_SETTING));
     }
 
     @Test
diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
index 08178714fcb..ab09e517645 100644
--- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
@@ -121,7 +121,6 @@ public class ShadowEngineTests extends ElasticsearchLuceneTestCase {
         defaultSettings = ImmutableSettings.builder()
                 .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
                 .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, randomBoolean())
                 .put(EngineConfig.INDEX_CODEC_SETTING, codecName)
                 .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency)
                 .build(); // TODO randomize more settings
@@ -893,7 +892,6 @@ public class ShadowEngineTests extends ElasticsearchLuceneTestCase {
                 assertEquals(store.refCount(), refCount);
                 continue;
             }
-            holder.config().setFailEngineOnCorruption(true);
             assertEquals(store.refCount(), refCount+1);
             final int numStarts = scaledRandomIntBetween(1, 5);
             for (int j = 0; j < numStarts; j++) {
@@ -901,7 +899,6 @@ public class ShadowEngineTests extends ElasticsearchLuceneTestCase {
                     assertEquals(store.refCount(), refCount + 1);
                     holder.close();
                     holder = createShadowEngine(store, translog);
-                    holder.config().setFailEngineOnCorruption(true);
                     assertEquals(store.refCount(), refCount + 1);
                 } catch (EngineCreationFailureException ex) {
                     // all is fine
diff --git a/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java b/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
index cdb48cb810a..e82be52f061 100644
--- a/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
+++ b/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
@@ -27,6 +27,7 @@ import org.elasticsearch.action.suggest.SuggestResponse;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
 import org.elasticsearch.search.suggest.SuggestBuilders;
 import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
@@ -90,7 +91,7 @@ public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrat
         builder.endObject();
         builder.startObject("transform");
         builder.field("script", "ctx._source.suggest = ['input': ctx._source.text];ctx._source.suggest.payload = ['display': ctx._source.text, 'display_detail': 'on the fly']");
-        builder.field("lang", "groovy");
+        builder.field("lang", GroovyScriptEngineService.NAME);
         builder.endObject();
         assertAcked(client().admin().indices().prepareCreate("test").addMapping("test", builder));
         // Payload is stored using original source format (json, smile, yaml, whatever)
@@ -127,7 +128,7 @@ public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrat
             // Single transform
             builder.startObject();
             buildTransformScript(builder);
-            builder.field("lang", "groovy");
+            builder.field("lang", GroovyScriptEngineService.NAME);
             builder.endObject();
         } else {
             // Multiple transforms
@@ -141,7 +142,7 @@ public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrat
             } else {
                 builder.field("script", "true");
             }
-            builder.field("lang", "groovy");
+            builder.field("lang", GroovyScriptEngineService.NAME);
             builder.endObject();
         }
         builder.endArray();
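All three transform hunks make the same substitution: the registered engine constant instead of a string literal, which keeps the test in step with the engine's actual name. In context (the surrounding builder calls are from the hunks above):

    builder.startObject("transform");
    builder.field("script", "...");                         // script body elided
    builder.field("lang", GroovyScriptEngineService.NAME);  // replaces the hard-coded "groovy"
    builder.endObject();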
diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
index 7c2575c341e..ba8dd76296c 100644
--- a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
@@ -20,14 +20,20 @@
 package org.elasticsearch.index.mapper.ip;
 
 import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.bootstrap.Elasticsearch;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 import org.junit.Test;
 
-import static org.hamcrest.Matchers.*;
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
 
 /**
  *
@@ -77,4 +83,45 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest {
         }
     }
 
+    @Test
+    public void testIgnoreMalformedOption() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field1")
+                .field("type", "ip").field("ignore_malformed", true).endObject().startObject("field2").field("type", "ip")
+                .field("ignore_malformed", false).endObject().startObject("field3").field("type", "ip").endObject().endObject().endObject()
+                .endObject().string();
+
+        DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1",
+                XContentFactory.jsonBuilder().startObject().field("field1", "").field("field2", "10.20.30.40").endObject().bytes());
+        assertThat(doc.rootDoc().getField("field1"), nullValue());
+        assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes());
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+        }
+
+        // Verify that the default is false
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes());
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+        }
+
+        // Unless the global ignore_malformed option is set to true
+        Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+        defaultMapper = createIndex("test2", indexSettings).mapperService().documentMapperParser().parse(mapping);
+        doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes());
+        assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+        // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes());
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+        }
+    }
+
 }
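The new ip-mapping test drives everything through ignore_malformed; the one-line builder chain it parses boils down to this shape (field names as in the test, reflowed purely for readability):

    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
            .startObject("properties")
                .startObject("field1").field("type", "ip").field("ignore_malformed", true).endObject()
                .startObject("field2").field("type", "ip").field("ignore_malformed", false).endObject()
                .startObject("field3").field("type", "ip").endObject()  // falls back to index.mapping.ignore_malformed
            .endObject()
            .endObject().endObject().string();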
diff --git a/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java b/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java
index 10a61d5f7bd..4a66b61fe44 100644
--- a/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java
+++ b/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.index.query;
 
 import com.google.common.collect.Maps;
-
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptResponse;
 import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse;
@@ -31,6 +30,7 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,9 +43,7 @@ import java.util.Map;
 
 import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 
@@ -230,7 +228,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
         createIndex(ScriptService.SCRIPT_INDEX);
         ensureGreen(ScriptService.SCRIPT_INDEX);
 
-        PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript("mustache", "testTemplate", "{" +
+        PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "testTemplate", "{" +
                 "\"template\":{" +
                 "    \"query\":{" +
                 "        \"match\":{" +
@@ -241,7 +239,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
 
         assertTrue(scriptResponse.isCreated());
 
-        scriptResponse = client().preparePutIndexedScript("mustache", "testTemplate", "{" +
+        scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "testTemplate", "{" +
                 "\"template\":{" +
                 "    \"query\":{" +
                 "        \"match\":{" +
@@ -252,7 +250,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
 
         assertEquals(scriptResponse.getVersion(), 2);
 
-        GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript("mustache", "testTemplate").get();
+        GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
         assertTrue(getResponse.isExists());
 
         List<IndexRequestBuilder> builders = new ArrayList<>();
@@ -272,10 +270,10 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
                 setTemplateName("testTemplate").setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
         assertHitCount(searchResponse, 4);
 
-        DeleteIndexedScriptResponse deleteResponse = client().prepareDeleteIndexedScript("mustache","testTemplate").get();
+        DeleteIndexedScriptResponse deleteResponse = client().prepareDeleteIndexedScript(MustacheScriptEngineService.NAME,"testTemplate").get();
         assertTrue(deleteResponse.isFound());
 
-        getResponse = client().prepareGetIndexedScript("mustache", "testTemplate").get();
+        getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
         assertFalse(getResponse.isExists());
 
         client().prepareSearch("test").setTypes("type").
@@ -287,7 +285,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
         createIndex(ScriptService.SCRIPT_INDEX);
         ensureGreen(ScriptService.SCRIPT_INDEX);
         List<IndexRequestBuilder> builders = new ArrayList<>();
-        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "mustache", "1a").setSource("{" +
+        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "1a").setSource("{" +
                 "\"template\":{"+
                 "    \"query\":{" +
                 "        \"match\":{" +
@@ -295,7 +293,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
                 "    }" +
                 "}" +
                 "}"));
-        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "mustache", "2").setSource("{" +
+        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "2").setSource("{" +
                 "\"template\":{"+
                 "    \"query\":{" +
                 "        \"match\":{" +
@@ -304,7 +302,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
                 "}" +
                 "}"));
-        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "mustache", "3").setSource("{" +
+        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "3").setSource("{" +
                 "\"template\":{"+
                 "    \"match\":{" +
                 "        \"theField\" : \"{{fieldParam}}\"}" +
@@ -381,7 +379,7 @@ public class TemplateQueryTest extends ElasticsearchIntegrationTest {
 
         String multiQuery = "{\"query\":{\"terms\":{\"theField\":[\"{{#fieldParam}}\",\"{{.}}\",\"{{/fieldParam}}\"]}}}";
 
-        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "mustache", "4").setSource(jsonBuilder().startObject().field("template", multiQuery).endObject()));
+        builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "4").setSource(jsonBuilder().startObject().field("template", multiQuery).endObject()));
 
         indexRandom(true,builders);
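TemplateQueryTest now names the script index's type via MustacheScriptEngineService.NAME throughout. The indexed-template lifecycle it covers, in short (inside an integration test, with `source` standing in for a template body):

    client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "testTemplate", source).get();
    GetIndexedScriptResponse get = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
    assertTrue(get.isExists());
    DeleteIndexedScriptResponse deleted = client().prepareDeleteIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
    assertTrue(deleted.isFound());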
diff --git a/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java b/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java
index dbb6ede06d4..a7365c4b59d 100644
--- a/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java
+++ b/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.index.store;
 
 import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
 import com.carrotsearch.randomizedtesting.LifecycleScope;
-import com.carrotsearch.randomizedtesting.annotations.Repeat;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import com.google.common.base.Charsets;
 import com.google.common.base.Predicate;
@@ -51,7 +50,6 @@ import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.merge.policy.MergePolicyModule;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.IndexShard;
@@ -123,19 +121,13 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
         }
         assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
 
-        final boolean failOnCorruption = randomBoolean();
         assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
                 .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
                 .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, failOnCorruption)
                 .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
                 .put("indices.recovery.concurrent_streams", 10)
         ));
-        if (failOnCorruption == false) { // test the dynamic setting
-            client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder()
-                    .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)).get();
-        }
         ensureGreen();
         disableAllocation("test");
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
@@ -238,7 +230,6 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                 .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
                 .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)
                 .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
                 .put("indices.recovery.concurrent_streams", 10)
         ));
@@ -323,7 +314,6 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
         assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)
                 .put("index.routing.allocation.include._name", primariesNode.getNode().name())
                 .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
@@ -387,7 +377,6 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
         assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder()
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4)) // don't go crazy here it must recovery fast
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)
                 // This does corrupt files on the replica, so we can't check:
                 .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)
                 .put("index.routing.allocation.include._name", primariesNode.getNode().name())
@@ -415,7 +404,7 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
                 RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                 if (truncate && req.length() > 1) {
                     BytesArray array = new BytesArray(req.content().array(), req.content().arrayOffset(), (int) req.length() - 1);
-                    request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk());
+                    request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps());
                 } else {
                     byte[] array = req.content().array();
                     int i = randomIntBetween(0, req.content().length() - 1);
@@ -472,7 +461,6 @@ public class CorruptedFileTest extends ElasticsearchIntegrationTest {
                 .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
                 .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
                 .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
-                .put(EngineConfig.INDEX_FAIL_ON_CORRUPTION_SETTING, true)
                 .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
                 .put("indices.recovery.concurrent_streams", 10)
         ));
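Two independent things happen in the CorruptedFileTest hunks: every INDEX_FAIL_ON_CORRUPTION_SETTING knob disappears (together with the InternalEngineTests change that now expects the engine to fail, this suggests failing on corruption is no longer optional), and RecoveryFileChunkRequest grows a trailing totalTranslogOps argument, used as in the hunk above:

    request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(),
            req.position(), array, req.lastChunk(), req.totalTranslogOps());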
diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
index 601aaf9bf2b..20af2562fed 100644
--- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
+++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java
@@ -25,11 +25,12 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.stream.BytesStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ElasticsearchTestCase;
-import org.hamcrest.MatcherAssert;
 import org.hamcrest.Matchers;
 import org.junit.After;
 import org.junit.Before;
@@ -50,8 +51,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static com.google.common.collect.Lists.newArrayList;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.*;
 
 /**
  *
@@ -111,32 +111,32 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase
     @Test
     public void testTransientTranslog() throws IOException {
         Translog.Snapshot snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
         snapshot.close();
 
         translog.add(new Translog.Create("test", "1", new byte[]{1}));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
         snapshot.close();
 
         translog.newTransientTranslog(2);
 
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
         snapshot.close();
 
         translog.add(new Translog.Index("test", "2", new byte[]{2}));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
         snapshot.close();
 
         translog.makeTransientCurrent();
 
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // now its one, since it only includes "2"
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // now its one, since it only includes "2"
         assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
         snapshot.close();
     }
 
@@ -144,30 +144,30 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase
     @Test
     public void testSimpleOperations() throws IOException {
         Translog.Snapshot snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
         snapshot.close();
 
         translog.add(new Translog.Create("test", "1", new byte[]{1}));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(1));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
         snapshot.close();
 
         translog.add(new Translog.Index("test", "2", new byte[]{2}));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(2));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(2));
         snapshot.close();
 
         translog.add(new Translog.Delete(newUid("3")));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(3));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(3));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(3));
         snapshot.close();
 
         translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(4));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(4));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(4));
         snapshot.close();
 
@@ -198,7 +198,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase
         assertThat(translog.currentId(), Matchers.not(equalTo(firstId)));
         snapshot = translog.snapshot();
-        MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
+        assertThat(snapshot, TranslogSizeMatcher.translogSize(0));
         assertThat(snapshot.estimatedTotalOperations(), equalTo(0));
         snapshot.close();
     }
@@ -212,15 +212,66 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase
         }
     }
 
+    protected TranslogStats stats() throws IOException {
+        // force flushing and updating of stats
+        translog.sync();
+        TranslogStats stats = translog.stats();
+        if (randomBoolean()) {
+            BytesStreamOutput out = new BytesStreamOutput();
+            stats.writeTo(out);
+            BytesStreamInput in = new BytesStreamInput(out.bytes());
+            stats = new TranslogStats();
+            stats.readFrom(in);
+        }
+        return stats;
+    }
+
+    @Test
+    public void testStats() throws IOException {
+        TranslogStats stats = stats();
+        assertThat(stats.estimatedNumberOfOperations(), equalTo(0l));
+        long lastSize = stats.translogSizeInBytes().bytes();
+        assertThat(lastSize, equalTo(17l));
+
+        translog.add(new Translog.Create("test", "1", new byte[]{1}));
+        stats = stats();
+        assertThat(stats.estimatedNumberOfOperations(), equalTo(1l));
+        assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+        lastSize = stats.translogSizeInBytes().bytes();
+
+        translog.add(new Translog.Index("test", "2", new byte[]{2}));
+        stats = stats();
+        assertThat(stats.estimatedNumberOfOperations(), equalTo(2l));
+        assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+        lastSize = stats.translogSizeInBytes().bytes();
+
+        translog.add(new Translog.Delete(newUid("3")));
+        stats = stats();
+        assertThat(stats.estimatedNumberOfOperations(), equalTo(3l));
+        assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+        lastSize = stats.translogSizeInBytes().bytes();
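+        // every operation type, including deletes, appends to the translog, so the size keeps growing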
= stats.translogSizeInBytes().bytes(); + + + translog.add(new Translog.DeleteByQuery(new BytesArray(new byte[]{4}), null)); + stats = stats(); + assertThat(stats.estimatedNumberOfOperations(), equalTo(4l)); + assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize)); + + translog.newTranslog(2); + stats = stats(); + assertThat(stats.estimatedNumberOfOperations(), equalTo(0l)); + assertThat(stats.translogSizeInBytes().bytes(), equalTo(17l)); + } + @Test public void testSnapshot() { Translog.Snapshot snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); snapshot.close(); translog.add(new Translog.Create("test", "1", new byte[]{1})); snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); snapshot.close(); @@ -231,7 +282,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase snapshot.close(); Translog.Snapshot snapshot1 = translog.snapshot(); - MatcherAssert.assertThat(snapshot1, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot1, TranslogSizeMatcher.translogSize(1)); assertThat(snapshot1.estimatedTotalOperations(), equalTo(1)); // seek to the end of the translog snapshot @@ -241,7 +292,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.add(new Translog.Index("test", "2", new byte[]{2})); snapshot = translog.snapshot(snapshot1); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); assertThat(snapshot.estimatedTotalOperations(), equalTo(2)); snapshot.close(); @@ -258,7 +309,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase @Test public void testSnapshotWithNewTranslog() throws IOException { Translog.Snapshot snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); snapshot.close(); translog.add(new Translog.Create("test", "1", new byte[]{1})); @@ -271,7 +322,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.add(new Translog.Index("test", "3", new byte[]{3})); snapshot = translog.snapshot(actualSnapshot); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); snapshot.close(); snapshot = translog.snapshot(actualSnapshot); @@ -289,22 +340,22 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.add(new Translog.Create("test", "1", new byte[]{1})); Translog.Snapshot firstSnapshot = translog.snapshot(); - MatcherAssert.assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1)); translog.newTransientTranslog(2); assertFileIsPresent(translog, 1); translog.add(new Translog.Index("test", "2", new byte[]{2})); - MatcherAssert.assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(firstSnapshot, TranslogSizeMatcher.translogSize(1)); assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1)); if (randomBoolean()) { translog.clearUnreferenced(); } translog.makeTransientCurrent(); - 
Translog.Snapshot secondSnapshot = translog.snapshot(); + Translog.Snapshot secondSnapshot = translog.snapshot(); translog.add(new Translog.Index("test", "3", new byte[]{3})); - MatcherAssert.assertThat(secondSnapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(secondSnapshot, TranslogSizeMatcher.translogSize(1)); assertThat(secondSnapshot.estimatedTotalOperations(), equalTo(1)); assertFileIsPresent(translog, 1); assertFileIsPresent(translog, 2); @@ -337,7 +388,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase public void assertFileIsPresent(Translog translog, long id) { for (Path location : translog.locations()) { - if(Files.exists(location.resolve(translog.getPath(id)))) { + if (Files.exists(location.resolve(translog.getPath(id)))) { return; } } @@ -353,12 +404,12 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase @Test public void testSnapshotWithSeekTo() { Translog.Snapshot snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(0)); snapshot.close(); translog.add(new Translog.Create("test", "1", new byte[]{1})); snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); // seek to the end of the translog snapshot while (snapshot.next() != null) { // spin @@ -369,7 +420,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase translog.add(new Translog.Create("test", "2", new byte[]{1})); snapshot = translog.snapshot(); snapshot.seekTo(lastPosition); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); snapshot.close(); snapshot = translog.snapshot(); @@ -584,8 +635,8 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase int corruptions = scaledRandomIntBetween(10, 50); for (int i = 0; i < corruptions; i++) { // note: with the current logic, this will sometimes be a no-op - long pos = randomIntBetween(0, (int)f.size()); - ByteBuffer junk = ByteBuffer.wrap(new byte[] { randomByte() }); + long pos = randomIntBetween(0, (int) f.size()); + ByteBuffer junk = ByteBuffer.wrap(new byte[]{randomByte()}); f.write(junk, pos); } f.close(); @@ -603,7 +654,7 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase assertTrue(Files.exists(path.resolve("translog-1"))); translog.add(new Translog.Create("test", "1", new byte[]{1})); Translog.Snapshot snapshot = translog.snapshot(); - MatcherAssert.assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); + assertThat(snapshot, TranslogSizeMatcher.translogSize(1)); assertThat(snapshot.estimatedTotalOperations(), equalTo(1)); if (randomBoolean()) { translog.close(); diff --git a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java index 00cca91b422..a24352ff0ea 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java +++ b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java @@ -70,7 +70,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { assertTrue(test.hasShard(0)); try { - indicesService.deleteIndexStore("boom", firstMetaData); + indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); } catch (ElasticsearchIllegalStateException ex) { // all 
good @@ -100,7 +100,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { } try { - indicesService.deleteIndexStore("boom", secondMetaData); + indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state()); fail(); } catch (ElasticsearchIllegalStateException ex) { // all good @@ -112,7 +112,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { // now delete the old one and make sure we resolve against the name try { - indicesService.deleteIndexStore("boom", firstMetaData); + indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); } catch (ElasticsearchIllegalStateException ex) { // all good diff --git a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java index 75f8cf25884..311449aaece 100644 --- a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java +++ b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.memory.breaker; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -107,6 +108,7 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { } @Test + @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/8710") public void testMemoryBreaker() throws Exception { if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); @@ -152,6 +154,7 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { } @Test + @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270") public void testRamAccountingTermsEnum() throws Exception { if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); diff --git a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java index 12bf29aecf7..5d96a4d8f2e 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java +++ b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java @@ -343,19 +343,33 @@ public class RecoveryStateTest extends ElasticsearchTestCase { // we don't need to test the time aspect, it's done in the timer test translog.start(); - assertThat(translog.currentTranslogOperations(), equalTo(0)); + assertThat(translog.recoveredOperations(), equalTo(0)); + assertThat(translog.totalOperations(), equalTo(Translog.UNKNOWN)); + assertThat(translog.totalOperationsOnStart(), equalTo(Translog.UNKNOWN)); streamer.start(); // force one streamer.serializeDeserialize(); int ops = 0; + int totalOps = 0; + int totalOpsOnStart = randomIntBetween(10, 200); + translog.totalOperationsOnStart(totalOpsOnStart); for (int i = scaledRandomIntBetween(10, 200); i > 0; i--) { - for (int j = randomIntBetween(1, 10); j > 0; j--) { + final int iterationOps = randomIntBetween(1, 10); + totalOps += iterationOps; + translog.totalOperations(totalOps); + assertThat((double) translog.recoveredPercent(), closeTo(100.0 * ops / totalOps, 0.1)); + for (int j = iterationOps; j > 0; j--) { ops++; - translog.incrementTranslogOperations(); + translog.incrementRecoveredOperations(); } - 
assertThat(translog.currentTranslogOperations(), equalTo(ops)); - assertThat(streamer.lastRead().currentTranslogOperations(), greaterThanOrEqualTo(0)); - assertThat(streamer.lastRead().currentTranslogOperations(), lessThanOrEqualTo(ops)); + assertThat(translog.recoveredOperations(), equalTo(ops)); + assertThat(translog.totalOperations(), equalTo(totalOps)); + assertThat(translog.recoveredPercent(), equalTo(100.f)); + assertThat(streamer.lastRead().recoveredOperations(), greaterThanOrEqualTo(0)); + assertThat(streamer.lastRead().recoveredOperations(), lessThanOrEqualTo(ops)); + assertThat(streamer.lastRead().totalOperations(), lessThanOrEqualTo(totalOps)); + assertThat(streamer.lastRead().totalOperationsOnStart(), lessThanOrEqualTo(totalOpsOnStart)); + assertThat(streamer.lastRead().recoveredPercent(), either(greaterThanOrEqualTo(0.f)).or(equalTo(-1.f))); } boolean stopped = false; @@ -367,13 +381,19 @@ public class RecoveryStateTest extends ElasticsearchTestCase { if (randomBoolean()) { translog.reset(); ops = 0; - assertThat(translog.currentTranslogOperations(), equalTo(0)); + totalOps = Translog.UNKNOWN; + totalOpsOnStart = Translog.UNKNOWN; + assertThat(translog.recoveredOperations(), equalTo(0)); + assertThat(translog.totalOperationsOnStart(), equalTo(Translog.UNKNOWN)); + assertThat(translog.totalOperations(), equalTo(Translog.UNKNOWN)); } stop.set(true); streamer.join(); final Translog lastRead = streamer.lastRead(); - assertThat(lastRead.currentTranslogOperations(), equalTo(ops)); + assertThat(lastRead.recoveredOperations(), equalTo(ops)); + assertThat(lastRead.totalOperations(), equalTo(totalOps)); + assertThat(lastRead.totalOperationsOnStart(), equalTo(totalOpsOnStart)); assertThat(lastRead.startTime(), equalTo(translog.startTime())); assertThat(lastRead.stopTime(), equalTo(translog.stopTime())); diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 21e4b982fbb..38a7a9d09ae 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -217,7 +217,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { // Optimize does a waitForMerges, which we must do to make sure all in-flight (throttled) merges finish: logger.info("test: optimize"); - client().admin().indices().prepareOptimize("test").get(); + client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get(); logger.info("test: optimize done"); // Record current throttling so far diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index a43fd751d07..877808d7951 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.store; import com.google.common.base.Predicate; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterService; @@ -27,11 +28,13 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; 
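For reference, the translog recovery-progress arithmetic that the RecoveryStateTest assertions above exercise reduces to something like the following sketch (an editorial illustration, not the actual RecoveryState.Translog implementation; it assumes Translog.UNKNOWN == -1 and that an unknown total reports -1, which is what the either(greaterThanOrEqualTo(0.f)).or(equalTo(-1.f)) matcher hints at):

    // Sketch of the progress math pinned down by the asserts above.
    static float recoveredPercent(int recoveredOperations, int totalOperations) {
        if (totalOperations == -1) {   // Translog.UNKNOWN: total not reported yet
            return -1.0f;              // lets callers tell "no data" apart from "0% done"
        }
        if (totalOperations == 0) {
            return 100.0f;             // nothing to recover (assumption, not asserted by the test)
        }
        return 100.0f * recoveredOperations / totalOperations;
    }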
import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -41,6 +44,8 @@ import org.junit.Test; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; @@ -53,6 +58,58 @@ import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope= Scope.TEST, numDataNodes = 0) public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { + @Test + @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10018") + public void indexCleanup() throws Exception { + final String masterNode = internalCluster().startNode(ImmutableSettings.builder().put("node.data", false)); + final String node_1 = internalCluster().startNode(ImmutableSettings.builder().put("node.master", false)); + final String node_2 = internalCluster().startNode(ImmutableSettings.builder().put("node.master", false)); + logger.info("--> creating index [test] with one shard and one replica"); + assertAcked(prepareCreate("test").setSettings( + ImmutableSettings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen("test"); + + logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); + assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); + + logger.info("--> starting node_3"); + final String node_3 = internalCluster().startNode(ImmutableSettings.builder().put("node.master", false)); + logger.info("--> running cluster_health"); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForNodes("4") + .setWaitForRelocatingShards(0) + .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(false)); + assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false)); + + logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish"); + internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_3)).get(); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForNodes("4") + .setWaitForRelocatingShards(0)
+ .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false)); + assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false)); + assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); + assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); + assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true)); + } + @Test public void shardsCleanup() throws Exception { final String node_1 = internalCluster().startNode(); @@ -115,26 +172,43 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { @Test public void testShardActiveElseWhere() throws Exception { - String node_1 = internalCluster().startNode(); - String node_2 = internalCluster().startNode(); + boolean node1IsMasterEligible = randomBoolean(); + boolean node2IsMasterEligible = !node1IsMasterEligible || randomBoolean(); + Future node_1_future = internalCluster().startNodeAsync(ImmutableSettings.builder().put("node.master", node1IsMasterEligible).build()); + Future node_2_future = internalCluster().startNodeAsync(ImmutableSettings.builder().put("node.master", node2IsMasterEligible).build()); + final String node_1 = node_1_future.get(); + final String node_2 = node_2_future.get(); final String node_1_id = internalCluster().getInstance(DiscoveryService.class, node_1).localNode().getId(); final String node_2_id = internalCluster().getInstance(DiscoveryService.class, node_2).localNode().getId(); + logger.debug("node {} (node_1) is {}master eligible", node_1, node1IsMasterEligible ? "" : "not "); + logger.debug("node {} (node_2) is {}master eligible", node_2, node2IsMasterEligible ? 
"" : "not "); + logger.debug("node {} became master", internalCluster().getMasterName()); final int numShards = scaledRandomIntBetween(2, 20); assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)) ); ensureGreen("test"); + waitNoPendingTasksOnAll(); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); + RoutingNode routingNode = stateResponse.getState().routingNodes().node(node_2_id); - int[] node2Shards = new int[routingNode.numberOfOwningShards()]; + final int[] node2Shards = new int[routingNode.numberOfOwningShards()]; int i = 0; for (MutableShardRouting mutableShardRouting : routingNode) { - node2Shards[i++] = mutableShardRouting.shardId().id(); + node2Shards[i] = mutableShardRouting.shardId().id(); + i++; } logger.info("Node 2 has shards: {}", Arrays.toString(node2Shards)); - waitNoPendingTasksOnAll(); + final long shardVersions[] = new long[numShards]; + final int shardIds[] = new int[numShards]; + i=0; + for (ShardRouting shardRouting : stateResponse.getState().getRoutingTable().allShards("test")) { + shardVersions[i] = shardRouting.version(); + shardIds[i] = shardRouting.getId(); + i++; + } internalCluster().getInstance(ClusterService.class, node_2).submitStateUpdateTask("test", Priority.IMMEDIATE, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { @@ -142,7 +216,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numShards; i++) { indexRoutingTableBuilder.addIndexShard( new IndexShardRoutingTable.Builder(new ShardId("test", i), false) - .addShard(new ImmutableShardRouting("test", i, node_1_id, true, ShardRoutingState.STARTED, 1)) + .addShard(new ImmutableShardRouting("test", i, node_1_id, true, ShardRoutingState.STARTED, shardVersions[shardIds[i]])) .build() ); } @@ -151,6 +225,10 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { .build(); } + public boolean runOnlyOnMaster() { + return false; + } + @Override public void onFailure(String source, Throwable t) { } @@ -162,6 +240,11 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { } } + private Path indexDirectory(String server, String index) { + NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); + return env.indexPaths(new Index(index))[0]; + } + private Path shardDirectory(String server, String index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); return env.shardPaths(new ShardId(index, shard))[0]; @@ -177,5 +260,13 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { return Files.exists(shardDirectory(server, index, shard)); } - + private boolean waitForIndexDeletion(final String server, final String index) throws InterruptedException { + awaitBusy(new Predicate() { + @Override + public boolean apply(Object o) { + return !Files.exists(indexDirectory(server, index)); + } + }); + return Files.exists(indexDirectory(server, index)); + } } diff --git a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java index d106067d02f..312ba8c9d93 100644 --- a/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java +++ b/src/test/java/org/elasticsearch/mlt/MoreLikeThisActionTests.java @@ -671,8 +671,7 @@ 
public class MoreLikeThisActionTests extends ElasticsearchIntegrationTest { public void testMoreLikeThisIgnoreLike() throws ExecutionException, InterruptedException, IOException { createIndex("test"); ensureGreen(); - - int numFields = randomIntBetween(5, 35); + int numFields = randomIntBetween(5, 10); logger.info("Create a document that has all the fields."); XContentBuilder doc = jsonBuilder().startObject(); diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java index c136df36055..d6b9bd852d1 100644 --- a/src/test/java/org/elasticsearch/percolator/PercolatorTests.java +++ b/src/test/java/org/elasticsearch/percolator/PercolatorTests.java @@ -22,6 +22,7 @@ import com.google.common.base.Predicate; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -50,7 +51,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.io.IOException; @@ -950,7 +950,84 @@ public class PercolatorTests extends ElasticsearchIntegrationTest { for (PercolateResponse.Match match : response) { assertThat(match.getIndex().string(), equalTo("test2")); } + @Test + public void testPercolateWithAliasFilter() throws Exception { + assertAcked(prepareCreate("my-index") + .addMapping(PercolatorService.TYPE_NAME, "a", "type=string,index=not_analyzed") + .addAlias(new Alias("a").filter(FilterBuilders.termFilter("a", "a"))) + .addAlias(new Alias("b").filter(FilterBuilders.termFilter("a", "b"))) + .addAlias(new Alias("c").filter(FilterBuilders.termFilter("a", "c"))) + ); + client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "a").endObject()) + .get(); + client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2") + .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "b").endObject()) + .get(); + refresh(); + + // When only the document to percolate is specified and no filter, sorting or aggs, the queries are retrieved from + // memory directly. Otherwise we need to retrieve those queries from lucene to be able to execute filters, + // aggregations and sorting on top of them. So this tests a different code execution path.
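As an illustrative aside (this block is not part of the original patch): percolating through the filtered alias "a" should behave like percolating the concrete index with the alias's filter applied explicitly; a rough equivalent using only calls that already appear in this test:

    // Hypothetical equivalent of .setIndices("a"): address the concrete index and
    // inline the alias's term filter. Either way only the stored query whose
    // metadata field "a" equals "a" (doc "1") should match.
    PercolateResponse viaConcreteIndex = client().preparePercolate()
            .setIndices("my-index")
            .setDocumentType("my-type")
            .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
            .setPercolateFilter(FilterBuilders.termFilter("a", "a"))
            .get();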
+ PercolateResponse response = client().preparePercolate() + .setIndices("a") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(1l)); + assertThat(response.getMatches()[0].getId().string(), equalTo("1")); + + response = client().preparePercolate() + .setIndices("b") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(1l)); + assertThat(response.getMatches()[0].getId().string(), equalTo("2")); + + + response = client().preparePercolate() + .setIndices("c") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(0l)); + + // Testing that the alias filter and the filter specified while percolating are both taken into account. + response = client().preparePercolate() + .setIndices("a") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .setPercolateFilter(FilterBuilders.matchAllFilter()) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(1l)); + assertThat(response.getMatches()[0].getId().string(), equalTo("1")); + + response = client().preparePercolate() + .setIndices("b") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .setPercolateFilter(FilterBuilders.matchAllFilter()) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(1l)); + assertThat(response.getMatches()[0].getId().string(), equalTo("2")); + + + response = client().preparePercolate() + .setIndices("c") + .setDocumentType("my-type") + .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}")) + .setPercolateFilter(FilterBuilders.matchAllFilter()) + .get(); + assertNoFailures(response); + assertThat(response.getCount(), equalTo(0l)); } @Test diff --git a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java index 847af7167f1..801f4b36e40 100644 --- a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java +++ b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.lucene.search.function.CombineFunction; +import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -72,7 +73,7 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest { } indexRandom(true, false, reqs); try { - client().prepareSearch("test").setQuery(constantScoreQuery(scriptFilter("1 == not_found").lang("groovy"))).get(); + client().prepareSearch("test").setQuery(constantScoreQuery(scriptFilter("1 == not_found").lang(GroovyScriptEngineService.NAME))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException", diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java 
index 6d996f3cedb..09b9ccf63ec 100644 --- a/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -28,11 +28,9 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.watcher.ResourceWatcherService; import org.junit.Test; import java.util.Map; -import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; @@ -51,7 +49,7 @@ public class NativeScriptTests extends ElasticsearchTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); - ExecutableScript executable = scriptService.executable("native", "my", ScriptService.ScriptType.INLINE, null); + ExecutableScript executable = scriptService.executable(NativeScriptEngineService.NAME, "my", ScriptService.ScriptType.INLINE, null); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } diff --git a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java index 190aa206311..bd0c7e5009c 100644 --- a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java +++ b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java @@ -44,27 +44,45 @@ public class OnDiskScriptTests extends ElasticsearchIntegrationTest { .put("path.conf", this.getResourcePath("config")).build(); } - @Test public void testFieldOnDiskScript() throws ExecutionException, InterruptedException { - List builders = new ArrayList(); + List builders = new ArrayList<>(); builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}")); builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}")); builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}")); builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}")); builders.add(client().prepareIndex("test", "scriptTest", "5").setSource("{\"theField\":\"bar\"}")); - indexRandom(true,builders); + indexRandom(true, builders); String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\" }, \"test2\" : { \"script_file\" : \"script2\", \"params\":{\"factor\":3} }}, size:1}"; SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get(); - assertHitCount(searchResponse,5); + assertHitCount(searchResponse, 5); assertTrue(searchResponse.getHits().hits().length == 1); SearchHit sh = searchResponse.getHits().getAt(0); assertThat((Integer)sh.field("test1").getValue(), equalTo(2)); assertThat((Integer)sh.field("test2").getValue(), equalTo(6)); } + @Test + public void testOnDiskScriptsSameNameDifferentLang() throws ExecutionException, InterruptedException { + List builders = new ArrayList<>(); + builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}")); + builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}")); + builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}")); + builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}")); + builders.add(client().prepareIndex("test", 
"scriptTest", "5").setSource("{\"theField\":\"bar\"}")); + + indexRandom(true, builders); + + String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\" }, \"test2\" : { \"script_file\" : \"script1\", \"lang\":\"expression\" }}, size:1}"; + SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get(); + assertHitCount(searchResponse, 5); + assertTrue(searchResponse.getHits().hits().length == 1); + SearchHit sh = searchResponse.getHits().getAt(0); + assertThat((Integer)sh.field("test1").getValue(), equalTo(2)); + assertThat((Double)sh.field("test2").getValue(), equalTo(10d)); + } } diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 8a5afcd13dc..6339874c52b 100644 --- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -18,16 +18,19 @@ */ package org.elasticsearch.script; -import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableSet; +import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.script.expression.ExpressionScriptEngineService; +import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.Before; import org.junit.Test; import java.io.IOException; @@ -36,37 +39,42 @@ import java.nio.file.Path; import java.util.Map; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.*; /** * */ public class ScriptServiceTests extends ElasticsearchTestCase { - @Test - public void testScriptsWithoutExtensions() throws IOException { - Path homeFolder = newTempDirPath(); + private ResourceWatcherService resourceWatcherService; + private ScriptService scriptService; + private Path scriptsFilePath; + + @Before + public void setup() throws IOException { Path genericConfigFolder = newTempDirPath(); Settings settings = settingsBuilder() .put("path.conf", genericConfigFolder) - .put("path.home", homeFolder) .build(); Environment environment = new Environment(settings); - ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, null); + resourceWatcherService = new ResourceWatcherService(settings, null); logger.info("--> setup script service"); - ScriptService scriptService = new ScriptService(settings, environment, - ImmutableSet.of(new TestEngineService()), resourceWatcherService, new NodeSettingsService(settings)); - Path scriptsFile = genericConfigFolder.resolve("scripts"); - Files.createDirectories(scriptsFile); - resourceWatcherService.notifyNow(); + scriptService = new ScriptService(settings, environment, + ImmutableSet.of(new TestEngineService(), new GroovyScriptEngineService(settings), new ExpressionScriptEngineService(settings)), + resourceWatcherService, new 
NodeSettingsService(settings)); + scriptsFilePath = genericConfigFolder.resolve("scripts"); + Files.createDirectories(scriptsFilePath); + } + + @Test + public void testScriptsWithoutExtensions() throws IOException { logger.info("--> setup two test files one with extension and another without"); - Path testFileNoExt = scriptsFile.resolve("test_no_ext"); - Path testFileWithExt = scriptsFile.resolve("test_script.tst"); + Path testFileNoExt = scriptsFilePath.resolve("test_no_ext"); + Path testFileWithExt = scriptsFilePath.resolve("test_script.tst"); Streams.copy("test_file_no_ext".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt)); Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt)); resourceWatcherService.notifyNow(); @@ -83,17 +91,49 @@ public class ScriptServiceTests extends ElasticsearchTestCase { logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile("test", "test_script", ScriptService.ScriptType.FILE); - fail("the script test_script should no longe exist"); + fail("the script test_script should no longer exist"); } catch (ElasticsearchIllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script")); } } + @Test + public void testScriptsSameNameDifferentLanguage() throws IOException { + Path groovyScriptPath = scriptsFilePath.resolve("script.groovy"); + Path expScriptPath = scriptsFilePath.resolve("script.expression"); + Streams.copy("10".getBytes("UTF-8"), Files.newOutputStream(groovyScriptPath)); + Streams.copy("20".getBytes("UTF-8"), Files.newOutputStream(expScriptPath)); + resourceWatcherService.notifyNow(); + + CompiledScript groovyScript = scriptService.compile(GroovyScriptEngineService.NAME, "script", ScriptService.ScriptType.FILE); + assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME)); + CompiledScript expressionScript = scriptService.compile(ExpressionScriptEngineService.NAME, "script", ScriptService.ScriptType.FILE); + assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME)); + } + + @Test + public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException { + CompiledScript compiledScript1 = scriptService.compile("test", "test_script", ScriptService.ScriptType.INLINE); + CompiledScript compiledScript2 = scriptService.compile("test2", "test_script", ScriptService.ScriptType.INLINE); + assertThat(compiledScript1, sameInstance(compiledScript2)); + } + + @Test + public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException { + Path scriptPath = scriptsFilePath.resolve("test_script.tst"); + Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(scriptPath)); + resourceWatcherService.notifyNow(); + + CompiledScript compiledScript1 = scriptService.compile("test", "test_script", ScriptService.ScriptType.FILE); + CompiledScript compiledScript2 = scriptService.compile("test2", "test_script", ScriptService.ScriptType.FILE); + assertThat(compiledScript1, sameInstance(compiledScript2)); + } + public static class TestEngineService implements ScriptEngineService { @Override public String[] types() { - return new String[] {"test"}; + return new String[] {"test", "test2"}; } @Override @@ -103,7 +143,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { @Override public boolean sandboxed() { - return false; + return true; } @Override diff --git a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java 
b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index f35565a2b95..d52bac18797 100644 --- a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -20,24 +20,21 @@ package org.elasticsearch.script.expression; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.get.GetRequestBuilder; -import org.elasticsearch.action.search.*; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.metrics.stats.Stats; -import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; -import java.io.IOException; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -225,8 +222,8 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(AggregationBuilders.stats("int_agg").field("x").script("_value * 3").lang("expression")) - .addAggregation(AggregationBuilders.stats("double_agg").field("y").script("_value - 1.1").lang("expression")); + .addAggregation(AggregationBuilders.stats("int_agg").field("x").script("_value * 3").lang(ExpressionScriptEngineService.NAME)) + .addAggregation(AggregationBuilders.stats("double_agg").field("y").script("_value - 1.1").lang(ExpressionScriptEngineService.NAME)); SearchResponse rsp = req.get(); assertEquals(3, rsp.getHits().getTotalHits()); @@ -251,7 +248,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { SearchRequestBuilder req = new SearchRequestBuilder(client()).setIndices("test"); req.setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(AggregationBuilders.terms("term_agg").field("text").script("_value").lang("expression")); + .addAggregation(AggregationBuilders.terms("term_agg").field("text").script("_value").lang(ExpressionScriptEngineService.NAME)); String message; try { diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index cabb878dc73..3747a3786c4 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.ImmutableSettings; -import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -42,8 +41,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -52,10 +51,12 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogra import static org.elasticsearch.search.aggregations.AggregationBuilders.max; import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.*; -import static org.hamcrest.core.IsNull.notNullValue; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNull.notNullValue; /** * @@ -1082,13 +1083,6 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { assertThat(bucket.getDocCount(), equalTo(3l)); } - @Override - public Settings indexSettings() { - ImmutableSettings.Builder builder = ImmutableSettings.builder(); - builder.put("index.number_of_shards", 1).put("index.number_of_replicas", 0); - return builder.build(); - } - @Test public void singleValueField_WithExtendedBounds() throws Exception { @@ -1099,7 +1093,10 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { DateTime base = new DateTime(DateTimeZone.UTC).dayOfMonth().roundFloorCopy(); DateTime baseKey = new DateTime(intervalMillis * (base.getMillis() / intervalMillis), DateTimeZone.UTC); - createIndex("idx2"); + prepareCreate("idx2") + .setSettings( + ImmutableSettings.builder().put(indexSettings()).put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0)).execute().actionGet(); int numOfBuckets = randomIntBetween(3, 6); int emptyBucketIndex = randomIntBetween(1, numOfBuckets - 2); // should be in the middle diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java index d07ceb89431..ab08c6765c4 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentFactory; @@ -30,35 +31,26 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.FilterBuilders; import org.elasticsearch.index.query.QueryParsingException; import org.elasticsearch.plugins.AbstractPlugin; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; +import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptNoParams; +import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptWithParams; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificantTermsHeuristicModule; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.TransportSignificantTermsHeuristicModule; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.*; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.junit.Test; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.ExecutionException; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -69,11 +61,12 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.*; /** * */ -@ClusterScope(scope = Scope.SUITE) +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE) public class SignificantTermsSignificanceScoreTests extends ElasticsearchIntegrationTest { static final String INDEX_NAME = "testidx"; @@ -82,10 +75,11 @@ public class SignificantTermsSignificanceScoreTests extends 
ElasticsearchIntegra static final String CLASS_FIELD = "class"; @Override - protected Settings nodeSettings(int nodeOrdinal) { + public Settings nodeSettings(int nodeOrdinal) { return settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put("plugin.types", CustomSignificanceHeuristicPlugin.class.getName()) + .put("path.conf", this.getResourcePath("config")) .build(); } @@ -179,6 +173,10 @@ public class SignificantTermsSignificanceScoreTests extends ElasticsearchIntegra public void onModule(TransportSignificantTermsHeuristicModule significanceModule) { significanceModule.registerStream(SimpleHeuristic.STREAM); } + public void onModule(ScriptModule module) { + module.registerScript(NativeSignificanceScoreScriptNoParams.NATIVE_SIGNIFICANCE_SCORE_SCRIPT_NO_PARAMS, NativeSignificanceScoreScriptNoParams.Factory.class); + module.registerScript(NativeSignificanceScoreScriptWithParams.NATIVE_SIGNIFICANCE_SCORE_SCRIPT_WITH_PARAMS, NativeSignificanceScoreScriptWithParams.Factory.class); + } } public static class SimpleHeuristic extends SignificanceHeuristic { @@ -472,4 +470,139 @@ public class SignificantTermsSignificanceScoreTests extends ElasticsearchIntegra } indexRandom(true, false, indexRequestBuilders); } + + @Test + public void testScriptScore() throws ExecutionException, InterruptedException, IOException { + indexRandomFrequencies01(randomBoolean() ? "string" : "long"); + ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = getScriptSignificanceHeuristicBuilder(); + ensureYellow(); + SearchResponse response = client().prepareSearch(INDEX_NAME) + .addAggregation(new TermsBuilder("class").field(CLASS_FIELD).subAggregation(new SignificantTermsBuilder("mySignificantTerms") + .field(TEXT_FIELD) + .executionHint(randomExecutionHint()) + .significanceHeuristic(scriptHeuristicBuilder) + .minDocCount(1).shardSize(2).size(2))) + .execute() + .actionGet(); + assertSearchResponse(response); + for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { + for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) { + assertThat(bucket.getSignificanceScore(), is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())); + } + } + } + + @Test + public void testNoNumberFormatExceptionWithDefaultScriptingEngine() throws ExecutionException, InterruptedException, IOException { + assertAcked(client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.number_of_shards", 1))); + index("test", "doc", "1", "{\"field\":\"a\"}"); + index("test", "doc", "11", "{\"field\":\"a\"}"); + index("test", "doc", "2", "{\"field\":\"b\"}"); + index("test", "doc", "22", "{\"field\":\"b\"}"); + index("test", "doc", "3", "{\"field\":\"a b\"}"); + index("test", "doc", "33", "{\"field\":\"a b\"}"); + ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = new ScriptHeuristic.ScriptHeuristicBuilder(); + scriptHeuristicBuilder.setScript("_subset_freq/(_superset_freq - _subset_freq + 1)"); + ensureYellow(); + refresh(); + SearchResponse response = client().prepareSearch("test") + .addAggregation(new TermsBuilder("letters").field("field").subAggregation(new SignificantTermsBuilder("mySignificantTerms") + .field("field") + .executionHint(randomExecutionHint()) + .significanceHeuristic(scriptHeuristicBuilder) + .minDocCount(1).shardSize(2).size(2))) + .execute() + .actionGet(); + assertSearchResponse(response); + 
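As a worked check of the default-engine heuristic above (editorial arithmetic for the six documents just indexed; the closeTo(...) assertion below recomputes the same formula per bucket):

    // Corpus: two "a" docs, two "b" docs, two "a b" docs => df("a") = df("b") = 4.
    // Inside the letters bucket "a" (4 documents):
    //   term "a": _subset_freq = 4, _superset_freq = 4 -> 4 / (4 - 4 + 1) = 4.0
    //   term "b": _subset_freq = 2, _superset_freq = 4 -> 2 / (4 - 2 + 1) = 0.666...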
assertThat(((Terms) response.getAggregations().get("letters")).getBuckets().size(), equalTo(2)); + for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("letters")).getBuckets()) { + assertThat(((SignificantStringTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets().size(), equalTo(2)); + for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) { + assertThat(bucket.getSignificanceScore(), closeTo((double) bucket.getSubsetDf() / (bucket.getSupersetDf() - bucket.getSubsetDf() + 1), 1.e-6)); + } + } + } + + private ScriptHeuristic.ScriptHeuristicBuilder getScriptSignificanceHeuristicBuilder() throws IOException { + Map<String, Object> params = null; + String script = null; + String lang = null; + String scriptId = null; + String scriptFile = null; + if (randomBoolean()) { + params = new HashMap<>(); + params.put("param", randomIntBetween(1, 100)); + } + int randomScriptKind = randomIntBetween(0, 3); + if (randomBoolean()) { + lang = "groovy"; + } + switch (randomScriptKind) { + case 0: { + if (params == null) { + script = "return _subset_freq + _subset_size + _superset_freq + _superset_size"; + } else { + script = "return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param"; + } + break; + } + case 1: { + if (params == null) { + script = "return _subset_freq + _subset_size + _superset_freq + _superset_size"; + } else { + script = "return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param"; + } + client().prepareIndex().setIndex(ScriptService.SCRIPT_INDEX).setType(ScriptService.DEFAULT_LANG).setId("my_script") + .setSource(XContentFactory.jsonBuilder().startObject() + .field("script", script) + .endObject()).get(); + refresh(); + scriptId = "my_script"; + script = null; + break; + } + case 2: { + if (params == null) { + scriptFile = "significance_script_no_params"; + } else { + scriptFile = "significance_script_with_params"; + } + break; + } + case 3: { + logger.info("NATIVE SCRIPT"); + if (params == null) { + script = "native_significance_score_script_no_params"; + } else { + script = "native_significance_score_script_with_params"; + } + lang = "native"; + break; + } + } + ScriptHeuristic.ScriptHeuristicBuilder builder = new ScriptHeuristic.ScriptHeuristicBuilder().setScript(script).setLang(lang).setParams(params).setScriptId(scriptId).setScriptFile(scriptFile); + + return builder; + } + + private void indexRandomFrequencies01(String type) throws ExecutionException, InterruptedException { + String mappings = "{\"" + DOC_TYPE + "\": {\"properties\":{\"" + TEXT_FIELD + "\": {\"type\":\"" + type + "\"}}}}"; + assertAcked(prepareCreate(INDEX_NAME).addMapping(DOC_TYPE, mappings)); + String[] gb = {"0", "1"}; + List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>(); + final int numDocs = randomInt(20); + for (int i = 0; i < numDocs; i++) { + int randNum = randomInt(2); + String[] text = new String[1]; + if (randNum == 2) { + text = gb; + } else { + text[0] = gb[randNum]; + } + indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE) + .setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ?
"one" : "zero")); + } + indexRandom(true, indexRequestBuilderList); + } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java new file mode 100644 index 00000000000..816c845675c --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.script; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.NativeScriptFactory; + +import java.util.Map; + +public class NativeSignificanceScoreScriptNoParams extends TestScript { + + public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_NO_PARAMS = "native_significance_score_script_no_params"; + + public static class Factory implements NativeScriptFactory { + + @Override + public ExecutableScript newScript(@Nullable Map params) { + return new NativeSignificanceScoreScriptNoParams(); + } + } + + private NativeSignificanceScoreScriptNoParams() { + } + + @Override + public Object run() { + return _subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue(); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java new file mode 100644 index 00000000000..3568f8f6870 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.script; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.NativeScriptFactory; + +import java.util.Map; + +public class NativeSignificanceScoreScriptWithParams extends TestScript { + + public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_WITH_PARAMS = "native_significance_score_script_with_params"; + double factor = 0.0; + + public static class Factory implements NativeScriptFactory { + + @Override + public ExecutableScript newScript(@Nullable Map params) { + return new NativeSignificanceScoreScriptWithParams(params); + } + } + + private NativeSignificanceScoreScriptWithParams(Map params) { + factor = ((Number) params.get("param")).doubleValue(); + } + + @Override + public Object run() { + return factor * (_subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue()) / factor; + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java new file mode 100644 index 00000000000..3060d7af818 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.script; + +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic; + +public abstract class TestScript implements ExecutableScript{ + + ScriptHeuristic.LongAccessor _subset_freq; + ScriptHeuristic.LongAccessor _subset_size; + ScriptHeuristic.LongAccessor _superset_freq; + ScriptHeuristic.LongAccessor _superset_size; + + protected TestScript() { + } + + @Override + public void setNextVar(String name, Object value) { + if (name.equals("_subset_freq")) { + _subset_freq = (ScriptHeuristic.LongAccessor)value; + } + if (name.equals("_subset_size")) { + _subset_size = (ScriptHeuristic.LongAccessor)value; + } + if (name.equals("_superset_freq")) { + _superset_freq = (ScriptHeuristic.LongAccessor)value; + } + if (name.equals("_superset_size")) { + _superset_size = (ScriptHeuristic.LongAccessor)value; + } + } + + @Override + public Double unwrap(Object value) { + return ((Number) value).doubleValue(); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index 733dcfc9b85..c3fe8b94071 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -86,6 +86,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { SignificanceHeuristicStreams.registerStream(PercentageScore.STREAM, PercentageScore.STREAM.getName()); SignificanceHeuristicStreams.registerStream(GND.STREAM, GND.STREAM.getName()); SignificanceHeuristicStreams.registerStream(ChiSquare.STREAM, ChiSquare.STREAM.getName()); + SignificanceHeuristicStreams.registerStream(ScriptHeuristic.STREAM, ScriptHeuristic.STREAM.getName()); Version version = ElasticsearchIntegrationTest.randomVersion(); InternalSignificantTerms[] sigTerms = getRandomSignificantTerms(getRandomSignificanceheuristic()); diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java index 4d8a514d84c..7dfd0e050ce 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -91,11 +92,11 @@ public class ScriptedMetricTests extends ElasticsearchIntegrationTest { jsonBuilder().startObject().field("value", i * 2).endObject())); } - PutIndexedScriptResponse indexScriptResponse = client().preparePutIndexedScript("groovy", "initScript_indexed", "{\"script\":\"vars.multiplier = 3\"}").get(); + PutIndexedScriptResponse indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "initScript_indexed", 
"{\"script\":\"vars.multiplier = 3\"}").get(); assertThat(indexScriptResponse.isCreated(), equalTo(true)); - indexScriptResponse = client().preparePutIndexedScript("groovy", "mapScript_indexed", "{\"script\":\"_agg.add(vars.multiplier)\"}").get(); + indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "mapScript_indexed", "{\"script\":\"_agg.add(vars.multiplier)\"}").get(); assertThat(indexScriptResponse.isCreated(), equalTo(true)); - indexScriptResponse = client().preparePutIndexedScript("groovy", "combineScript_indexed", + indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "combineScript_indexed", "{\"script\":\"newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation\"}") .get(); assertThat(indexScriptResponse.isCreated(), equalTo(true)); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index fbd0af6f99a..e0ab41e6e86 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -1929,7 +1929,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get(); client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get(); client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get(); - client().admin().indices().prepareOptimize("test").setFlush(true).get(); + client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setFlush(true).get(); client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get(); client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get(); client().prepareIndex("test", "child", "c4").setParent("p3").setSource("c_field", "green").get(); diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java index 2a710e9cd37..710481f4134 100644 --- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java +++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java @@ -618,13 +618,30 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + // Directly refer to the second level: + response = client().prepareSearch("articles") + .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad")).innerHit(new QueryInnerHitBuilder())) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertSearchHit(response, 1, hasId("2")); + assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1)); + innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks"); + assertThat(innerHits.totalHits(), equalTo(1l)); + assertThat(innerHits.getHits().length, equalTo(1)); + assertThat(innerHits.getAt(0).getId(), equalTo("2")); + assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + 
assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks")); + assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); + response = client().prepareSearch("articles") .setQuery(nestedQuery("comments", nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad")))) .addInnerHit("comment", new InnerHitsBuilder.InnerHit() .setPath("comments") .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"))) - .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setPath("comments.remarks").setQuery(matchQuery("comments.remarks.message", "bad"))) - ).get(); + .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setPath("comments.remarks").setQuery(matchQuery("comments.remarks.message", "bad")))) + .get(); assertNoFailures(response); assertHitCount(response, 1); assertSearchHit(response, 1, hasId("2")); diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java index 3e966ced5a0..5f39c9da200 100644 --- a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java @@ -948,7 +948,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { if (optimize) { // make sure merging works just fine client().admin().indices().prepareFlush(INDEX).execute().actionGet(); - client().admin().indices().prepareOptimize(INDEX).execute().actionGet(); + client().admin().indices().prepareOptimize(INDEX).setMaxNumSegments(randomIntBetween(1, 5)).get(); } } diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java index 0a049cdd7cb..3926b98ac23 100644 --- a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java @@ -787,8 +787,8 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { .put("index.analysis.filter.shingler.min_shingle_size", 2) .put("index.analysis.filter.shingler.max_shingle_size", 5) .put("index.analysis.filter.shingler.output_unigrams", true)); - - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") + + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type2") .startObject("properties") .startObject("name") .field("type", "multi_field") @@ -801,7 +801,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { .endObject() .endObject() .endObject().endObject(); - assertAcked(builder.addMapping("type1", mapping)); + assertAcked(builder.addMapping("type2", mapping)); ensureGreen(); index("test", "type2", "1", "foo", "bar"); diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index 537f6a4355d..1ae81b31c52 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.snapshots; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import 
org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ExceptionsHelper; @@ -39,31 +40,32 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResp import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.SnapshotMetaData; +import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.*; +import org.elasticsearch.cluster.metadata.SnapshotMetaData.*; +import org.elasticsearch.cluster.metadata.SnapshotMetaData.State; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.support.AbstractIndexStore; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; import org.junit.Test; -import java.io.FileOutputStream; -import java.nio.channels.FileChannel; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; -import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -882,7 +884,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { logger.info("--> closing index test-idx-closed"); assertAcked(client.admin().indices().prepareClose("test-idx-closed")); ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get(); - assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(State.CLOSE)); + assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue()); logger.info("--> snapshot"); @@ -1665,6 +1667,67 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { } } + + @Test + public void deleteOrphanSnapshotTest() throws Exception { + Client client = client(); + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(ImmutableSettings.settingsBuilder() + .put("location", newTempDirPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000)) + )); + + createIndex("test-idx"); + ensureGreen(); + + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + + final CountDownLatch countDownLatch = new CountDownLatch(1); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", 
"test-snap").setWaitForCompletion(true).setIndices("test-idx").get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + logger.info("--> emulate an orphan snapshot"); + + clusterService.submitStateUpdateTask("orphan snapshot test", new ProcessedClusterStateUpdateTask() { + + @Override + public ClusterState execute(ClusterState currentState) { + // Simulate orphan snapshot + ImmutableMap.Builder shards = ImmutableMap.builder(); + shards.put(new ShardId("test-idx", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED)); + shards.put(new ShardId("test-idx", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED)); + shards.put(new ShardId("test-idx", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED)); + ImmutableList.Builder entries = ImmutableList.builder(); + entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, ImmutableList.of("test-idx"), System.currentTimeMillis(), shards.build())); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build())); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { + countDownLatch.countDown(); + } + }); + + countDownLatch.await(); + logger.info("--> try deleting the orphan snapshot"); + + assertAcked(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get("10s")); + + } + private boolean waitForIndex(final String index, TimeValue timeout) throws InterruptedException { return awaitBusy(new Predicate() { @Override diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index c21a81db585..1f8923d8be3 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1229,7 +1229,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ protected OptimizeResponse optimize() { waitForRelocation(); - OptimizeResponse actionGet = client().admin().indices().prepareOptimize().execute().actionGet(); + OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet(); assertNoFailures(actionGet); return actionGet; } diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 150d40435fa..09d982dd7a4 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -241,7 +241,7 @@ public abstract class ElasticsearchTestCase extends AbstractRandomizedTest { Requests.INDEX_CONTENT_TYPE = randomXContentType(); } - private static XContentType randomXContentType() { + public static XContentType randomXContentType() { return randomFrom(XContentType.values()); } diff --git a/src/test/java/org/elasticsearch/test/ExternalNode.java b/src/test/java/org/elasticsearch/test/ExternalNode.java index 45f543cf82b..69d7e886b3f 100644 --- 
a/src/test/java/org/elasticsearch/test/ExternalNode.java +++ b/src/test/java/org/elasticsearch/test/ExternalNode.java @@ -46,7 +46,6 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.TimeUnit; -import static junit.framework.Assert.assertFalse; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; /** @@ -136,12 +135,13 @@ final class ExternalNode implements Closeable { builder.inheritIO(); boolean success = false; try { - logger.debug("starting external node [{}] with: {}", nodeName, builder.command()); + logger.info("starting external node [{}] with: {}", nodeName, builder.command()); process = builder.start(); this.nodeInfo = null; if (waitForNode(client, nodeName)) { nodeInfo = nodeInfo(client, nodeName); assert nodeInfo != null; + logger.info("external node {} found, version [{}], build {}", nodeInfo.getNode(), nodeInfo.getVersion(), nodeInfo.getBuild()); } else { throw new IllegalStateException("Node [" + nodeName + "] didn't join the cluster"); } diff --git a/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java b/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java index d9acd717430..7ded36f3809 100644 --- a/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java +++ b/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java @@ -66,6 +66,8 @@ public class RepeatOnExceptionRule implements TestRule { if (t.getClass().equals(expectedException)) { caughtThrowable = t; logger.info("Exception [{}] occurred, rerunning the test after [{}] failures", t, t.getClass().getSimpleName(), i+1); + } else { + throw t; } } } diff --git a/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java new file mode 100644 index 00000000000..928cd2e7955 --- /dev/null +++ b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import com.google.common.base.Charsets; +import org.elasticsearch.Version; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.InetSocketTransportAddress; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.cache.recycler.MockBigArrays; +import org.elasticsearch.test.cache.recycler.MockPageCacheRecycler; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.netty.NettyTransport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.BufferedReader; +import java.io.InputStreamReader; +import java.net.Socket; + +import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.hamcrest.Matchers.is; + +/** + * This test checks that an HTTP look-alike request (starting with an HTTP method and a space) + * actually returns a text response instead of the connection simply being dropped + */ +public class NettySizeHeaderFrameDecoderTests extends ElasticsearchTestCase { + + private final Settings settings = settingsBuilder().put("name", "foo").put("transport.host", "127.0.0.1").build(); + + private ThreadPool threadPool; + private NettyTransport nettyTransport; + private int port; + private String host; + + @Before + public void startThreadPool() { + threadPool = new ThreadPool(settings, new NodeSettingsService(settings)); + + NetworkService networkService = new NetworkService(settings); + BigArrays bigArrays = new MockBigArrays(settings, new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); + nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT); + nettyTransport.start(); + TransportService transportService = new TransportService(nettyTransport, threadPool); + nettyTransport.transportServiceAdapter(transportService.createAdapter()); + + InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) nettyTransport.boundAddress().boundAddress(); + port = transportAddress.address().getPort(); + host = transportAddress.address().getHostString(); + + } + + @After + public void terminateThreadPool() throws InterruptedException { + nettyTransport.stop(); + terminate(threadPool); + } + + @Test + public void testThatTextMessageIsReturnedOnHTTPLikeRequest() throws Exception { + String randomMethod = randomFrom("GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"); + String data = randomMethod + " / HTTP/1.1"; + + try (Socket socket = new Socket(host, port)) { + socket.getOutputStream().write(data.getBytes(Charsets.UTF_8)); + socket.getOutputStream().flush(); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8))) { + assertThat(reader.readLine(), is("This is not a HTTP port")); + } + } + } + + @Test + public void testThatNothingIsReturnedForOtherInvalidPackets() throws Exception { + try (Socket socket = new Socket(host, port)) { + socket.getOutputStream().write("FOOBAR".getBytes(Charsets.UTF_8)); + socket.getOutputStream().flush(); + + // end of stream + assertThat(socket.getInputStream().read(), is(-1)); + } + } +} diff --git a/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
b/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java new file mode 100644 index 00000000000..c857820ea44 --- /dev/null +++ b/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.tribe; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeBuilder; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.equalTo; + +/** + * This test doesn't extend {@link org.elasticsearch.test.ElasticsearchIntegrationTest} as the internal cluster ignores system properties + * all the time, while we need to make the tribe node accept them in this case, so that we can verify that they are not read again as part + * of the tribe client nodes' initialization. Note that the started nodes will obey the 'node.mode' setting as the internal cluster does.
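+ * For reference, the tribe node hands the per-tribe settings ('tribe.t1.*', 'tribe.t2.*') to the corresponding tribe client with the + * 'tribe.<name>.' prefix stripped, so 'es.tribe.t1.cluster.name=tribe1' set below should end up as 'cluster.name' on the t1 client only.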
+ */ +public class TribeUnitTests extends ElasticsearchTestCase { + + private static Node tribe1; + private static Node tribe2; + + private static final String NODE_MODE = InternalTestCluster.nodeMode(); + + @BeforeClass + public static void createTribes() { + tribe1 = NodeBuilder.nodeBuilder().settings(ImmutableSettings.builder().put("config.ignore_system_properties", true).put("http.enabled", false) + .put("node.mode", NODE_MODE).put("cluster.name", "tribe1").put("node.name", "tribe1_node")).node(); + tribe2 = NodeBuilder.nodeBuilder().settings(ImmutableSettings.builder().put("config.ignore_system_properties", true).put("http.enabled", false) + .put("node.mode", NODE_MODE).put("cluster.name", "tribe2").put("node.name", "tribe2_node")).node(); + + } + + @AfterClass + public static void closeTribes() { + tribe1.close(); + tribe1 = null; + tribe2.close(); + tribe2 = null; + } + + @Test + public void testThatTribeClientsIgnoreGlobalSysProps() throws Exception { + System.setProperty("es.cluster.name", "tribe_node_cluster"); + System.setProperty("es.tribe.t1.cluster.name", "tribe1"); + System.setProperty("es.tribe.t2.cluster.name", "tribe2"); + + try { + assertTribeNodeSuccessfullyCreated(ImmutableSettings.EMPTY); + } finally { + System.clearProperty("es.cluster.name"); + System.clearProperty("es.tribe.t1.cluster.name"); + System.clearProperty("es.tribe.t2.cluster.name"); + } + } + + @Test + public void testThatTribeClientsIgnoreGlobalConfig() throws Exception { + Path pathConf = Paths.get(TribeUnitTests.class.getResource("elasticsearch.yml").toURI()).getParent(); + Settings settings = ImmutableSettings.builder().put("config.ignore_system_properties", true).put("path.conf", pathConf).build(); + assertTribeNodeSuccessfullyCreated(settings); + } + + private static void assertTribeNodeSuccessfullyCreated(Settings extraSettings) throws Exception { + // the tribe node doesn't need the node.mode setting, as it's forced local internally anyway.
The tribe clients do need it to make sure + //they can find their corresponding tribes using the proper transport + Settings settings = ImmutableSettings.builder().put("http.enabled", false).put("node.name", "tribe_node") + .put("tribe.t1.node.mode", NODE_MODE).put("tribe.t2.node.mode", NODE_MODE).put(extraSettings).build(); + + try (Node node = NodeBuilder.nodeBuilder().settings(settings).node()) { + try (Client client = node.client()) { + assertBusy(new Runnable() { + @Override + public void run() { + ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState(); + assertThat(state.getClusterName().value(), equalTo("tribe_node_cluster")); + assertThat(state.getNodes().getSize(), equalTo(5)); + for (DiscoveryNode discoveryNode : state.getNodes()) { + assertThat(discoveryNode.getName(), either(equalTo("tribe1_node")).or(equalTo("tribe2_node")).or(equalTo("tribe_node")) + .or(equalTo("tribe_node/t1")).or(equalTo("tribe_node/t2"))); + } + } + }); + } + } + } +} diff --git a/src/test/java/org/elasticsearch/tribe/elasticsearch.yml b/src/test/java/org/elasticsearch/tribe/elasticsearch.yml new file mode 100644 index 00000000000..89f4922a6af --- /dev/null +++ b/src/test/java/org/elasticsearch/tribe/elasticsearch.yml @@ -0,0 +1,3 @@ +cluster.name: tribe_node_cluster +tribe.t1.cluster.name: tribe1 +tribe.t2.cluster.name: tribe2 \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java index a9f76b5b886..b58f6342dfa 100644 --- a/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java +++ b/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java @@ -22,10 +22,7 @@ import com.google.common.collect.Maps; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.AbstractExecutableScript; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.NativeScriptFactory; -import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.*; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; @@ -61,7 +58,7 @@ public class UpdateByNativeScriptTests extends ElasticsearchIntegrationTest { params.put("foo", "SETVALUE"); client().prepareUpdate("test", "type", "1") .setScript("custom", ScriptService.ScriptType.INLINE) - .setScriptLang("native").setScriptParams(params).get(); + .setScriptLang(NativeScriptEngineService.NAME).setScriptParams(params).get(); Map data = client().prepareGet("test", "type", "1").get().getSource(); assertThat(data, hasKey("foo")); diff --git a/src/test/java/org/elasticsearch/update/UpdateTests.java b/src/test/java/org/elasticsearch/update/UpdateTests.java index 844e498b152..3c0581b3206 100644 --- a/src/test/java/org/elasticsearch/update/UpdateTests.java +++ b/src/test/java/org/elasticsearch/update/UpdateTests.java @@ -19,10 +19,7 @@ package org.elasticsearch.update; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -34,16 
+31,13 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.transport.NoNodeAvailableException; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.index.merge.policy.AbstractMergePolicyProvider; import org.elasticsearch.index.merge.policy.MergePolicyModule; -import org.elasticsearch.index.store.Store; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.index.merge.NoMergePolicyProvider; diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip index 7897f81783b..749a9c48f0f 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip index 136fd568f06..e83238295bf 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip index 324be48ae1e..86629fc7911 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip index 73b35f1e1b2..de9a803c774 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip index af845b2a919..ff213efadc6 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip index b7560ee1b9d..070d74d8bef 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip index 56d67c2e03b..c817a71a119 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip index 
8f52995b6e9..0e22c872f9e 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip index 9ef5e45b9de..8a6a4a5113d 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip index 70e5fc2fa63..7ee7f1d7cdf 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip index 12f6323b542..152b03e236f 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip index 08ae1ca1c24..163f040a794 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip index 8cf010f654b..f2155a99d3f 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip index bdb50d2b597..c5f0ecacdf8 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip index 421518ffc46..e00caacafd3 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip index 6e86732c0e6..bf7953b43a8 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip index 3ce9db4fd1d..f3d32b03b07 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip index 82c7a7a0667..b4b4917a3bd 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip differ diff --git 
a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip index 68e85052012..16b1f0001bf 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip index 5c522c2175c..d71aa6d43e4 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip index bf4f018ba53..8b1867f3b0a 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip index f9ed6d3f9f7..91b11b8494b 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip index f0918c37d5d..dd8b486663d 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip index 2513a18af52..c22915d404e 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip index 00635987d1b..d9bdd1286e6 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip index 348edec318d..961aa6deec7 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip index 709817d39ce..415b865a9bb 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip index 891e343e265..4bfd18ee372 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip index 8c9d3a26bb8..590ce24ecd3 100644 Binary files 
a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip index 7a7e55268ee..f084d38be42 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip index ce4fd81e280..393f4343d13 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip index b9c6d260c90..0d64ad012b0 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip index 0563239c263..1cc0904de5d 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip index 25e2304e0f3..df1c4edc208 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip index 6c30c495265..c8804837b7a 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip index 7de7e9c6add..d17d37a6115 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip index e0439e648e4..b2c13c863b8 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip index 64eb5148fd3..650016f1131 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip index 15a896ad72b..c590ba0b513 100644 Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip 
b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip
index 57a3dee6455..d3f51178334 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip
index e4b486de7cd..8ae792c76f7 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip
index e50ca7979d9..42e52aa39d0 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip
index 38be7f5dfa4..3b4405266fd 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip
index 0c37f560f92..03bd946dbcd 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip
index 6602a1dd6ad..cfa97722b82 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip
index 0b585fd7c2a..79899763ecf 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip
index fd89d68ddb3..e6099fa2968 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip
index e7ea9422166..d480a5a4542 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip
index 3c9cd90724d..d2a51e721a7 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip
index d29252485de..925dbff38f8 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip
index c3640d49c21..ecc5cfa4df4 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip
index 013d5385eb1..7ae36861e2b 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip
index 56fea8da303..ed2921470ab 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip
index 79abf532e0f..ea59e0136ac 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip
index 71a6a9b96a2..476245b4c59 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip
index 1290fe64f76..672e0790799 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip
index 4d410ca9a32..9a0eea3ea47 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip
index 44d5cfb65b7..866b8ce4b94 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip
index 86b1d6e2a70..98762dd2cfc 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip
index 19ef3826987..dcc887e99f8 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip
index adbfe2d5166..5599a98fe1b 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip
index 817e0a43d8b..cde0263f545 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip
index 09089692148..ab1dd28df06 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip
index ba0fff12857..1d5b7c7a5cd 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip
index 55559e4c302..86357e75449 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip
index fda1fcbcb44..9e2a4d8669a 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip
index c216f2daaf7..7807587a731 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip
index 807c43d7ccd..6612969fc7b 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip
index 2e78a423a6d..ea1fb9a13bb 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip
index f1b57bceaf9..06c8cb3c60c 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip
index d0722d24cfe..0216155622c 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip
index a877bf58853..c6bb71ac460 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip
index fc5a72bd3de..643f7952af8 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip
index 01a87420149..6230bbb857b 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip
index 9ff02d07708..99125a4490a 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip differ
diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip
index 8271857ef0b..ae6560aa69b 100644
Binary files a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip differ
diff --git a/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression b/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression
new file mode 100644
index 00000000000..9a037142aa3
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression
@@ -0,0 +1 @@
+10
\ No newline at end of file
diff --git a/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy b/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy
new file mode 100644
index 00000000000..7178e05efbd
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy
@@ -0,0 +1 @@
+return _subset_freq + _subset_size + _superset_freq + _superset_size
diff --git a/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy b/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy
new file mode 100644
index 00000000000..0099a531fd2
--- /dev/null
+++ b/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy
@@ -0,0 +1 @@
+return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param
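
The two Groovy files above are on-disk scripts for the scripted significance heuristic of the `significant_terms` aggregation: the script is evaluated once per candidate term with the `_subset_freq`, `_subset_size`, `_superset_freq` and `_superset_size` variables bound, and its return value becomes the bucket score (`script1.expression` is an analogous on-disk Lucene expressions script that simply returns 10). Below is a minimal sketch, not part of the diff, of how such a file script might be exercised from Python with the same `elasticsearch` client the dev-tools scripts use; the node address, the index name `test`, the field name `text` and the query term are all assumptions for illustration.

---------------------------------------------------------------------------
# Hypothetical driver for the new on-disk significance scripts. Assumes an
# Elasticsearch 1.x node on localhost:9200 whose config/scripts directory
# contains significance_script_with_params.groovy, plus an index named
# 'test' with an analyzed string field named 'text'.
from elasticsearch import Elasticsearch

client = Elasticsearch(['localhost:9200'])

body = {
    'query': {'match': {'text': 'foo'}},
    'aggregations': {
        'sig_terms': {
            'significant_terms': {
                'field': 'text',
                # script_heuristic replaces the built-in significance
                # scoring; the file script is referenced by name and sees
                # _subset_freq, _subset_size, _superset_freq, _superset_size.
                'script_heuristic': {
                    'script': 'significance_script_with_params',
                    'params': {'param': 3}
                }
            }
        }
    }
}

result = client.search(index='test', body=body)
for bucket in result['aggregations']['sig_terms']['buckets']:
    print('%s scored %s' % (bucket['key'], bucket['score']))
---------------------------------------------------------------------------

Because the scripts live under a `config/scripts` directory they are referenced by file name; `significance_script_with_params` additionally receives its `param` value through the `params` map, while `significance_script_no_params` would be referenced the same way with `params` omitted.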