diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 5ea58046f49..53e964188f6 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -3,7 +3,11 @@
 GitHub is reserved for bug reports and feature requests. The best place
 to ask a general question is at the Elastic Discourse forums at
 https://discuss.elastic.co. If you are in fact posting a bug report or
 a feature request, please include one and only one of the below blocks
-in your new issue.
+in your new issue. Note that whether you're filing a bug report or a
+feature request, ensure that your submission is for an
+[OS that we support](https://www.elastic.co/support/matrix#show_os).
+Bug reports on an OS that we do not support or feature requests
+specific to an OS that we do not support will be closed.
 -->
 
 - Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
-- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/.github/CONTRIBUTING.md)?
+- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md)?
 - If submitting code, have you built your formula locally prior to submission with `gradle check`?
 - If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
 - If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 1a4e5b58f33..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: java
-jdk:
-  - openjdk7
-
-env:
-  - ES_TEST_LOCAL=true
-  - ES_TEST_LOCAL=false
-
-notifications:
-  email: false
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9afcd34fad7..b0f1e054e46 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -71,12 +71,47 @@ Once your changes and tests are ready to submit for review:
 
 Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.
 
+Please adhere to the general guideline that you should never force push
+to a publicly shared branch. Once you have opened your pull request, you
+should consider your branch publicly shared. Instead of force pushing,
+you can just add incremental commits; this is generally easier on your
+reviewers. If you need to pick up changes from master, you can merge
+master into your branch. A reviewer might ask you to rebase a
+long-running pull request, in which case force pushing is okay for that
+request. Note that squashing at the end of the review process should
+also not be done; that can be done when the pull request is [integrated
+via GitHub](https://github.com/blog/2141-squash-your-commits).
+
 Contributing to the Elasticsearch codebase
 ------------------------------------------
 
 **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
 
-Make sure you have [Gradle](http://gradle.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors.
+Make sure you have [Gradle](http://gradle.org) installed, as
+Elasticsearch uses it as its build system.
+
+Eclipse users can automatically configure their IDE: `gradle eclipse`
+then `File: Import: Existing Projects into Workspace`. Select the
+option `Search for nested projects`. Additionally, you will want to
+ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini`
+accordingly to avoid GC overhead errors.
+
+IntelliJ users can automatically configure their IDE: `gradle idea`
+then `File->New Project From Existing Sources`. Point to the root of
+the source directory, select
+`Import project from external model->Gradle`, enable
+`Use auto-import`.
+
+The Elasticsearch codebase makes heavy use of Java `assert`s and the
+test runner requires that assertions be enabled within the JVM. This
+can be accomplished by passing the flag `-ea` to the JVM on startup.
+
+For IntelliJ, go to
+`Run->Edit Configurations...->Defaults->JUnit->VM options` and input
+`-ea`.
+
+For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to
+`VM Arguments`.
 
 Please follow these formatting guidelines:
diff --git a/README.textile b/README.textile
index 5c75844b108..69d3fd54767 100644
--- a/README.textile
+++ b/README.textile
@@ -50,19 +50,19 @@ h3. Indexing
 
 Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/twitter/user/kimchy?pretty' -d '{ "name" : "Shay Banon" }'
 
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?pretty' -d '
 {
     "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
+    "post_date": "2009-11-15T13:12:00",
     "message": "Trying out Elasticsearch, so far so good?"
 }'
 
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/2?pretty' -d '
 {
     "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
+    "post_date": "2009-11-15T14:12:12",
     "message": "Another tweet, will it be indexed?"
 }'
 
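Before moving on to search, it may help to confirm what was just indexed. A minimal sketch, assuming a node is listening on localhost:9200 as in the examples above:

```sh
# Fetch tweet 1 back by ID; ?pretty formats the JSON response.
curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty'
```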
@@ -101,7 +101,7 @@ Just for kicks, let's get all the documents stored (we should see the user as we
 
 curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
 {
     "query" : {
-        "matchAll" : {}
+        "match_all" : {}
     }
 }'
@@ -113,7 +113,7 @@ curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
 {
     "query" : {
         "range" : {
-            "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
+            "post_date" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
         }
     }
 }'
@@ -130,19 +130,19 @@ Elasticsearch supports multiple indices, as well as multiple types per index. In
 
 Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/kimchy/info/1?pretty' -d '{ "name" : "Shay Banon" }'
 
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/1?pretty' -d '
 {
     "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
+    "post_date": "2009-11-15T13:12:00",
     "message": "Trying out Elasticsearch, so far so good?"
 }'
 
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/2?pretty' -d '
 {
     "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
+    "post_date": "2009-11-15T14:12:12",
     "message": "Another tweet, will it be indexed?"
 }'
 
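The same document GET works against the per-user index, for example to read back the profile document just indexed (again assuming a local node on the default port):

```sh
# Fetch the profile document from the kimchy index.
curl -XGET 'http://localhost:9200/kimchy/info/1?pretty'
```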
@@ -152,11 +152,11 @@ The above will index information into the @kimchy@ index, with two types, @info@
 
 Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
-curl -XPUT http://localhost:9200/another_user/ -d '
+curl -XPUT http://localhost:9200/another_user?pretty -d '
 {
     "index" : {
-        "numberOfShards" : 1,
-        "numberOfReplicas" : 1
+        "number_of_shards" : 1,
+        "number_of_replicas" : 1
     }
 }'
 
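To check that the shard and replica settings took effect, the index settings can be read back. A minimal sketch under the same local-node assumption:

```sh
# The response should report number_of_shards: 1 and number_of_replicas: 1.
curl -XGET 'http://localhost:9200/another_user/_settings?pretty'
```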
@@ -168,7 +168,7 @@ index (twitter user), for example:
 
 curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
 {
     "query" : {
-        "matchAll" : {}
+        "match_all" : {}
     }
 }'
@@ -179,7 +179,7 @@ Or on all the indices:
 
 curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
 {
     "query" : {
-        "matchAll" : {}
+        "match_all" : {}
     }
 }'
@@ -196,15 +196,15 @@ In order to play with the distributed nature of Elasticsearch, simply bring more
 
 h3. Where to go from here?
 
-We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
+We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website. General questions can be asked on the "Elastic Discourse forum":https://discuss.elastic.co or on IRC on Freenode at "#elasticsearch":https://webchat.freenode.net/#elasticsearch. The Elasticsearch GitHub repository is reserved for bug reports and feature requests only.
 
 h3. Building from Source
 
-Elasticsearch uses "Gradle":http://gradle.org for its build system. You'll need to have a modern version of Gradle installed - 2.8 should do.
+Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have a modern version of Gradle installed - 2.13 should do.
 
-In order to create a distribution, simply run the @gradle build@ command in the cloned directory.
+In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory.
 
-The distribution for each project will be created under the @target/releases@ directory in that project.
+The distribution for each project will be created under the @build/distributions@ directory in that project.
 
 See the "TESTING":TESTING.asciidoc file for more information about running the Elasticsearch test suite.
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 5eea0b8c163..44eda08020a 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -18,24 +18,18 @@ gradle assemble
 
 == Other test options
 
-To disable and enable network transport, set the `Des.node.mode`.
+To disable and enable network transport, set the `tests.es.node.mode` system property.
 
 Use network transport:
 
 ------------------------------------
--Des.node.mode=network
+-Dtests.es.node.mode=network
 ------------------------------------
 
 Use local transport (default since 1.3):
 
 -------------------------------------
--Des.node.mode=local
--------------------------------------
-
-Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
-
--------------------------------------
-export ES_TEST_LOCAL=true && gradle test
+-Dtests.es.node.mode=local
 -------------------------------------
 
 === Running Elasticsearch from a checkout
@@ -201,7 +195,7 @@ gradle test -Dtests.timeoutSuite=5000! ...
 
 Change the logging level of ES (not gradle)
 
 --------------------------------
-gradle test -Des.logger.level=DEBUG
+gradle test -Dtests.es.logger.level=DEBUG
 --------------------------------
 
 Print all the logging output from the test runs to the commandline
 
@@ -302,7 +296,7 @@ gradle :distribution:integ-test-zip:integTest \
   -Dtests.method="test {p0=cat.shards/10_basic/Help}"
 ---------------------------------------------------------------------------
 
-`RestNIT` are the executable test classes that runs all the
+`RestIT` are the executable test classes that run all the
 yaml suites available within the `rest-api-spec` folder.
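Putting the renamed system properties together, a typical test invocation might look like the following sketch, combining the transport mode and logging options described above:

```sh
# Run the test suite with network transport and DEBUG-level Elasticsearch logging.
gradle test -Dtests.es.node.mode=network -Dtests.es.logger.level=DEBUG
```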
 The REST tests support all the options provided by the randomized runner, plus the following:
diff --git a/Vagrantfile b/Vagrantfile
index 4f8ee7164f6..423b50038e0 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
   # debian and it works fine.
   config.vm.define "debian-8" do |config|
     config.vm.box = "elastic/debian-8-x86_64"
-    deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
+    deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
   end
   config.vm.define "centos-6" do |config|
     config.vm.box = "elastic/centos-6-x86_64"
@@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
     config.vm.box = "elastic/oraclelinux-7-x86_64"
     rpm_common config
   end
-  config.vm.define "fedora-22" do |config|
-    config.vm.box = "elastic/fedora-22-x86_64"
+  config.vm.define "fedora-24" do |config|
+    config.vm.box = "elastic/fedora-24-x86_64"
     dnf_common config
   end
   config.vm.define "opensuse-13" do |config|
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 00000000000..03aaac7f3c4
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,62 @@
+# Elasticsearch Microbenchmark Suite
+
+This directory contains the microbenchmark suite of Elasticsearch. It relies on [JMH](http://openjdk.java.net/projects/code-tools/jmh/).
+
+## Purpose
+
+We do not want to microbenchmark everything and the kitchen sink and should typically rely on our
+[macrobenchmarks](https://elasticsearch-benchmarks.elastic.co/app/kibana#/dashboard/Nightly-Benchmark-Overview) with
+[Rally](http://github.com/elastic/rally). Microbenchmarks are intended to spot performance regressions in performance-critical components.
+The microbenchmark suite is also handy for ad-hoc microbenchmarks but please remove them again before merging your PR.
+
+## Getting Started
+
+Just run `gradle :benchmarks:jmh` from the project root directory. It will build all microbenchmarks, execute them and print the result.
+
+## Running Microbenchmarks
+
+Benchmarks are always run via Gradle with `gradle :benchmarks:jmh`.
+
+Running via an IDE is not supported as the results are meaningless (we have no control over the JVM running the benchmarks).
+
+If you want to run a specific benchmark class, e.g. `org.elasticsearch.benchmark.MySampleBenchmark`, or have special requirements,
+generate the uberjar with `gradle :benchmarks:jmhJar` and run it directly with:
+
+```
+java -jar benchmarks/build/distributions/elasticsearch-benchmarks-*.jar
+```
+
+JMH supports lots of command line parameters. Add `-h` to the command above to see the available command line options.
+
+## Adding Microbenchmarks
+
+Before adding a new microbenchmark, make yourself familiar with the JMH API. You can check our existing microbenchmarks and also the
+[JMH samples](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/).
+
+In contrast to tests, the actual name of the benchmark class is not relevant to JMH. However, stick to the naming convention and
+end the class name of a benchmark with `Benchmark`. To have JMH execute a benchmark, annotate the respective methods with `@Benchmark`.
+
+## Tips and Best Practices
+
+To get realistic results, you should exercise care when running benchmarks. Here are a few tips:
+
+### Do
+
+* Ensure that the system executing your microbenchmarks has as little load as possible. Shut down every process that can cause unnecessary
+  runtime jitter. Watch the `Error` column in the benchmark results to see the run-to-run variance.
+* Make sure to run enough warmup iterations to get the benchmark into a stable state. If you are unsure, don't change the defaults.
+* Avoid CPU migrations by pinning your benchmarks to specific CPU cores. On Linux you can use `taskset`.
+* Fix the CPU frequency to prevent Turbo Boost from kicking in and skewing your results. On Linux you can use `cpufreq-set` and the
+  `performance` CPU governor.
+* Vary the problem input size with `@Param`.
+* Use the integrated profilers in JMH to dig deeper if benchmark results do not match your hypotheses:
+  * Run the generated uberjar directly and use `-prof gc` to check whether the garbage collector runs during a microbenchmark and skews
+    your results. If so, try to force a GC between runs (`-gc true`) but watch out for the caveats.
+  * Use `-prof perf` or `-prof perfasm` (both only available on Linux) to see hotspots.
+* Have your benchmarks peer-reviewed.
+
+### Don't
+
+* Blindly believe the numbers that your microbenchmark produces but verify them by measuring e.g. with `-prof perfasm`.
+* Run more threads than your number of CPU cores (in case you run multi-threaded microbenchmarks).
+* Look only at the `Score` column and ignore `Error`. Instead, take countermeasures to keep `Error` low / variance explainable.
\ No newline at end of file
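Beyond the plain invocation above, the uberjar accepts the usual JMH arguments: the first positional argument is a regular expression selecting benchmark classes, and `-prof gc` enables the GC profiler mentioned in the tips. A sketch, using the `AllocationBenchmark` class added later in this patch:

```sh
# Run only AllocationBenchmark and profile garbage collection activity.
java -jar benchmarks/build/distributions/elasticsearch-benchmarks-*.jar AllocationBenchmark -prof gc
```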
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
new file mode 100644
index 00000000000..186fdca44ea
--- /dev/null
+++ b/benchmarks/build.gradle
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+buildscript {
+  repositories {
+    maven {
+      url 'https://plugins.gradle.org/m2/'
+    }
+  }
+  dependencies {
+    classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.3'
+  }
+}
+
+apply plugin: 'elasticsearch.build'
+// build an uberjar with all benchmarks
+apply plugin: 'com.github.johnrengelman.shadow'
+// have the shadow plugin provide the runShadow task
+apply plugin: 'application'
+
+archivesBaseName = 'elasticsearch-benchmarks'
+mainClassName = 'org.openjdk.jmh.Main'
+
+// never try to invoke tests on the benchmark project - there aren't any
+check.dependsOn.remove(test)
+// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
+task test(type: Test, overwrite: true)
+
+dependencies {
+  compile("org.elasticsearch:elasticsearch:${version}") {
+    // JMH ships with the conflicting version 4.6 (JMH will not update this dependency as it is Java 6 compatible and joptsimple is one
+    // of the most recent compatible version). This prevents us from using jopt-simple in benchmarks (which should be ok) but allows us
+    // to invoke the JMH uberjar as usual.
+    exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
+  }
+  compile "org.openjdk.jmh:jmh-core:$versions.jmh"
+  compile "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh"
+  // Dependencies of JMH
+  runtime 'net.sf.jopt-simple:jopt-simple:4.6'
+  runtime 'org.apache.commons:commons-math3:3.2'
+}
+
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+
+forbiddenApis {
+  // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes
+  ignoreFailures = true
+}
+
+// No licenses for our benchmark deps (we don't ship benchmarks)
+dependencyLicenses.enabled = false
+
+thirdPartyAudit.excludes = [
+  // these classes intentionally use JDK internal API (and this is ok since the project is maintained by Oracle employees)
+  'org.openjdk.jmh.profile.AbstractHotspotProfiler',
+  'org.openjdk.jmh.profile.HotspotThreadProfiler',
+  'org.openjdk.jmh.profile.HotspotClassloadingProfiler',
+  'org.openjdk.jmh.profile.HotspotCompilationProfiler',
+  'org.openjdk.jmh.profile.HotspotMemoryProfiler',
+  'org.openjdk.jmh.profile.HotspotRuntimeProfiler',
+  'org.openjdk.jmh.util.Utils'
+]
+
+shadowJar {
+  classifier = 'benchmarks'
+}
+
+// alias the shadowJar and runShadow tasks to abstract from the concrete plugin that we are using and provide a more consistent interface
+task jmhJar(
+  dependsOn: shadowJar,
+  description: 'Generates an uberjar with the microbenchmarks and all dependencies',
+  group: 'Benchmark'
+)
+
+task jmh(
+  dependsOn: runShadow,
+  description: 'Runs all microbenchmarks',
+  group: 'Benchmark'
+)
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
new file mode 100644
index 00000000000..5e5f35f6040
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.benchmark.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.Settings; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +@Fork(3) +@Warmup(iterations = 10) +@Measurement(iterations = 10) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +@SuppressWarnings("unused") //invoked by benchmarking framework +public class AllocationBenchmark { + // Do NOT make any field final (even if it is not annotated with @Param)! See also + // http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_10_ConstantFold.java + + // we cannot use individual @Params as some will lead to invalid combinations which do not let the benchmark terminate. JMH offers no + // support to constrain the combinations of benchmark parameters and we do not want to rely on OptionsBuilder as each benchmark would + // need its own main method and we cannot execute more than one class with a main method per JAR. 
+ @Param({ + // indices, shards, replicas, nodes + " 10, 1, 0, 1", + " 10, 3, 0, 1", + " 10, 10, 0, 1", + " 100, 1, 0, 1", + " 100, 3, 0, 1", + " 100, 10, 0, 1", + + " 10, 1, 0, 10", + " 10, 3, 0, 10", + " 10, 10, 0, 10", + " 100, 1, 0, 10", + " 100, 3, 0, 10", + " 100, 10, 0, 10", + + " 10, 1, 1, 10", + " 10, 3, 1, 10", + " 10, 10, 1, 10", + " 100, 1, 1, 10", + " 100, 3, 1, 10", + " 100, 10, 1, 10", + + " 10, 1, 2, 10", + " 10, 3, 2, 10", + " 10, 10, 2, 10", + " 100, 1, 2, 10", + " 100, 3, 2, 10", + " 100, 10, 2, 10", + + " 10, 1, 0, 50", + " 10, 3, 0, 50", + " 10, 10, 0, 50", + " 100, 1, 0, 50", + " 100, 3, 0, 50", + " 100, 10, 0, 50", + + " 10, 1, 1, 50", + " 10, 3, 1, 50", + " 10, 10, 1, 50", + " 100, 1, 1, 50", + " 100, 3, 1, 50", + " 100, 10, 1, 50", + + " 10, 1, 2, 50", + " 10, 3, 2, 50", + " 10, 10, 2, 50", + " 100, 1, 2, 50", + " 100, 3, 2, 50", + " 100, 10, 2, 50" + }) + public String indicesShardsReplicasNodes = "10,1,0,1"; + + public int numTags = 2; + + private AllocationService strategy; + private ClusterState initialClusterState; + + @Setup + public void setUp() throws Exception { + final String[] params = indicesShardsReplicasNodes.split(","); + + int numIndices = toInt(params[0]); + int numShards = toInt(params[1]); + int numReplicas = toInt(params[2]); + int numNodes = toInt(params[3]); + + strategy = Allocators.createAllocationService(Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "tag") + .build()); + + MetaData.Builder mb = MetaData.builder(); + for (int i = 1; i <= numIndices; i++) { + mb.put(IndexMetaData.builder("test_" + i) + .settings(Settings.builder().put("index.version.created", Version.CURRENT)) + .numberOfShards(numShards) + .numberOfReplicas(numReplicas) + ); + } + MetaData metaData = mb.build(); + RoutingTable.Builder rb = RoutingTable.builder(); + for (int i = 1; i <= numIndices; i++) { + rb.addAsNew(metaData.index("test_" + i)); + } + RoutingTable routingTable = rb.build(); + DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); + for (int i = 1; i <= numNodes; i++) { + nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags)))); + } + initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).nodes + (nb).build(); + } + + private int toInt(String v) { + return Integer.valueOf(v.trim()); + } + + @Benchmark + public ClusterState measureAllocation() { + ClusterState clusterState = initialClusterState; + while (clusterState.getRoutingNodes().hasUnassignedShards()) { + RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes() + .shardsWithState(ShardRoutingState.INITIALIZING)); + clusterState = ClusterState.builder(clusterState).routingResult(result).build(); + result = strategy.reroute(clusterState, "reroute"); + clusterState = ClusterState.builder(clusterState).routingResult(result).build(); + } + return clusterState; + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java new file mode 100644 index 00000000000..97fbda80dc6 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.benchmark.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.gateway.GatewayAllocator; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public final class Allocators { + private static class NoopGatewayAllocator extends GatewayAllocator { + public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); + + protected NoopGatewayAllocator() { + super(Settings.EMPTY, null, null); + } + + @Override + public void applyStartedShards(StartedRerouteAllocation allocation) { + // noop + } + + @Override + public void applyFailedShards(FailedRerouteAllocation allocation) { + // noop + } + + @Override + public boolean allocateUnassigned(RoutingAllocation allocation) { + return false; + } + } + + private Allocators() { + throw new AssertionError("Do not instantiate"); + } + + + public static AllocationService createAllocationService(Settings settings) throws NoSuchMethodException, InstantiationException, + IllegalAccessException, InvocationTargetException { + return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings + .BUILT_IN_CLUSTER_SETTINGS)); + } + + public static AllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings) throws + InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException { + return new AllocationService(settings, + defaultAllocationDeciders(settings, clusterSettings), + NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); + } + + public static AllocationDeciders defaultAllocationDeciders(Settings settings, ClusterSettings clusterSettings) throws + IllegalAccessException, 
InvocationTargetException, InstantiationException, NoSuchMethodException { + List list = new ArrayList<>(); + // Keep a deterministic order of allocation deciders for the benchmark + for (Class deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { + try { + Constructor constructor = deciderClass.getConstructor(Settings.class, ClusterSettings + .class); + list.add(constructor.newInstance(settings, clusterSettings)); + } catch (NoSuchMethodException e) { + Constructor constructor = deciderClass.getConstructor(Settings.class); + list.add(constructor.newInstance(settings)); + } + } + return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0])); + + } + + public static DiscoveryNode newNode(String nodeId, Map attributes) { + return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER, + DiscoveryNode.Role.DATA), Version.CURRENT); + } +} diff --git a/benchmarks/src/main/resources/log4j.properties b/benchmarks/src/main/resources/log4j.properties new file mode 100644 index 00000000000..8ca1bc87295 --- /dev/null +++ b/benchmarks/src/main/resources/log4j.properties @@ -0,0 +1,8 @@ +# Do not log at all if it is not really critical - we're in a benchmark +benchmarks.es.logger.level=ERROR +log4j.rootLogger=${benchmarks.es.logger.level}, out + +log4j.appender.out=org.apache.log4j.ConsoleAppender +log4j.appender.out.layout=org.apache.log4j.PatternLayout +log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n + diff --git a/build.gradle b/build.gradle index ad1f2456dea..540f27503f0 100644 --- a/build.gradle +++ b/build.gradle @@ -27,6 +27,31 @@ import org.apache.tools.ant.taskdefs.condition.Os subprojects { group = 'org.elasticsearch' version = org.elasticsearch.gradle.VersionProperties.elasticsearch + description = "Elasticsearch subproject ${project.path}" + + // we only use maven publish to add tasks for pom generation + plugins.withType(MavenPublishPlugin).whenPluginAdded { + publishing { + publications { + // add license information to generated poms + all { + pom.withXml { XmlProvider xml -> + Node node = xml.asNode() + node.appendNode('inceptionYear', '2009') + + Node license = node.appendNode('licenses').appendNode('license') + license.appendNode('name', 'The Apache Software License, Version 2.0') + license.appendNode('url', 'http://www.apache.org/licenses/LICENSE-2.0.txt') + license.appendNode('distribution', 'repo') + + Node developer = node.appendNode('developers').appendNode('developer') + developer.appendNode('name', 'Elastic') + developer.appendNode('url', 'http://www.elastic.co') + } + } + } + } + } plugins.withType(NexusPlugin).whenPluginAdded { modifyPom { @@ -56,7 +81,7 @@ subprojects { nexus { String buildSnapshot = System.getProperty('build.snapshot', 'true') if (buildSnapshot == 'false') { - Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build() + Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build() String shortHash = repo.resolve('HEAD')?.name?.substring(0,7) repositoryUrl = project.hasProperty('build.repository') ? 
project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/" } @@ -119,6 +144,14 @@ subprojects { // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 javadoc.options.encoding='UTF8' javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') + /* + TODO: building javadocs with java 9 b118 is currently broken with weird errors, so + for now this is commented out...try again with the next ea build... + javadoc.executable = new File(project.javaHome, 'bin/javadoc') + if (project.javaVersion == JavaVersion.VERSION_1_9) { + // TODO: remove this hack! gradle should be passing this... + javadoc.options.addStringOption('source', '8') + }*/ } } @@ -127,8 +160,12 @@ subprojects { them as external dependencies so the build plugin that we use can be used to build elasticsearch plugins outside of the elasticsearch source tree. */ ext.projectSubstitutions = [ + "org.elasticsearch.gradle:build-tools:${version}": ':build-tools', "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':core', + "org.elasticsearch.client:rest:${version}": ':client:rest', + "org.elasticsearch.client:sniffer:${version}": ':client:sniffer', + "org.elasticsearch.client:test:${version}": ':client:test', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', @@ -224,7 +261,6 @@ allprojects { idea { project { - languageLevel = org.elasticsearch.gradle.BuildPlugin.minimumJava.toString() vcs = 'Git' } } @@ -236,13 +272,6 @@ tasks.idea.doLast { if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) { throw new GradleException('You must run gradle idea from the root of elasticsearch before importing into IntelliJ') } -// add buildSrc itself as a groovy project -task buildSrcIdea(type: GradleBuild) { - buildFile = 'buildSrc/build.gradle' - tasks = ['cleanIdea', 'ideaModule'] -} -tasks.idea.dependsOn(buildSrcIdea) - // eclipse configuration allprojects { @@ -278,20 +307,14 @@ allprojects { into '.settings' } // otherwise .settings is not nuked entirely - tasks.cleanEclipse { + task wipeEclipseSettings(type: Delete) { delete '.settings' } + tasks.cleanEclipse.dependsOn(wipeEclipseSettings) // otherwise the eclipse merging is *super confusing* tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings) } -// add buildSrc itself as a groovy project -task buildSrcEclipse(type: GradleBuild) { - buildFile = 'buildSrc/build.gradle' - tasks = ['cleanEclipse', 'eclipse'] -} -tasks.eclipse.dependsOn(buildSrcEclipse) - // we need to add the same --debug-jvm option as // the real RunTask has, so we can pass it through class Run extends DefaultTask { diff --git a/buildSrc/.gitignore b/buildSrc/.gitignore new file mode 100644 index 00000000000..bfdaf60b97e --- /dev/null +++ b/buildSrc/.gitignore @@ -0,0 +1 @@ +build-bootstrap/ diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index e36451311e7..1be5020f4f8 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -1,5 +1,3 @@ -import java.nio.file.Files - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -19,25 +17,31 @@ import java.nio.file.Files * under the License. 
*/ -// we must use buildscript + apply so that an external plugin -// can apply this file, since the plugins directive is not -// supported through file includes -buildscript { - repositories { - jcenter() - } - dependencies { - classpath 'com.bmuschko:gradle-nexus-plugin:2.3.1' - } -} +import java.nio.file.Files + apply plugin: 'groovy' -apply plugin: 'com.bmuschko.nexus' -// TODO: move common IDE configuration to a common file to include -apply plugin: 'idea' -apply plugin: 'eclipse' group = 'org.elasticsearch.gradle' -archivesBaseName = 'build-tools' + +// TODO: remove this when upgrading to a version that supports ProgressLogger +// gradle 2.14 made internal apis unavailable to plugins, and gradle considered +// ProgressLogger to be an internal api. Until this is made available again, +// we can't upgrade without losing our nice progress logging +// NOTE that this check duplicates that in BuildPlugin, but we need to check +// early here before trying to compile the broken classes in buildSrc +if (GradleVersion.current() != GradleVersion.version('2.13')) { + throw new GradleException('Gradle 2.13 is required to build elasticsearch') +} + +if (project == rootProject) { + // change the build dir used during build init, so that doing a clean + // won't wipe out the buildscript jar + buildDir = 'build-bootstrap' +} + +/***************************************************************************** + * Propagating version.properties to the rest of the build * + *****************************************************************************/ Properties props = new Properties() props.load(project.file('version.properties').newDataInputStream()) @@ -51,32 +55,6 @@ if (snapshot) { props.put("elasticsearch", version); } - -repositories { - mavenCentral() - maven { - name 'sonatype-snapshots' - url "https://oss.sonatype.org/content/repositories/snapshots/" - } - jcenter() -} - -dependencies { - compile gradleApi() - compile localGroovy() - compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}" - compile("junit:junit:${props.getProperty('junit')}") { - transitive = false - } - compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3' - compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' - compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' - compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... 
- compile 'de.thetaphi:forbiddenapis:2.0' - compile 'com.bmuschko:gradle-nexus-plugin:2.3.1' - compile 'org.apache.rat:apache-rat:0.11' -} - File tempPropertiesFile = new File(project.buildDir, "version.properties") task writeVersionProperties { inputs.properties(props) @@ -96,31 +74,84 @@ processResources { from tempPropertiesFile } -extraArchive { - javadoc = false - tests = false +/***************************************************************************** + * Dependencies used by the entire build * + *****************************************************************************/ + +repositories { + jcenter() } -idea { - module { - inheritOutputDirs = false - outputDir = file('build-idea/classes/main') - testOutputDir = file('build-idea/classes/test') +dependencies { + compile gradleApi() + compile localGroovy() + compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}" + compile("junit:junit:${props.getProperty('junit')}") { + transitive = false + } + compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3' + compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4' + compile 'com.netflix.nebula:gradle-info-plugin:3.0.3' + compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r' + compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE.... + compile 'de.thetaphi:forbiddenapis:2.2' + compile 'com.bmuschko:gradle-nexus-plugin:2.3.1' + compile 'org.apache.rat:apache-rat:0.11' + compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' +} + + +/***************************************************************************** + * Bootstrap repositories * + *****************************************************************************/ +// this will only happen when buildSrc is built on its own during build init +if (project == rootProject) { + + repositories { + mavenCentral() + maven { + name 'sonatype-snapshots' + url "https://oss.sonatype.org/content/repositories/snapshots/" + } + } + test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*' +} + +/***************************************************************************** + * Normal project checks * + *****************************************************************************/ + +// this happens when included as a normal project in the build, which we do +// to enforce precommit checks like forbidden apis, as well as setup publishing +if (project != rootProject) { + apply plugin: 'elasticsearch.build' + apply plugin: 'nebula.maven-base-publish' + apply plugin: 'nebula.maven-scm' + + // groovydoc succeeds, but has some weird internal exception... + groovydoc.enabled = false + + // build-tools is not ready for primetime with these... + dependencyLicenses.enabled = false + forbiddenApisMain.enabled = false + forbiddenApisTest.enabled = false + jarHell.enabled = false + thirdPartyAudit.enabled = false + + // test for elasticsearch.build tries to run with ES... 
+ test.enabled = false + + // TODO: re-enable once randomizedtesting gradle code is published and removed from here + licenseHeaders.enabled = false + + forbiddenPatterns { + exclude '**/*.wav' + // the file that actually defines nocommit + exclude '**/ForbiddenPatternsTask.groovy' + } + + namingConventions { + testClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$UnitTestCase' + integTestClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$IntegTestCase' } } - -eclipse { - classpath { - defaultOutputDir = file('build-eclipse') - } -} - -task copyEclipseSettings(type: Copy) { - from project.file('src/main/resources/eclipse.settings') - into '.settings' -} -// otherwise .settings is not nuked entirely -tasks.cleanEclipse { - delete '.settings' -} -tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy index 450d3645182..6ed6ecf8619 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy @@ -28,12 +28,6 @@ import org.gradle.api.logging.LogLevel import org.gradle.api.logging.Logger import org.junit.runner.Description -import javax.sound.sampled.AudioSystem -import javax.sound.sampled.Clip -import javax.sound.sampled.Line -import javax.sound.sampled.LineEvent -import javax.sound.sampled.LineListener -import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicInteger import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription @@ -123,36 +117,9 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv formatTime(e.getCurrentTime()) + ", stalled for " + formatDurationInSeconds(e.getNoEventDuration()) + " at: " + (e.getDescription() == null ? 
"" : formatDescription(e.getDescription()))) - try { - playBeat(); - } catch (Exception nosound) { /* handling exceptions with style */ } slowTestsFound = true } - void playBeat() throws Exception { - Clip clip = (Clip)AudioSystem.getLine(new Line.Info(Clip.class)); - final AtomicBoolean stop = new AtomicBoolean(); - clip.addLineListener(new LineListener() { - @Override - public void update(LineEvent event) { - if (event.getType() == LineEvent.Type.STOP) { - stop.set(true); - } - } - }); - InputStream stream = getClass().getResourceAsStream("/beat.wav"); - try { - clip.open(AudioSystem.getAudioInputStream(stream)); - clip.start(); - while (!stop.get()) { - Thread.sleep(20); - } - clip.close(); - } finally { - stream.close(); - } - } - @Subscribe void onQuit(AggregatedQuitEvent e) throws IOException { if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index ab2ba5abfef..8d77e7a9a34 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -19,6 +19,7 @@ package org.elasticsearch.gradle import nebula.plugin.extraconfigurations.ProvidedBasePlugin +import nebula.plugin.publishing.maven.MavenBasePublishPlugin import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.GradleException import org.gradle.api.JavaVersion @@ -33,6 +34,8 @@ import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.artifacts.maven.MavenPom +import org.gradle.api.publish.maven.MavenPublication +import org.gradle.api.publish.maven.tasks.GenerateMavenPom import org.gradle.api.tasks.bundling.Jar import org.gradle.api.tasks.compile.JavaCompile import org.gradle.internal.jvm.Jvm @@ -54,7 +57,7 @@ class BuildPlugin implements Plugin { project.pluginManager.apply('java') project.pluginManager.apply('carrotsearch.randomized-testing') // these plugins add lots of info to our jars - configureJarManifest(project) // jar config must be added before info broker + configureJars(project) // jar config must be added before info broker project.pluginManager.apply('nebula.info-broker') project.pluginManager.apply('nebula.info-basic') project.pluginManager.apply('nebula.info-java') @@ -68,6 +71,7 @@ class BuildPlugin implements Plugin { configureConfigurations(project) project.ext.versions = VersionProperties.versions configureCompile(project) + configurePomGeneration(project) configureTest(project) configurePrecommit(project) @@ -109,7 +113,7 @@ class BuildPlugin implements Plugin { } // enforce gradle version - GradleVersion minGradle = GradleVersion.version('2.8') + GradleVersion minGradle = GradleVersion.version('2.13') if (GradleVersion.current() < minGradle) { throw new GradleException("${minGradle} or above is required to build elasticsearch") } @@ -139,7 +143,7 @@ class BuildPlugin implements Plugin { } project.rootProject.ext.javaHome = javaHome - project.rootProject.ext.javaVersion = javaVersion + project.rootProject.ext.javaVersion = javaVersionEnum project.rootProject.ext.buildChecksDone = true } project.targetCompatibility = minimumJava @@ -228,7 +232,7 @@ class BuildPlugin implements Plugin { */ static void configureConfigurations(Project project) { // we are not shipping these jars, we act like dumb consumers of these things 
- if (project.path.startsWith(':test:fixtures')) { + if (project.path.startsWith(':test:fixtures') || project.path == ':build-tools') { return } // fail on any conflicting dependency versions @@ -266,44 +270,7 @@ class BuildPlugin implements Plugin { // add exclusions to the pom directly, for each of the transitive deps of this project's deps project.modifyPom { MavenPom pom -> - pom.withXml { XmlProvider xml -> - // first find if we have dependencies at all, and grab the node - NodeList depsNodes = xml.asNode().get('dependencies') - if (depsNodes.isEmpty()) { - return - } - - // check each dependency for any transitive deps - for (Node depNode : depsNodes.get(0).children()) { - String groupId = depNode.get('groupId').get(0).text() - String artifactId = depNode.get('artifactId').get(0).text() - String version = depNode.get('version').get(0).text() - - // collect the transitive deps now that we know what this dependency is - String depConfig = transitiveDepConfigName(groupId, artifactId, version) - Configuration configuration = project.configurations.findByName(depConfig) - if (configuration == null) { - continue // we did not make this dep non-transitive - } - Set artifacts = configuration.resolvedConfiguration.resolvedArtifacts - if (artifacts.size() <= 1) { - // this dep has no transitive deps (or the only artifact is itself) - continue - } - - // we now know we have something to exclude, so add the exclusion elements - Node exclusions = depNode.appendNode('exclusions') - for (ResolvedArtifact transitiveArtifact : artifacts) { - ModuleVersionIdentifier transitiveDep = transitiveArtifact.moduleVersion.id - if (transitiveDep.group == groupId && transitiveDep.name == artifactId) { - continue; // don't exclude the dependency itself! - } - Node exclusion = exclusions.appendNode('exclusion') - exclusion.appendNode('groupId', transitiveDep.group) - exclusion.appendNode('artifactId', transitiveDep.name) - } - } - } + pom.withXml(removeTransitiveDependencies(project)) } } @@ -332,6 +299,70 @@ class BuildPlugin implements Plugin { } } + /** Returns a closure which can be used with a MavenPom for removing transitive dependencies. 
*/ + private static Closure removeTransitiveDependencies(Project project) { + // TODO: remove this when enforcing gradle 2.13+, it now properly handles exclusions + return { XmlProvider xml -> + // first find if we have dependencies at all, and grab the node + NodeList depsNodes = xml.asNode().get('dependencies') + if (depsNodes.isEmpty()) { + return + } + + // check each dependency for any transitive deps + for (Node depNode : depsNodes.get(0).children()) { + String groupId = depNode.get('groupId').get(0).text() + String artifactId = depNode.get('artifactId').get(0).text() + String version = depNode.get('version').get(0).text() + + // collect the transitive deps now that we know what this dependency is + String depConfig = transitiveDepConfigName(groupId, artifactId, version) + Configuration configuration = project.configurations.findByName(depConfig) + if (configuration == null) { + continue // we did not make this dep non-transitive + } + Set artifacts = configuration.resolvedConfiguration.resolvedArtifacts + if (artifacts.size() <= 1) { + // this dep has no transitive deps (or the only artifact is itself) + continue + } + + // we now know we have something to exclude, so add the exclusion elements + Node exclusions = depNode.appendNode('exclusions') + for (ResolvedArtifact transitiveArtifact : artifacts) { + ModuleVersionIdentifier transitiveDep = transitiveArtifact.moduleVersion.id + if (transitiveDep.group == groupId && transitiveDep.name == artifactId) { + continue; // don't exclude the dependency itself! + } + Node exclusion = exclusions.appendNode('exclusion') + exclusion.appendNode('groupId', transitiveDep.group) + exclusion.appendNode('artifactId', transitiveDep.name) + } + } + } + } + + /**Configuration generation of maven poms. */ + public static void configurePomGeneration(Project project) { + project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded { + project.publishing { + publications { + all { MavenPublication publication -> // we only deal with maven + // add exclusions to the pom directly, for each of the transitive deps of this project's deps + publication.pom.withXml(removeTransitiveDependencies(project)) + } + } + } + + project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom t -> + // place the pom next to the jar it is for + t.destination = new File(project.buildDir, "distributions/${project.archivesBaseName}-${project.version}.pom") + // build poms with assemble + project.assemble.dependsOn(t) + } + } + } + /** Adds compiler settings to the project */ static void configureCompile(Project project) { project.ext.compactProfile = 'compact3' @@ -341,32 +372,40 @@ class BuildPlugin implements Plugin { options.fork = true options.forkOptions.executable = new File(project.javaHome, 'bin/javac') options.forkOptions.memoryMaximumSize = "1g" + if (project.targetCompatibility >= JavaVersion.VERSION_1_8) { + // compile with compact 3 profile by default + // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE + if (project.compactProfile != 'full') { + options.compilerArgs << '-profile' << project.compactProfile + } + } /* * -path because gradle will send in paths that don't always exist. * -missing because we have tons of missing @returns and @param. * -serial because we don't use java serialization. 
*/ // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :) - options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing' - // compile with compact 3 profile by default - // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE - if (project.compactProfile != 'full') { - options.compilerArgs << '-profile' << project.compactProfile - } + options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing' options.encoding = 'UTF-8' //options.incremental = true - // gradle ignores target/source compatibility when it is "unnecessary", but since to compile with - // java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary - assert minimumJava == JavaVersion.VERSION_1_8 - options.compilerArgs << '-target' << '1.8' << '-source' << '1.8' + if (project.javaVersion == JavaVersion.VERSION_1_9) { + // hack until gradle supports java 9's new "-release" arg + assert minimumJava == JavaVersion.VERSION_1_8 + options.compilerArgs << '-release' << '8' + project.sourceCompatibility = null + project.targetCompatibility = null + } } } } - /** Adds additional manifest info to jars */ - static void configureJarManifest(Project project) { + /** Adds additional manifest info to jars, and adds source and javadoc jars */ + static void configureJars(Project project) { project.tasks.withType(Jar) { Jar jarTask -> + // we put all our distributable files under distributions + jarTask.destinationDir = new File(project.buildDir, 'distributions') + // fixup the jar manifest jarTask.doFirst { boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT"); String version = VersionProperties.elasticsearch; @@ -422,7 +461,7 @@ class BuildPlugin implements Plugin { // default test sysprop values systemProperty 'tests.ifNoTests', 'fail' // TODO: remove setting logging level via system property - systemProperty 'es.logger.level', 'WARN' + systemProperty 'tests.logger.level', 'WARN' for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('tests.') || property.getKey().startsWith('es.')) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy index 1896cdf1b67..b1b04a2ded6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy @@ -26,14 +26,17 @@ import org.gradle.api.tasks.Exec * A wrapper around gradle's Exec task to capture output and log on error. 
*/ class LoggedExec extends Exec { + + protected ByteArrayOutputStream output = new ByteArrayOutputStream() + LoggedExec() { if (logger.isInfoEnabled() == false) { - standardOutput = new ByteArrayOutputStream() - errorOutput = standardOutput + standardOutput = output + errorOutput = output ignoreExitValue = true doLast { if (execResult.exitValue != 0) { - standardOutput.toString('UTF-8').eachLine { line -> logger.error(line) } + output.toString('UTF-8').eachLine { line -> logger.error(line) } throw new GradleException("Process '${executable} ${args.join(' ')}' finished with non-zero exit value ${execResult.exitValue}") } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy new file mode 100644 index 00000000000..3b1ec3c5d87 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.doc + +import org.elasticsearch.gradle.test.RestTestPlugin +import org.gradle.api.Project +import org.gradle.api.Task + +/** + * Sets up tests for documentation. + */ +public class DocsTestPlugin extends RestTestPlugin { + + @Override + public void apply(Project project) { + super.apply(project) + Task listSnippets = project.tasks.create('listSnippets', SnippetsTask) + listSnippets.group 'Docs' + listSnippets.description 'List each snippet' + listSnippets.perSnippet { println(it.toString()) } + + Task listConsoleCandidates = project.tasks.create( + 'listConsoleCandidates', SnippetsTask) + listConsoleCandidates.group 'Docs' + listConsoleCandidates.description + 'List snippets that probably should be marked // CONSOLE' + listConsoleCandidates.perSnippet { + if ( + it.console // Already marked, nothing to do + || it.testResponse // It is a response + ) { + return + } + List languages = [ + // These languages should almost always be marked console + 'js', 'json', + // These are often curl commands that should be converted but + // are probably false positives + 'sh', 'shell', + ] + if (false == languages.contains(it.language)) { + return + } + println(it.toString()) + } + + project.tasks.create('buildRestTests', RestTestsFromSnippetsTask) + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy new file mode 100644 index 00000000000..c7f4316ee04 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -0,0 +1,240 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.doc + +import org.elasticsearch.gradle.doc.SnippetsTask.Snippet +import org.gradle.api.InvalidUserDataException +import org.gradle.api.tasks.Input +import org.gradle.api.tasks.OutputDirectory + +import java.nio.file.Files +import java.nio.file.Path +import java.util.regex.Matcher + +/** + * Generates REST tests for each snippet marked // TEST. + */ +public class RestTestsFromSnippetsTask extends SnippetsTask { + @Input + Map setups = new HashMap() + + /** + * Root directory of the tests being generated. To make rest tests happy + * we generate them in a testRoot() which is contained in this directory. + */ + @OutputDirectory + File testRoot = project.file('build/rest') + + public RestTestsFromSnippetsTask() { + project.afterEvaluate { + // Wait to set this so testRoot can be customized + project.sourceSets.test.output.dir(testRoot, builtBy: this) + } + TestBuilder builder = new TestBuilder() + doFirst { outputRoot().delete() } + perSnippet builder.&handleSnippet + doLast builder.&finishLastTest + } + + /** + * Root directory containing all the files generated by this task. It is + * contained within testRoot. + */ + File outputRoot() { + return new File(testRoot, '/rest-api-spec/test') + } + + private class TestBuilder { + private static final String SYNTAX = { + String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/ + String pathAndQuery = /(?<pathAndQuery>[^\n]+)/ + String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|#/ + String body = /(?<body>(?:\n(?!$badBody)[^\n]+)+)/ + String nonComment = /$method\s+$pathAndQuery$body?/ + String comment = /(?<comment>#.+)/ + /(?:$comment|$nonComment)\n+/ + }() + + /** + * The file in which we saw the last snippet that made a test. + */ + Path lastDocsPath + + /** + * The file we're building. + */ + PrintWriter current + + /** + * Called each time a snippet is encountered. Tracks the snippets and + * calls buildTest to actually build the test. + */ + void handleSnippet(Snippet snippet) { + if (snippet.language == 'json') { + throw new InvalidUserDataException( + "$snippet: Use `js` instead of `json`.") + } + if (snippet.testSetup) { + setup(snippet) + return + } + if (snippet.testResponse) { + response(snippet) + return + } + if (snippet.test || snippet.console) { + test(snippet) + return + } + // Must be an unmarked snippet....
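+ // so there is nothing to emit: snippets without markers produce no tests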
+ } + + private void test(Snippet test) { + setupCurrent(test) + + if (false == test.continued) { + current.println('---') + current.println("\"$test.start\":") + } + if (test.skipTest) { + current.println(" - skip:") + current.println(" features: always_skip") + current.println(" reason: $test.skipTest") + } + if (test.setup != null) { + String setup = setups[test.setup] + if (setup == null) { + throw new InvalidUserDataException("Couldn't find setup " + + "for $test") + } + current.println(setup) + } + + body(test, false) + } + + private void response(Snippet response) { + current.println(" - match: ") + current.println(" \$body: ") + response.contents.eachLine { current.println(" $it") } + } + + void emitDo(String method, String pathAndQuery, + String body, String catchPart, boolean inSetup) { + def (String path, String query) = pathAndQuery.tokenize('?') + current.println(" - do:") + if (catchPart != null) { + current.println(" catch: $catchPart") + } + current.println(" raw:") + current.println(" method: $method") + current.println(" path: \"$path\"") + if (query != null) { + for (String param: query.tokenize('&')) { + def (String name, String value) = param.tokenize('=') + if (value == null) { + value = '' + } + current.println(" $name: \"$value\"") + } + } + if (body != null) { + // Throw out the leading newline we get from parsing the body + body = body.substring(1) + current.println(" body: |") + body.eachLine { current.println(" $it") } + } + /* Catch any shard failures. These only cause a non-200 response if + * no shard succeeds. But we need to fail the tests on all of these + * because they mean invalid syntax or broken queries or something + * else that we don't want to teach people to do. The REST test + * framework doesn't allow us to have assertions in the setup + * section so we have to skip it there. We also have to skip _cat + * actions because they don't return json so we can't is_false + * them. That is ok because they don't have this + * partial-success-is-success thing. + */ + if (false == inSetup && false == path.startsWith('_cat')) { + current.println(" - is_false: _shards.failures") + } + } + + private void setup(Snippet setup) { + if (lastDocsPath == setup.path) { + throw new InvalidUserDataException("$setup: wasn't first") + } + setupCurrent(setup) + current.println('---') + current.println("setup:") + body(setup, true) + // always wait for yellow before anything is executed + current.println( + " - do:\n" + + " raw:\n" + + " method: GET\n" + + " path: \"_cluster/health\"\n" + + " wait_for_status: \"yellow\"") + } + + private void body(Snippet snippet, boolean inSetup) { + parse("$snippet", snippet.contents, SYNTAX) { matcher, last -> + if (matcher.group("comment") != null) { + // Comment + return + } + String method = matcher.group("method") + String pathAndQuery = matcher.group("pathAndQuery") + String body = matcher.group("body") + String catchPart = last ?
snippet.catchPart : null + if (pathAndQuery.startsWith('/')) { + // Leading '/'s break the generated paths + pathAndQuery = pathAndQuery.substring(1) + } + emitDo(method, pathAndQuery, body, catchPart, inSetup) + } + } + + private PrintWriter setupCurrent(Snippet test) { + if (lastDocsPath == test.path) { + return + } + finishLastTest() + lastDocsPath = test.path + + // Make the destination file: + // Shift the path into the destination directory tree + Path dest = outputRoot().toPath().resolve(test.path) + // Replace the extension + String fileName = dest.getName(dest.nameCount - 1) + dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yaml')) + + // Now set up the writer + Files.createDirectories(dest.parent) + current = dest.newPrintWriter('UTF-8') + } + + void finishLastTest() { + if (current != null) { + current.close() + current = null + } + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy new file mode 100644 index 00000000000..afd91858e9d --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -0,0 +1,308 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.doc + +import org.gradle.api.DefaultTask +import org.gradle.api.InvalidUserDataException +import org.gradle.api.file.ConfigurableFileTree +import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.TaskAction + +import java.nio.file.Path +import java.util.regex.Matcher + +/** + * A task which will run a closure on each snippet in the documentation. + */ +public class SnippetsTask extends DefaultTask { + private static final String SCHAR = /(?:\\\/|[^\/])/ + private static final String SUBSTITUTION = /s\/($SCHAR+)\/($SCHAR*)\// + private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/ + private static final String SKIP = /skip:([^\]]+)/ + private static final String SETUP = /setup:([^ \]]+)/ + private static final String TEST_SYNTAX = + /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/ + + /** + * Action to take on each snippet. Called with a single parameter, an + * instance of Snippet. + */ + Closure perSnippet + + /** + * The docs to scan. Defaults to every file in the directory except the + * build.gradle file because that is appropriate for Elasticsearch's docs + * directory. + */ + @InputFiles + ConfigurableFileTree docs = project.fileTree(project.projectDir) { + // No snippets in the build file + exclude 'build.gradle' + // That is where the snippets go, not where they come from!
+ exclude 'build' + } + + @TaskAction + public void executeTask() { + /* + * Walks each line of each file, building snippets as it encounters + * the lines that make up the snippet. + */ + for (File file: docs) { + String lastLanguage + int lastLanguageLine + Snippet snippet = null + StringBuilder contents = null + List substitutions = null + Closure emit = { + snippet.contents = contents.toString() + contents = null + if (substitutions != null) { + substitutions.each { String pattern, String subst -> + /* + * $body is really common but it looks like a + * backreference so we just escape it here to make the + * tests cleaner. + */ + subst = subst.replace('$body', '\\$body') + // \n is a new line.... + subst = subst.replace('\\n', '\n') + snippet.contents = snippet.contents.replaceAll( + pattern, subst) + } + substitutions = null + } + perSnippet(snippet) + snippet = null + } + file.eachLine('UTF-8') { String line, int lineNumber -> + Matcher matcher + if (line ==~ /-{4,}\s*/) { // Four dashes looks like a snippet + if (snippet == null) { + Path path = docs.dir.toPath().relativize(file.toPath()) + snippet = new Snippet(path: path, start: lineNumber) + if (lastLanguageLine == lineNumber - 1) { + snippet.language = lastLanguage + } + } else { + snippet.end = lineNumber + } + return + } + matcher = line =~ /\[source,(\w+)]\s*/ + if (matcher.matches()) { + lastLanguage = matcher.group(1) + lastLanguageLine = lineNumber + return + } + if (line ==~ /\/\/\s*AUTOSENSE\s*/) { + throw new InvalidUserDataException("AUTOSENSE has been " + + "replaced by CONSOLE. Use that instead at " + + "$file:$lineNumber") + } + if (line ==~ /\/\/\s*CONSOLE\s*/) { + if (snippet == null) { + throw new InvalidUserDataException("CONSOLE not " + + "paired with a snippet at $file:$lineNumber") + } + snippet.console = true + return + } + matcher = line =~ /\/\/\s*TEST(\[(.+)\])?\s*/ + if (matcher.matches()) { + if (snippet == null) { + throw new InvalidUserDataException("TEST not " + + "paired with a snippet at $file:$lineNumber") + } + snippet.test = true + if (matcher.group(2) != null) { + String loc = "$file:$lineNumber" + parse(loc, matcher.group(2), TEST_SYNTAX) { + if (it.group(1) != null) { + snippet.catchPart = it.group(1) + return + } + if (it.group(2) != null) { + if (substitutions == null) { + substitutions = [] + } + substitutions.add([it.group(2), it.group(3)]) + return + } + if (it.group(4) != null) { + snippet.skipTest = it.group(4) + return + } + if (it.group(5) != null) { + snippet.continued = true + return + } + if (it.group(6) != null) { + snippet.setup = it.group(6) + return + } + throw new InvalidUserDataException( + "Invalid test marker: $line") + } + } + return + } + matcher = line =~ /\/\/\s*TESTRESPONSE(\[(.+)\])?\s*/ + if (matcher.matches()) { + if (snippet == null) { + throw new InvalidUserDataException("TESTRESPONSE not " + + "paired with a snippet at $file:$lineNumber") + } + snippet.testResponse = true + if (matcher.group(2) != null) { + if (substitutions == null) { + substitutions = [] + } + String loc = "$file:$lineNumber" + parse(loc, matcher.group(2), /$SUBSTITUTION ?/) { + substitutions.add([it.group(1), it.group(2)]) + } + } + return + } + if (line ==~ /\/\/\s*TESTSETUP\s*/) { + snippet.testSetup = true + return + } + if (snippet == null) { + // Outside + return + } + if (snippet.end == Snippet.NOT_FINISHED) { + // Inside + if (contents == null) { + contents = new StringBuilder() + } + // We don't need the annotations + line = line.replaceAll(/<\d+>/, '') + // Nor any trailing spaces + 
line = line.replaceAll(/\s+$/, '') + contents.append(line).append('\n') + return + } + // Just finished + emit() + } + if (snippet != null) emit() + } + } + + static class Snippet { + static final int NOT_FINISHED = -1 + + /** + * Path to the file containing this snippet. Relative to docs.dir of the + * SnippetsTask that created it. + */ + Path path + int start + int end = NOT_FINISHED + String contents + + boolean console = false + boolean test = false + boolean testResponse = false + boolean testSetup = false + String skipTest = null + boolean continued = false + String language = null + String catchPart = null + String setup = null + + @Override + public String toString() { + String result = "$path[$start:$end]" + if (language != null) { + result += "($language)" + } + if (console) { + result += '// CONSOLE' + } + if (test) { + result += '// TEST' + if (catchPart) { + result += "[catch: $catchPart]" + } + if (skipTest) { + result += "[skip=$skipTest]" + } + if (continued) { + result += '[continued]' + } + if (setup) { + result += "[setup:$setup]" + } + } + if (testResponse) { + result += '// TESTRESPONSE' + } + if (testSetup) { + result += '// TESTSETUP' + } + return result + } + } + + /** + * Repeatedly match the pattern to the string, calling the closure with the + * matchers each time there is a match. If there are characters that don't + * match then blow up. If the closure takes two parameters then the second + * one is "is this the last match?". + */ + protected parse(String location, String s, String pattern, Closure c) { + if (s == null) { + return // Silly null, only real stuff gets to match! + } + Matcher m = s =~ pattern + int offset = 0 + Closure extraContent = { message -> + StringBuilder cutOut = new StringBuilder() + cutOut.append(s[offset - 6..offset - 1]) + cutOut.append('*') + cutOut.append(s[offset..Math.min(offset + 5, s.length() - 1)]) + String cutOutNoNl = cutOut.toString().replace('\n', '\\n') + throw new InvalidUserDataException("$location: Extra content " + + "$message ('$cutOutNoNl') matching [$pattern]: $s") + } + while (m.find()) { + if (m.start() != offset) { + extraContent("between [$offset] and [${m.start()}]") + } + offset = m.end() + if (c.maximumNumberOfParameters == 1) { + c(m) + } else { + c(m, offset == s.length()) + } + } + if (offset == 0) { + throw new InvalidUserDataException("$location: Didn't match " + + "$pattern: $s") + } + if (offset != s.length()) { + extraContent("after [$offset]") + } + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index b04f959e068..ba013da31e9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -18,14 +18,14 @@ */ package org.elasticsearch.gradle.plugin +import nebula.plugin.publishing.maven.MavenBasePublishPlugin +import nebula.plugin.publishing.maven.MavenScmPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask import org.gradle.api.Project -import org.gradle.api.artifacts.Dependency import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.bundling.Zip - /** * Encapsulates build configuration for an Elasticsearch plugin. 
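 *
 * A sketch of how a plugin build applies it (the name and description
 * values here are illustrative, not part of this change):
 * <pre>
 * apply plugin: 'elasticsearch.esplugin'
 * esplugin {
 *     name 'example-plugin'
 *     description 'A plugin that exists only for illustration.'
 * }
 * </pre>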
*/ @@ -50,10 +50,11 @@ public class PluginBuildPlugin extends BuildPlugin { } else { project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files) project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files) + addPomGeneration(project) } project.namingConventions { - // Plugins decalare extensions of ESIntegTestCase as "Tests" instead of IT. + // Plugins declare integration tests as "Tests" instead of IT. skipIntegTestInDisguise = true } } @@ -125,4 +126,32 @@ public class PluginBuildPlugin extends BuildPlugin { project.configurations.getByName('default').extendsFrom = [] project.artifacts.add('default', bundle) } + + /** + * Adds the plugin jar and zip as publications. + */ + protected static void addPomGeneration(Project project) { + project.plugins.apply(MavenBasePublishPlugin.class) + project.plugins.apply(MavenScmPlugin.class) + + project.publishing { + publications { + nebula { + artifact project.bundlePlugin + pom.withXml { + // overwrite the name/description in the pom nebula set up + Node root = asNode() + for (Node node : root.children()) { + if (node.name() == 'name') { + node.setValue(project.pluginProperties.extension.name) + } else if (node.name() == 'description') { + node.setValue(project.pluginProperties.extension.description) + } + } + } + } + } + } + + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy index 612bc568621..52de7dac2d5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy @@ -21,11 +21,11 @@ package org.elasticsearch.gradle.precommit import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.VersionProperties +import org.gradle.api.artifacts.Dependency import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputFiles import org.gradle.api.tasks.OutputFile - /** * Runs NamingConventionsCheck on a classpath/directory combo to verify that * tests are named according to our conventions so they'll be picked up by @@ -57,8 +57,27 @@ public class NamingConventionsTask extends LoggedExec { @Input boolean skipIntegTestInDisguise = false + /** + * Superclass for all tests. + */ + @Input + String testClass = 'org.apache.lucene.util.LuceneTestCase' + + /** + * Superclass for all integration tests. + */ + @Input + String integTestClass = 'org.elasticsearch.test.ESIntegTestCase' + public NamingConventionsTask() { - dependsOn(classpath) + // Extra classpath contains the actual test + project.configurations.create('namingConventions') + Dependency buildToolsDep = project.dependencies.add('namingConventions', + "org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}") + buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts. 
+ FileCollection extraClasspath = project.configurations.namingConventions + dependsOn(extraClasspath) + description = "Runs NamingConventionsCheck on ${classpath}" executable = new File(project.javaHome, 'bin/java') onlyIf { project.sourceSets.test.output.classesDir.exists() } @@ -69,9 +88,12 @@ public class NamingConventionsTask extends LoggedExec { project.afterEvaluate { doFirst { args('-Djna.nosys=true') - args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck') + args('-cp', (classpath + extraClasspath).asPath, 'org.elasticsearch.test.NamingConventionsCheck') + args('--test-class', testClass) if (skipIntegTestInDisguise) { args('--skip-integ-tests-in-disguise') + } else { + args('--integ-test-class', integTestClass) } /* * The test framework has classes that fail the checks to validate that the checks fail properly. @@ -79,7 +101,7 @@ * process of ignoring them lets us validate that they were found so this ignore parameter acts * as the test for the NamingConventionsCheck. */ - if (':test:framework'.equals(project.path)) { + if (':build-tools'.equals(project.path)) { args('--self-test') } args('--', project.sourceSets.test.output.classesDir.absolutePath) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 427d3191dc5..a5e1e4c8932 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,7 +34,6 @@ class PrecommitTasks { configureForbiddenApis(project), configureCheckstyle(project), configureNamingConventions(project), - configureLoggerUsage(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('jarHell', JarHellTask.class), @@ -49,6 +48,20 @@ class PrecommitTasks { UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class) updateShas.parentTask = dependencyLicenses } + if (project.path != ':build-tools') { + /* + * Sadly, build-tools can't have logger-usage-check because that + * would create a circular project dependency between build-tools + * (which provides NamingConventionsCheck) and :test:logger-usage + * which provides the logger usage check. Since the build tools + * don't use the logger usage check because they don't have any + * of Elasticsearch's loggers and :test:logger-usage actually does + * use the NamingConventionsCheck, we break the circular dependency + * here.
+ */ + precommitTasks.add(configureLoggerUsage(project)) + } + Map precommitOptions = [ name: 'precommit', @@ -62,9 +75,8 @@ class PrecommitTasks { private static Task configureForbiddenApis(Project project) { project.pluginManager.apply(ForbiddenApisPlugin.class) project.forbiddenApis { - internalRuntimeForbidden = true failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out'] signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy index 3ff5a06ad42..076a564f84a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy @@ -203,8 +203,7 @@ public class ThirdPartyAuditTask extends AntTask { Set sheistySet = getSheistyClasses(tmpDir.toPath()); try { - ant.thirdPartyAudit(internalRuntimeForbidden: false, - failOnUnsupportedJava: false, + ant.thirdPartyAudit(failOnUnsupportedJava: false, failOnMissingClasses: false, signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()), classpath: classpath.asPath) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 34dde6e5dad..c3004a64b86 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -291,9 +291,10 @@ class ClusterFormationTasks { File configDir = new File(node.homeDir, 'config') copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it for (Map.Entry extraConfigFile : node.config.extraConfigFiles.entrySet()) { + Object extraConfigFileValue = extraConfigFile.getValue() copyConfig.doFirst { // make sure the copy won't be a no-op or act on a directory - File srcConfigFile = project.file(extraConfigFile.getValue()) + File srcConfigFile = project.file(extraConfigFileValue) if (srcConfigFile.isDirectory()) { throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}") } @@ -303,7 +304,7 @@ class ClusterFormationTasks { } File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey()) // wrap source file in closure to delay resolution to execution time - copyConfig.from({ extraConfigFile.getValue() }) { + copyConfig.from({ extraConfigFileValue }) { // this must be in a closure so it is only applied to the single file specified in from above into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile()) rename { destConfigFile.name } @@ -418,8 +419,7 @@ class ClusterFormationTasks { // argument are wrapped in an ExecArgWrapper that escapes commas args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) } } else { - executable 'sh' - args execArgs + commandLine execArgs } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 
2ff5e333139..5d9961a0425 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -129,18 +129,18 @@ class NodeInfo { } env = [ 'JAVA_HOME' : project.javaHome ] - args.addAll("-E", "es.node.portsfile=true") + args.addAll("-E", "node.portsfile=true") String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs env.put('ES_JAVA_OPTS', esJavaOpts) for (Map.Entry property : System.properties.entrySet()) { - if (property.getKey().startsWith('es.')) { + if (property.key.startsWith('tests.es.')) { args.add("-E") - args.add("${property.getKey()}=${property.getValue()}") + args.add("${property.key.substring('tests.es.'.size())}=${property.value}") } } env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options')) - args.addAll("-E", "es.path.conf=${confDir}") + args.addAll("-E", "path.conf=${confDir}") if (Os.isFamily(Os.FAMILY_WINDOWS)) { args.add('"') // end the entire command, quoted } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 3bfe9d61018..fedcf6e87d3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -62,6 +62,7 @@ public class RestIntegTestTask extends RandomizedTestingTask { project.gradle.projectsEvaluated { NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig) systemProperty('tests.rest.cluster', "${-> node.httpUri()}") + systemProperty('tests.config.dir', "${-> node.confDir}") // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass // both as separate sysprops diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index 2f2030f6cd2..c68e0528c9b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -19,6 +19,7 @@ package org.elasticsearch.gradle.vagrant import org.gradle.api.DefaultTask +import org.gradle.api.tasks.Input import org.gradle.api.tasks.TaskAction import org.gradle.logging.ProgressLoggerFactory import org.gradle.process.internal.ExecAction @@ -30,41 +31,22 @@ import javax.inject.Inject * Runs bats over vagrant. Pretty much like running it using Exec but with a * nicer output formatter. 
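 *
 * Configuration sketch (the task name, box name, and command are
 * illustrative, not part of this change):
 * <pre>
 * task bats(type: BatsOverVagrantTask) {
 *     boxName = 'ubuntu-1404'
 *     command = 'sudo bats $BATS/*.bats'
 * }
 * </pre>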
*/ -class BatsOverVagrantTask extends DefaultTask { - String command - String boxName - ExecAction execAction +public class BatsOverVagrantTask extends VagrantCommandTask { - BatsOverVagrantTask() { - execAction = getExecActionFactory().newExecAction() - } + @Input + String command - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException(); - } + BatsOverVagrantTask() { + project.afterEvaluate { + args 'ssh', boxName, '--command', command + } + } - @Inject - ExecActionFactory getExecActionFactory() { - throw new UnsupportedOperationException(); - } - - void boxName(String boxName) { - this.boxName = boxName - } - - void command(String command) { - this.command = command - } - - @TaskAction - void exec() { - // It'd be nice if --machine-readable were, well, nice - execAction.commandLine(['vagrant', 'ssh', boxName, '--command', command]) - execAction.setStandardOutput(new TapLoggerOutputStream( - command: command, - factory: getProgressLoggerFactory(), - logger: logger)) - execAction.execute(); - } + @Override + protected OutputStream createLoggerOutputStream() { + return new TapLoggerOutputStream( + command: commandLine.join(' '), + factory: getProgressLoggerFactory(), + logger: logger) + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy index 5f4a5e0a0c4..3f980c57a49 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy @@ -19,9 +19,11 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream +import groovy.transform.PackageScope import org.gradle.api.GradleScriptException import org.gradle.api.logging.Logger import org.gradle.logging.ProgressLogger +import org.gradle.logging.ProgressLoggerFactory import java.util.regex.Matcher @@ -35,73 +37,77 @@ import java.util.regex.Matcher * There is a Tap4j project but we can't use it because it wants to parse the * entire TAP stream at once and won't parse it stream-wise. 
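 *
 * For reference, the TAP lines this stream parses look roughly like the
 * following (illustrative examples matching the regex below):
 * <pre>
 * 1..3
 * ok 1 [packaging] install works
 * not ok 2 [packaging] remove works
 * ok 3 # skip (needs systemd) [packaging] restart works
 * </pre>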
*/ -class TapLoggerOutputStream extends LoggingOutputStream { - ProgressLogger progressLogger - Logger logger - int testsCompleted = 0 - int testsFailed = 0 - int testsSkipped = 0 - Integer testCount - String countsFormat +public class TapLoggerOutputStream extends LoggingOutputStream { + private final ProgressLogger progressLogger + private boolean isStarted = false + private final Logger logger + private int testsCompleted = 0 + private int testsFailed = 0 + private int testsSkipped = 0 + private Integer testCount + private String countsFormat - TapLoggerOutputStream(Map args) { - logger = args.logger - progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) - progressLogger.setDescription("TAP output for $args.command") - progressLogger.started() - progressLogger.progress("Starting $args.command...") - } - - void flush() { - if (end == start) return - line(new String(buffer, start, end - start)) - start = end - } - - void line(String line) { - // System.out.print "===> $line\n" - if (testCount == null) { - try { - testCount = line.split('\\.').last().toInteger() - def length = (testCount as String).length() - countsFormat = "%0${length}d" - countsFormat = "[$countsFormat|$countsFormat|$countsFormat/$countsFormat]" - return - } catch (Exception e) { - throw new GradleScriptException( - 'Error parsing first line of TAP stream!!', e) - } - } - Matcher m = line =~ /(?<status>ok|not ok) \d+(?<skip> # skip (?<skipReason>\(.+\))?)? \[(?<suite>.+)\] (?<test>.+)/ - if (!m.matches()) { - /* These might be failure report lines or comments or whatever. It's hard - to tell and it doesn't matter. */ - logger.warn(line) - return - } - boolean skipped = m.group('skip') != null - boolean success = !skipped && m.group('status') == 'ok' - String skipReason = m.group('skipReason') - String suiteName = m.group('suite') - String testName = m.group('test') - - String status - if (skipped) { - status = "SKIPPED" - testsSkipped++ - } else if (success) { - status = " OK" - testsCompleted++ - } else { - status = " FAILED" - testsFailed++ + TapLoggerOutputStream(Map args) { + logger = args.logger + progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) + progressLogger.setDescription("TAP output for `${args.command}`") } - String counts = sprintf(countsFormat, - [testsCompleted, testsFailed, testsSkipped, testCount]) - progressLogger.progress("Tests $counts, $status [$suiteName] $testName") - if (!success) { - logger.warn(line + @Override + public void flush() { + if (isStarted == false) { + progressLogger.started() + isStarted = true + } + if (end == start) return + line(new String(buffer, start, end - start)) + start = end + } + + void line(String line) { + // System.out.print "===> $line\n" + if (testCount == null) { + try { + testCount = line.split('\\.').last().toInteger() + def length = (testCount as String).length() + countsFormat = "%0${length}d" + countsFormat = "[$countsFormat|$countsFormat|$countsFormat/$countsFormat]" + return + } catch (Exception e) { + throw new GradleScriptException( + 'Error parsing first line of TAP stream!!', e) + } + } + Matcher m = line =~ /(?<status>ok|not ok) \d+(?<skip> # skip (?<skipReason>\(.+\))?)? \[(?<suite>.+)\] (?<test>.+)/ + if (!m.matches()) { + /* These might be failure report lines or comments or whatever. It's hard to tell and it doesn't matter.
*/ + logger.warn(line) + return + } + boolean skipped = m.group('skip') != null + boolean success = !skipped && m.group('status') == 'ok' + String skipReason = m.group('skipReason') + String suiteName = m.group('suite') + String testName = m.group('test') + + String status + if (skipped) { + status = "SKIPPED" + testsSkipped++ + } else if (success) { + status = " OK" + testsCompleted++ + } else { + status = " FAILED" + testsFailed++ + } + + String counts = sprintf(countsFormat, + [testsCompleted, testsFailed, testsSkipped, testCount]) + progressLogger.progress("Tests $counts, $status [$suiteName] $testName") + if (!success) { + logger.warn(line) + } } - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy index 92b4a575eba..d79c2533fab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy @@ -18,11 +18,10 @@ */ package org.elasticsearch.gradle.vagrant -import org.gradle.api.DefaultTask -import org.gradle.api.tasks.TaskAction +import org.apache.commons.io.output.TeeOutputStream +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.tasks.Input import org.gradle.logging.ProgressLoggerFactory -import org.gradle.process.internal.ExecAction -import org.gradle.process.internal.ExecActionFactory import javax.inject.Inject @@ -30,43 +29,30 @@ import javax.inject.Inject * Runs a vagrant command. Pretty much like Exec task but with a nicer output * formatter and defaults to `vagrant` as first part of commandLine. */ -class VagrantCommandTask extends DefaultTask { - List commandLine - String boxName - ExecAction execAction +public class VagrantCommandTask extends LoggedExec { - VagrantCommandTask() { - execAction = getExecActionFactory().newExecAction() - } + @Input + String boxName - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException(); - } + public VagrantCommandTask() { + executable = 'vagrant' + project.afterEvaluate { + // It'd be nice if --machine-readable were, well, nice + standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream()) + } + } - @Inject - ExecActionFactory getExecActionFactory() { - throw new UnsupportedOperationException(); - } + protected OutputStream createLoggerOutputStream() { + return new VagrantLoggerOutputStream( + command: commandLine.join(' '), + factory: getProgressLoggerFactory(), + /* Vagrant tends to output a lot of stuff, but most of the important + stuff starts with ==> $box */ + squashedPrefix: "==> $boxName: ") + } - void boxName(String boxName) { - this.boxName = boxName - } - - void commandLine(Object... 
commandLine) { - this.commandLine = commandLine - } - - @TaskAction - void exec() { - // It'd be nice if --machine-readable were, well, nice - execAction.commandLine(['vagrant'] + commandLine) - execAction.setStandardOutput(new VagrantLoggerOutputStream( - command: commandLine.join(' '), - factory: getProgressLoggerFactory(), - /* Vagrant tends to output a lot of stuff, but most of the important stuff starts with ==> $box */ - squashedPrefix: "==> $boxName: ")) - execAction.execute(); - } + @Inject + ProgressLoggerFactory getProgressLoggerFactory() { + throw new UnsupportedOperationException(); + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy index 488c4511b1f..331a638b5ca 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy @@ -19,7 +19,9 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream +import org.gradle.api.logging.Logger import org.gradle.logging.ProgressLogger +import org.gradle.logging.ProgressLoggerFactory /** * Adapts an OutputStream being written to by vagrant into a ProgressLogger. It @@ -42,79 +44,60 @@ import org.gradle.logging.ProgressLogger * to catch so it can render the output like * "Heading text > stdout from the provisioner". */ -class VagrantLoggerOutputStream extends LoggingOutputStream { - static final String HEADING_PREFIX = '==> ' +public class VagrantLoggerOutputStream extends LoggingOutputStream { + private static final String HEADING_PREFIX = '==> ' - ProgressLogger progressLogger - String squashedPrefix - String lastLine = '' - boolean inProgressReport = false - String heading = '' + private final ProgressLogger progressLogger + private boolean isStarted = false + private String squashedPrefix + private String lastLine = '' + private boolean inProgressReport = false + private String heading = '' - VagrantLoggerOutputStream(Map args) { - progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) - progressLogger.setDescription("Vagrant $args.command") - progressLogger.started() - progressLogger.progress("Starting vagrant $args.command...") - squashedPrefix = args.squashedPrefix - } - - void flush() { - if (end == start) return - line(new String(buffer, start, end - start)) - start = end - } - - void line(String line) { - // debugPrintLine(line) // Uncomment me to log every incoming line - if (line.startsWith('\r\u001b')) { - /* We don't want to try to be a full terminal emulator but we want to - keep the escape sequences from leaking and catch _some_ of the - meaning.
*/ - line = line.substring(2) - if ('[K' == line) { - inProgressReport = true - } - return + VagrantLoggerOutputStream(Map args) { + progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) + progressLogger.setDescription("Vagrant output for `$args.command`") + squashedPrefix = args.squashedPrefix } - if (line.startsWith(squashedPrefix)) { - line = line.substring(squashedPrefix.length()) - inProgressReport = false - lastLine = line - if (line.startsWith(HEADING_PREFIX)) { - line = line.substring(HEADING_PREFIX.length()) - heading = line + ' > ' - } else { - line = heading + line - } - } else if (inProgressReport) { - inProgressReport = false - line = lastLine + line - } else { - return - } - // debugLogLine(line) // Uncomment me to log every line we add to the logger - progressLogger.progress(line) - } - void debugPrintLine(line) { - System.out.print '----------> ' - for (int i = start; i < end; i++) { - switch (buffer[i] as char) { - case ' '..'~': - System.out.print buffer[i] as char - break - default: - System.out.print '%' - System.out.print Integer.toHexString(buffer[i]) - } + @Override + public void flush() { + if (isStarted == false) { + progressLogger.started() + isStarted = true + } + if (end == start) return + line(new String(buffer, start, end - start)) + start = end } - System.out.print '\n' - } - void debugLogLine(line) { - System.out.print '>>>>>>>>>>> ' - System.out.print line - System.out.print '\n' - } + void line(String line) { + if (line.startsWith('\r\u001b')) { + /* We don't want to try to be a full terminal emulator but we want to + keep the escape sequences from leaking and catch _some_ of the + meaning. */ + line = line.substring(2) + if ('[K' == line) { + inProgressReport = true + } + return + } + if (line.startsWith(squashedPrefix)) { + line = line.substring(squashedPrefix.length()) + inProgressReport = false + lastLine = line + if (line.startsWith(HEADING_PREFIX)) { + line = line.substring(HEADING_PREFIX.length()) + heading = line + ' > ' + } else { + line = heading + line + } + } else if (inProgressReport) { + inProgressReport = false + line = lastLine + line + } else { + return + } + progressLogger.progress(line) + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java similarity index 73% rename from test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java rename to buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java index 13163cee029..cbfa31d1aaf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java +++ b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java @@ -25,14 +25,11 @@ import java.nio.file.FileVisitResult; import java.nio.file.FileVisitor; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.attribute.BasicFileAttributes; import java.util.HashSet; import java.util.Set; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.PathUtils; - /** * Checks that all tests in a directory are named according to our naming conventions. This is important because tests that do not follow * our conventions aren't run by gradle. 
This was once a glorious unit test but now that Elasticsearch is a multi-module project it must be @@ -46,30 +43,37 @@ import org.elasticsearch.common.io.PathUtils; * {@code --self-test} that is only run in the test:framework project. */ public class NamingConventionsCheck { - public static void main(String[] args) throws IOException, ClassNotFoundException { - NamingConventionsCheck check = new NamingConventionsCheck(); + public static void main(String[] args) throws IOException { + Class testClass = null; + Class integTestClass = null; + Path rootPath = null; boolean skipIntegTestsInDisguise = false; boolean selfTest = false; - int i = 0; - while (true) { - switch (args[i]) { - case "--skip-integ-tests-in-disguise": - skipIntegTestsInDisguise = true; - i++; - continue; - case "--self-test": - selfTest = true; - i++; - continue; - case "--": - i++; - break; - default: - fail("Expected -- before a path."); + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + switch (arg) { + case "--test-class": + testClass = loadClassWithoutInitializing(args[++i]); + break; + case "--integ-test-class": + integTestClass = loadClassWithoutInitializing(args[++i]); + break; + case "--skip-integ-tests-in-disguise": + skipIntegTestsInDisguise = true; + break; + case "--self-test": + selfTest = true; + break; + case "--": + rootPath = Paths.get(args[++i]); + break; + default: + fail("unsupported argument '" + arg + "'"); } - break; } - check.check(PathUtils.get(args[i])); + + NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass); + check.check(rootPath, skipIntegTestsInDisguise); if (selfTest) { assertViolation("WrongName", check.missingSuffix); @@ -82,17 +86,15 @@ public class NamingConventionsCheck { } // Now we should have no violations - assertNoViolations("Not all subclasses of " + ESTestCase.class.getSimpleName() + assertNoViolations("Not all subclasses of " + check.testClass.getSimpleName() + " match the naming convention. 
Concrete classes must end with [Tests]", check.missingSuffix); assertNoViolations("Classes ending with [Tests] are abstract or interfaces", check.notRunnable); assertNoViolations("Found inner classes that are tests, which are excluded from the test runner", check.innerClasses); - String classesToSubclass = String.join(",", ESTestCase.class.getSimpleName(), ESTestCase.class.getSimpleName(), - ESTokenStreamTestCase.class.getSimpleName(), LuceneTestCase.class.getSimpleName()); - assertNoViolations("Pure Unit-Test found must subclass one of [" + classesToSubclass + "]", check.pureUnitTest); - assertNoViolations("Classes ending with [Tests] must subclass [" + classesToSubclass + "]", check.notImplementing); - if (!skipIntegTestsInDisguise) { - assertNoViolations("Subclasses of ESIntegTestCase should end with IT as they are integration tests", - check.integTestsInDisguise); + assertNoViolations("Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", check.pureUnitTest); + assertNoViolations("Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", check.notImplementing); + if (skipIntegTestsInDisguise == false) { + assertNoViolations("Subclasses of " + check.integTestClass.getSimpleName() + + " should end with IT as they are integration tests", check.integTestsInDisguise); } } @@ -103,7 +105,15 @@ public class NamingConventionsCheck { private final Set> notRunnable = new HashSet<>(); private final Set> innerClasses = new HashSet<>(); - public void check(Path rootPath) throws IOException { + private final Class testClass; + private final Class integTestClass; + + public NamingConventionsCheck(Class testClass, Class integTestClass) { + this.testClass = testClass; + this.integTestClass = integTestClass; + } + + public void check(Path rootPath, boolean skipTestsInDisguised) throws IOException { Files.walkFileTree(rootPath, new FileVisitor() { /** * The package name of the directory we are currently visiting. Kept as a string rather than something fancy because we load @@ -136,9 +146,9 @@ public class NamingConventionsCheck { String filename = file.getFileName().toString(); if (filename.endsWith(".class")) { String className = filename.substring(0, filename.length() - ".class".length()); - Class clazz = loadClass(className); + Class clazz = loadClassWithoutInitializing(packageName + className); if (clazz.getName().endsWith("Tests")) { - if (ESIntegTestCase.class.isAssignableFrom(clazz)) { + if (skipTestsInDisguised == false && integTestClass.isAssignableFrom(clazz)) { integTestsInDisguise.add(clazz); } if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) { @@ -164,15 +174,7 @@ public class NamingConventionsCheck { } private boolean isTestCase(Class clazz) { - return LuceneTestCase.class.isAssignableFrom(clazz); - } - - private Class loadClass(String className) { - try { - return Thread.currentThread().getContextClassLoader().loadClass(packageName + className); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); - } + return testClass.isAssignableFrom(clazz); } @Override @@ -186,7 +188,6 @@ public class NamingConventionsCheck { * Fail the process if there are any violations in the set. Named to look like a junit assertion even though it isn't because it is * similar enough. 
*/ - @SuppressForbidden(reason = "System.err/System.exit") private static void assertNoViolations(String message, Set<Class<?>> set) { if (false == set.isEmpty()) { System.err.println(message + ":"); @@ -201,10 +202,9 @@ * Fail the process if we didn't detect a particular violation. Named to look like a junit assertion even though it isn't because it is * similar enough. */ - @SuppressForbidden(reason = "System.err/System.exit") - private static void assertViolation(String className, Set<Class<?>> set) throws ClassNotFoundException { - className = "org.elasticsearch.test.test.NamingConventionsCheckBadClasses$" + className; - if (false == set.remove(Class.forName(className))) { + private static void assertViolation(String className, Set<Class<?>> set) { + className = "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className; + if (false == set.remove(loadClassWithoutInitializing(className))) { System.err.println("Error in NamingConventionsCheck! Expected [" + className + "] to be a violation but wasn't."); System.exit(1); } @@ -213,9 +213,20 @@ /** * Fail the process with the provided message. */ - @SuppressForbidden(reason = "System.err/System.exit") private static void fail(String reason) { System.err.println(reason); System.exit(1); } + + static Class loadClassWithoutInitializing(String name) { + try { + return Class.forName(name, + // Don't initialize the class to save time. Not needed for this test and this doesn't share a VM with any other tests. + false, + // Use our classloader rather than the bootstrap class loader. + NamingConventionsCheck.class.getClassLoader()); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } } diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties similarity index 71% rename from core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties rename to buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties index 4487d7c8de9..fb264ff4fd0 100644 --- a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties @@ -1,10 +1,10 @@ -################################################################ +# # Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. +# not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 @@ -15,7 +15,6 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -################################################################ -description=This is a description for a dummy test site plugin.
-version=0.0.7-BOND-SITE +# +implementation-class=org.elasticsearch.gradle.doc.DocsTestPlugin diff --git a/buildSrc/src/main/resources/beat.wav b/buildSrc/src/main/resources/beat.wav deleted file mode 100644 index 4083a4ce618..00000000000 Binary files a/buildSrc/src/main/resources/beat.wav and /dev/null differ diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index de47736913f..706ef46ffa1 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml diff --git a/buildSrc/src/main/resources/deb/postinst.ftl b/buildSrc/src/main/resources/deb/postinst.ftl index 5f67242c265..9acfc0f084e 100644 --- a/buildSrc/src/main/resources/deb/postinst.ftl +++ b/buildSrc/src/main/resources/deb/postinst.ftl @@ -1,2 +1,2 @@ -#!/bin/sh -e +#!/bin/bash -e <% commands.each {command -> %><%= command %><% } %> diff --git a/buildSrc/src/main/resources/deb/preinst.ftl b/buildSrc/src/main/resources/deb/preinst.ftl index 5f67242c265..9acfc0f084e 100644 --- a/buildSrc/src/main/resources/deb/preinst.ftl +++ b/buildSrc/src/main/resources/deb/preinst.ftl @@ -1,2 +1,2 @@ -#!/bin/sh -e +#!/bin/bash -e <% commands.each {command -> %><%= command %><% } %> diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt index 0e5ce884d9d..37f03f4c91c 100644 --- a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt @@ -32,4 +32,7 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey() @defaultMessage Soon to be removed org.apache.lucene.document.FieldType#numericType() -org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232 +@defaultMessage Don't use MethodHandles in slow ways, don't be lenient in tests.
+java.lang.invoke.MethodHandle#invoke(java.lang.Object[]) +java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[]) +java.lang.invoke.MethodHandle#invokeWithArguments(java.util.List) diff --git a/buildSrc/src/main/resources/forbidden/es-test-signatures.txt b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt index bd6744ee05f..08e591e1cfa 100644 --- a/buildSrc/src/main/resources/forbidden/es-test-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt @@ -21,5 +21,7 @@ com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded r org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead org.apache.lucene.util.LuceneTestCase$Slow @ Don't write slow tests org.junit.Ignore @ Use AwaitsFix instead +org.apache.lucene.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point! +com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point! org.junit.Test @defaultMessage Just name your test method testFooBar diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java b/buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java similarity index 59% rename from test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java rename to buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java index 233e9fe5975..4fc88b3afc5 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java +++ b/buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java @@ -17,9 +17,7 @@ * under the License. */ -package org.elasticsearch.test.test; - -import org.elasticsearch.test.ESTestCase; +package org.elasticsearch.test; import junit.framework.TestCase; @@ -30,21 +28,35 @@ public class NamingConventionsCheckBadClasses { public static final class NotImplementingTests { } - public static final class WrongName extends ESTestCase { + public static final class WrongName extends UnitTestCase { + /* + * Dummy test so the tests pass. We do this *and* skip the tests so anyone who jumps back to a branch without these tests can still + * compile without a failure. That is because clean doesn't actually clean these.... 
+ */ + public void testDummy() {} } - public static abstract class DummyAbstractTests extends ESTestCase { + public abstract static class DummyAbstractTests extends UnitTestCase { } public interface DummyInterfaceTests { } - public static final class InnerTests extends ESTestCase { + public static final class InnerTests extends UnitTestCase { + public void testDummy() {} } - public static final class WrongNameTheSecond extends ESTestCase { + public static final class WrongNameTheSecond extends UnitTestCase { + public void testDummy() {} } public static final class PlainUnit extends TestCase { + public void testDummy() {} + } + + public abstract static class UnitTestCase extends TestCase { + } + + public abstract static class IntegTestCase extends UnitTestCase { } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0f6a09327d6..7565488d4ab 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ -elasticsearch = 5.0.0-alpha2 -lucene = 6.0.0 +elasticsearch = 5.0.0-alpha5 +lucene = 6.1.0 # optional dependencies spatial4j = 0.6 @@ -7,15 +7,16 @@ jts = 1.13 jackson = 2.7.1 log4j = 1.2.17 slf4j = 1.6.2 -jna = 4.1.0 - +jna = 4.2.2 # test dependencies randomizedrunner = 2.3.2 junit = 4.11 -# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use -# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698 -httpclient = 4.3.6 -httpcore = 4.3.3 +httpclient = 4.5.2 +httpcore = 4.4.4 commonslogging = 1.1.3 commonscodec = 1.10 +hamcrest = 1.3 +securemock = 1.2 +# benchmark dependencies +jmh = 1.12 diff --git a/client/rest/build.gradle b/client/rest/build.gradle new file mode 100644 index 00000000000..4623fbd8c2c --- /dev/null +++ b/client/rest/build.gradle @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.JavaVersion + +apply plugin: 'elasticsearch.build' +apply plugin: 'ru.vyarus.animalsniffer' + +targetCompatibility = JavaVersion.VERSION_1_7 +sourceCompatibility = JavaVersion.VERSION_1_7 + +group = 'org.elasticsearch.client' + +dependencies { + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + + testCompile "org.elasticsearch.client:test:${version}" + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" + signature "org.codehaus.mojo.signature:java17:1.0@signature" +} + +forbiddenApisMain { + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +//JarHell is part of es core, which we don't want to pull in +jarHell.enabled=false + +namingConventions { + testClass = 'org.elasticsearch.client.RestClientTestCase' + //we don't have integration tests + skipIntegTestInDisguise = true +} + +thirdPartyAudit.excludes = [ + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Category', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' +] diff --git a/plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1 b/client/rest/licenses/commons-codec-1.10.jar.sha1 similarity index 100% rename from plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1 rename to client/rest/licenses/commons-codec-1.10.jar.sha1 diff --git a/plugins/discovery-azure/licenses/commons-codec-LICENSE.txt b/client/rest/licenses/commons-codec-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-codec-LICENSE.txt rename to client/rest/licenses/commons-codec-LICENSE.txt diff --git a/client/rest/licenses/commons-codec-NOTICE.txt b/client/rest/licenses/commons-codec-NOTICE.txt new file mode 100644 index 00000000000..1da9af50f60 --- /dev/null +++ b/client/rest/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. 
+Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 b/client/rest/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 00000000000..5b8f029e582 --- /dev/null +++ b/client/rest/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/commons-logging-LICENSE.txt b/client/rest/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/commons-logging-LICENSE.txt rename to client/rest/licenses/commons-logging-LICENSE.txt diff --git a/client/rest/licenses/commons-logging-NOTICE.txt b/client/rest/licenses/commons-logging-NOTICE.txt new file mode 100644 index 00000000000..556bd03951d --- /dev/null +++ b/client/rest/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,6 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/httpclient-4.5.2.jar.sha1 b/client/rest/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 00000000000..6937112a09f --- /dev/null +++ b/client/rest/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/licenses/httpclient-LICENSE.txt rename to client/rest/licenses/httpclient-LICENSE.txt diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient-NOTICE.txt new file mode 100644 index 00000000000..91e5c40c4c6 --- /dev/null +++ b/client/rest/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/httpcore-4.4.4.jar.sha1 b/client/rest/licenses/httpcore-4.4.4.jar.sha1 new file mode 100644 index 00000000000..ef0c257e012 --- /dev/null +++ b/client/rest/licenses/httpcore-4.4.4.jar.sha1 @@ -0,0 +1 @@ +b31526a230871fbe285fbcbe2813f9c0839ae9b0 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt new file mode 100644 index 00000000000..32f01eda18f --- /dev/null +++ b/client/rest/licenses/httpcore-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore-NOTICE.txt new file mode 100644 index 00000000000..91e5c40c4c6 --- /dev/null +++ b/client/rest/licenses/httpcore-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java new file mode 100644 index 00000000000..a7b222da70e --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.concurrent.TimeUnit; + +/** + * Holds the state of a dead connection to a host. Keeps track of how many failed attempts were performed and + * when the host should be retried (based on number of previous failed attempts). + * Class is immutable, a new copy of it should be created each time the state has to be changed. 
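+ * <p>
+ * Illustrative backoff schedule (editorial note, derived from the constructor formula below): the first
+ * failure marks a host dead for 1 minute; every further consecutive failure multiplies the wait by
+ * roughly sqrt(2), so it doubles every two failures (1m, ~1.4m, 2m, ~2.8m, 4m, ...) until it is
+ * capped at 30 minutes.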
+ */
+final class DeadHostState {
+
+    private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
+    private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
+
+    static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState();
+
+    private final int failedAttempts;
+    private final long deadUntilNanos;
+
+    private DeadHostState() {
+        this.failedAttempts = 1;
+        this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS;
+    }
+
+    /**
+     * We keep track of how many times a certain node fails consecutively. The higher that number is, the longer we will wait
+     * to retry that same node again. Minimum is 1 minute (for a node that only failed once), maximum is 30 minutes (for a node
+     * that failed many consecutive times).
+     */
+    DeadHostState(DeadHostState previousDeadHostState) {
+        long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
+                MAX_CONNECTION_TIMEOUT_NANOS);
+        this.deadUntilNanos = System.nanoTime() + timeoutNanos;
+        this.failedAttempts = previousDeadHostState.failedAttempts + 1;
+    }
+
+    /**
+     * Returns the timestamp (in nanos) until which the host is supposed to stay dead without being retried.
+     * After that point the host should be retried.
+     */
+    long getDeadUntilNanos() {
+        return deadUntilNanos;
+    }
+
+    @Override
+    public String toString() {
+        return "DeadHostState{" +
+                "failedAttempts=" + failedAttempts +
+                ", deadUntilNanos=" + deadUntilNanos +
+                '}';
+    }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
similarity index 80%
rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
rename to client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
index 480fc7b2f01..df08ae5a8d1 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
@@ -16,8 +16,9 @@
 * specific language governing permissions and limitations
 * under the License.
*/ -package org.elasticsearch.test.rest.client.http; +package org.elasticsearch.client; +import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import java.net.URI; @@ -25,11 +26,11 @@ import java.net.URI; /** * Allows to send DELETE requests providing a body (not supported out of the box) */ -public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { +final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { - public final static String METHOD_NAME = "DELETE"; + static final String METHOD_NAME = HttpDelete.METHOD_NAME; - public HttpDeleteWithEntity(final URI uri) { + HttpDeleteWithEntity(final URI uri) { setURI(uri); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java similarity index 81% rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java rename to client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java index aa0129f4660..a3846beefe4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java +++ b/client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java @@ -16,20 +16,21 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.test.rest.client.http; +package org.elasticsearch.client; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpGet; import java.net.URI; /** * Allows to send GET requests providing a body (not supported out of the box) */ -public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { +final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { - public final static String METHOD_NAME = "GET"; + static final String METHOD_NAME = HttpGet.METHOD_NAME; - public HttpGetWithEntity(final URI uri) { + HttpGetWithEntity(final URI uri) { setURI(uri); } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java new file mode 100644 index 00000000000..24e6881fa1e --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; +import org.apache.http.RequestLine; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.entity.BufferedHttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.util.EntityUtils; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +/** + * Helper class that exposes static methods to unify the way requests are logged. + * Includes trace logging to log complete requests and responses in curl format. + * Useful for debugging, manually sending logged requests via curl and checking their responses. + * Trace logging is a feature that all the language clients provide. + */ +final class RequestLogger { + + private static final Log tracer = LogFactory.getLog("tracer"); + + private RequestLogger() { + } + + /** + * Logs a request that yielded a response + */ + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + if (logger.isDebugEnabled()) { + logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + + "] returned [" + httpResponse.getStatusLine() + "]"); + } + if (tracer.isTraceEnabled()) { + String requestLine; + try { + requestLine = buildTraceRequest(request, host); + } catch(IOException e) { + requestLine = ""; + tracer.trace("error while reading request for trace purposes", e); + } + String responseLine; + try { + responseLine = buildTraceResponse(httpResponse); + } catch(IOException e) { + responseLine = ""; + tracer.trace("error while reading response for trace purposes", e); + } + tracer.trace(requestLine + '\n' + responseLine); + } + } + + /** + * Logs a request that failed + */ + static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) { + if (logger.isDebugEnabled()) { + logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e); + } + if (tracer.isTraceEnabled()) { + String traceRequest; + try { + traceRequest = buildTraceRequest(request, host); + } catch (IOException e1) { + tracer.trace("error while reading request for trace purposes", e); + traceRequest = ""; + } + tracer.trace(traceRequest); + } + } + + /** + * Creates curl output for given request + */ + static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'"; + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; + if (enclosingRequest.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = enclosingRequest.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(enclosingRequest.getEntity()); + enclosingRequest.setEntity(entity); + } + requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } + } + return requestLine; + } + + /** + * Creates curl output for given response + */ + static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + 
String responseLine = "# " + httpResponse.getStatusLine().toString(); + for (Header header : httpResponse.getAllHeaders()) { + responseLine += "\n# " + header.getName() + ": " + header.getValue(); + } + responseLine += "\n#"; + HttpEntity entity = httpResponse.getEntity(); + if (entity != null) { + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(entity); + } + httpResponse.setEntity(entity); + ContentType contentType = ContentType.get(entity); + Charset charset = StandardCharsets.UTF_8; + if (contentType != null) { + charset = contentType.getCharset(); + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) { + String line; + while( (line = reader.readLine()) != null) { + responseLine += "\n# " + line; + } + } + } + return responseLine; + } + + private static String getUri(RequestLine requestLine) { + if (requestLine.getUri().charAt(0) != '/') { + return "/" + requestLine.getUri(); + } + return requestLine.getUri(); + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java new file mode 100644 index 00000000000..f7685b27bb9 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.RequestLine; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Objects; + +/** + * Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with + * its corresponding {@link RequestLine} and {@link HttpHost}. + * It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool. 
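+ * <p>
+ * A minimal usage sketch (illustrative only; assumes a {@link RestClient} instance named {@code restClient}):
+ * <pre>
+ * try (Response response = restClient.performRequest("GET", "/")) {
+ *     String body = EntityUtils.toString(response.getEntity());
+ * }
+ * </pre>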
+ */
+public class Response implements Closeable {
+
+    private final RequestLine requestLine;
+    private final HttpHost host;
+    private final CloseableHttpResponse response;
+
+    Response(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) {
+        Objects.requireNonNull(requestLine, "requestLine cannot be null");
+        Objects.requireNonNull(host, "node cannot be null");
+        Objects.requireNonNull(response, "response cannot be null");
+        this.requestLine = requestLine;
+        this.host = host;
+        this.response = response;
+    }
+
+    /**
+     * Returns the request line that generated this response
+     */
+    public RequestLine getRequestLine() {
+        return requestLine;
+    }
+
+    /**
+     * Returns the node that returned this response
+     */
+    public HttpHost getHost() {
+        return host;
+    }
+
+    /**
+     * Returns the status line of the current response
+     */
+    public StatusLine getStatusLine() {
+        return response.getStatusLine();
+    }
+
+    /**
+     * Returns all the response headers
+     */
+    public Header[] getHeaders() {
+        return response.getAllHeaders();
+    }
+
+    /**
+     * Returns the value of the first header of this message with the specified name.
+     * If there is more than one matching header in the message the first element is returned.
+     * If there is no matching header in the message null is returned.
+     */
+    public String getHeader(String name) {
+        Header header = response.getFirstHeader(name);
+        if (header == null) {
+            return null;
+        }
+        return header.getValue();
+    }
+
+    /**
+     * Returns the response body if available, null otherwise
+     * @see HttpEntity
+     */
+    public HttpEntity getEntity() {
+        return response.getEntity();
+    }
+
+    @Override
+    public String toString() {
+        return "Response{" +
+                "requestLine=" + requestLine +
+                ", host=" + host +
+                ", response=" + response.getStatusLine() +
+                '}';
+    }
+
+    @Override
+    public void close() throws IOException {
+        this.response.close();
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
new file mode 100644
index 00000000000..44f59cce7db
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error.
+ * Note that the response body gets passed in as a string and read eagerly; the underlying Response object
+ * is then expected to be closed and used only to read metadata like the status line, request line and response headers.
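+ * <p>
+ * For example (an illustrative sketch; assumes a {@link RestClient} instance named {@code restClient}):
+ * <pre>
+ * try {
+ *     restClient.performRequest("GET", "/index/type/does-not-exist");
+ * } catch (ResponseException e) {
+ *     int statusCode = e.getResponse().getStatusLine().getStatusCode();
+ *     String body = e.getResponseBody();
+ * }
+ * </pre>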
+ */
+public class ResponseException extends IOException {
+
+    private final Response response;
+    private final String responseBody;
+
+    ResponseException(Response response, String responseBody) throws IOException {
+        super(buildMessage(response, responseBody));
+        this.response = response;
+        this.responseBody = responseBody;
+    }
+
+    private static String buildMessage(Response response, String responseBody) {
+        String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
+                + ": " + response.getStatusLine().toString();
+        if (responseBody != null) {
+            message += "\n" + responseBody;
+        }
+        return message;
+    }
+
+    /**
+     * Returns the {@link Response} that caused this exception to be thrown.
+     * Expected to be used only to read metadata like status line, request line, response headers. The response body should
+     * be retrieved using {@link #getResponseBody()}.
+     */
+    public Response getResponse() {
+        return response;
+    }
+
+    /**
+     * Returns the response body as a string or null if there wasn't any.
+     * The body is eagerly consumed when a ResponseException gets created, and its corresponding Response
+     * gets closed straightaway so this method is the only way to get back the response body that was returned.
+     */
+    public String getResponseBody() {
+        return responseBody;
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
new file mode 100644
index 00000000000..e3bb1b3c507
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -0,0 +1,508 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.Consts; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpOptions; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.methods.HttpTrace; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.config.Registry; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.entity.ContentType; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.util.EntityUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Client that connects to an elasticsearch cluster through http. + * Must be created using {@link Builder}, which allows to set all the different options or just rely on defaults. + * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later + * by calling {@link #setHosts(HttpHost...)}. + * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When + * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and + * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously + * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that + * deserve a retry) are retried till one responds or none of them does, in which case an {@link IOException} will be thrown. + * + * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format. + */ +public final class RestClient implements Closeable { + + private static final Log logger = LogFactory.getLog(RestClient.class); + public static ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8); + + private final CloseableHttpClient client; + //we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy + //when we create the HttpClient instance on our own as there would be two different ways to set the default headers. 
+    private final Header[] defaultHeaders;
+    private final long maxRetryTimeoutMillis;
+    private final AtomicInteger lastHostIndex = new AtomicInteger(0);
+    private volatile Set<HttpHost> hosts;
+    private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
+    private final FailureListener failureListener;
+
+    private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
+                       HttpHost[] hosts, FailureListener failureListener) {
+        this.client = client;
+        this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
+        this.defaultHeaders = defaultHeaders;
+        this.failureListener = failureListener;
+        setHosts(hosts);
+    }
+
+    /**
+     * Replaces the hosts that the client communicates with.
+     * @see HttpHost
+     */
+    public synchronized void setHosts(HttpHost... hosts) {
+        if (hosts == null || hosts.length == 0) {
+            throw new IllegalArgumentException("hosts must not be null nor empty");
+        }
+        Set<HttpHost> httpHosts = new HashSet<>();
+        for (HttpHost host : hosts) {
+            Objects.requireNonNull(host, "host cannot be null");
+            httpHosts.add(host);
+        }
+        this.hosts = Collections.unmodifiableSet(httpHosts);
+        this.blacklist.clear();
+    }
+
+    /**
+     * Sends a request to the elasticsearch cluster that the current client points to.
+     * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters and request body.
+     *
+     * @param method the http method
+     * @param endpoint the path of the request (without host and port)
+     * @param headers the optional request headers
+     * @return the response returned by elasticsearch
+     * @throws IOException in case of a problem or the connection was aborted
+     * @throws ClientProtocolException in case of an http protocol error
+     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+     */
+    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
+        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
+    }
+
+    /**
+     * Sends a request to the elasticsearch cluster that the current client points to.
+     * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
+     *
+     * @param method the http method
+     * @param endpoint the path of the request (without host and port)
+     * @param params the query_string parameters
+     * @param headers the optional request headers
+     * @return the response returned by elasticsearch
+     * @throws IOException in case of a problem or the connection was aborted
+     * @throws ClientProtocolException in case of an http protocol error
+     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+     */
+    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
+        return performRequest(method, endpoint, params, null, headers);
+    }
+
+    /**
+     * Sends a request to the elasticsearch cluster that the current client points to.
+     * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
+     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
+     * the later they will be retried). In case of failures, all of the alive nodes (or dead nodes that deserve a retry) are retried
+     * until one responds or none of them does, in which case an {@link IOException} will be thrown.
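+     *
+     * For illustration only, a minimal usage sketch; the endpoint and parameter below are made-up examples:
+     * <pre>{@code
+     * try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
+     *      Response response = restClient.performRequest("GET", "/_cluster/health",
+     *              Collections.singletonMap("pretty", "true"))) {
+     *     //inspect response.getStatusLine(), response.getHeaders() and response.getEntity() here
+     * }
+     * }</pre>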
+     *
+     * @param method the http method
+     * @param endpoint the path of the request (without host and port)
+     * @param params the query_string parameters
+     * @param entity the body of the request, null if not applicable
+     * @param headers the optional request headers
+     * @return the response returned by elasticsearch
+     * @throws IOException in case of a problem or the connection was aborted
+     * @throws ClientProtocolException in case of an http protocol error
+     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+     */
+    public Response performRequest(String method, String endpoint, Map<String, String> params,
+                                   HttpEntity entity, Header... headers) throws IOException {
+        URI uri = buildUri(endpoint, params);
+        HttpRequestBase request = createHttpRequest(method, uri, entity);
+        setHeaders(request, headers);
+        //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt
+        long retryTimeoutMillis = Math.round(this.maxRetryTimeoutMillis / (float)100 * 98);
+        IOException lastSeenException = null;
+        long startTime = System.nanoTime();
+        for (HttpHost host : nextHost()) {
+            if (lastSeenException != null) {
+                //in case we are retrying, check whether maxRetryTimeout has been reached, in which case an exception will be thrown
+                long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
+                long timeout = retryTimeoutMillis - timeElapsedMillis;
+                if (timeout <= 0) {
+                    IOException retryTimeoutException = new IOException(
+                            "request retries exceeded max retry timeout [" + retryTimeoutMillis + "]");
+                    retryTimeoutException.addSuppressed(lastSeenException);
+                    throw retryTimeoutException;
+                }
+                //also reset the request to make it reusable for the next attempt
+                request.reset();
+            }
+
+            CloseableHttpResponse httpResponse;
+            try {
+                httpResponse = client.execute(host, request);
+            } catch(IOException e) {
+                RequestLogger.logFailedRequest(logger, request, host, e);
+                onFailure(host);
+                lastSeenException = addSuppressedException(lastSeenException, e);
+                continue;
+            }
+            Response response = new Response(request.getRequestLine(), host, httpResponse);
+            int statusCode = response.getStatusLine().getStatusCode();
+            RequestLogger.logResponse(logger, request, host, httpResponse);
+            if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404)) {
+                onResponse(host);
+                return response;
+            }
+            String responseBody;
+            try {
+                if (response.getEntity() == null) {
+                    responseBody = null;
+                } else {
+                    responseBody = EntityUtils.toString(response.getEntity());
+                }
+            } finally {
+                response.close();
+            }
+            lastSeenException = addSuppressedException(lastSeenException, new ResponseException(response, responseBody));
+            switch(statusCode) {
+                case 502:
+                case 503:
+                case 504:
+                    //mark host dead and retry against next one
+                    onFailure(host);
+                    break;
+                default:
+                    //mark host alive and don't retry, as the error should be a request problem
+                    onResponse(host);
+                    throw lastSeenException;
+            }
+        }
+        //we get here only when we tried all nodes and they all failed
+        assert lastSeenException != null;
+        throw lastSeenException;
+    }
+
+    private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
+        Objects.requireNonNull(requestHeaders, "request headers must not be null");
+        for (Header defaultHeader : defaultHeaders) {
+            httpRequest.setHeader(defaultHeader);
+        }
+        for (Header requestHeader : requestHeaders) {
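+            //a header passed in at request time replaces any default header with the same name,
+            //since setHeader overrides rather than adds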
+            Objects.requireNonNull(requestHeader, "request header must not be null");
+            httpRequest.setHeader(requestHeader);
+        }
+    }
+
+    /**
+     * Returns an iterator of hosts to be used for a request call.
+     * Ideally, the first host is retrieved from the iterator and used successfully for the request.
+     * Otherwise, after each failure the next host should be retrieved from the iterator so that the request can be retried until
+     * the iterator is exhausted. The maximum total number of attempts is equal to the number of hosts available in the iterator.
+     * The returned iterator will never be empty; in case there are no healthy hosts available, nor dead ones ready to be
+     * retried, the single dead host that is closest to being retried gets returned.
+     */
+    private Iterable<HttpHost> nextHost() {
+        Set<HttpHost> filteredHosts = new HashSet<>(hosts);
+        for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
+            if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
+                filteredHosts.remove(entry.getKey());
+            }
+        }
+
+        if (filteredHosts.isEmpty()) {
+            //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
+            List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
+            Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
+                @Override
+                public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
+                    return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
+                }
+            });
+            HttpHost deadHost = sortedHosts.get(0).getKey();
+            logger.trace("resurrecting host [" + deadHost + "]");
+            return Collections.singleton(deadHost);
+        }
+
+        List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
+        Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
+        return rotatedHosts;
+    }
+
+    /**
+     * Called after each successful request call.
+     * Receives as an argument the host that was used for the successful request.
+     */
+    private void onResponse(HttpHost host) {
+        DeadHostState removedHost = this.blacklist.remove(host);
+        if (logger.isDebugEnabled() && removedHost != null) {
+            logger.debug("removed host [" + host + "] from blacklist");
+        }
+    }
+
+    /**
+     * Called after each failed attempt.
+     * Receives as an argument the host that was used for the failed attempt.
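+     * Implementation note: the blacklist is updated through an optimistic putIfAbsent/replace loop rather than
+     * a lock, so concurrent failures against the same host are all recorded without blocking each other.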
+     */
+    private void onFailure(HttpHost host) throws IOException {
+        while(true) {
+            DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, DeadHostState.INITIAL_DEAD_STATE);
+            if (previousDeadHostState == null) {
+                logger.debug("added host [" + host + "] to blacklist");
+                break;
+            }
+            if (blacklist.replace(host, previousDeadHostState, new DeadHostState(previousDeadHostState))) {
+                logger.debug("updated host [" + host + "] already in blacklist");
+                break;
+            }
+        }
+        failureListener.onFailure(host);
+    }
+
+    @Override
+    public void close() throws IOException {
+        client.close();
+    }
+
+    private static IOException addSuppressedException(IOException suppressedException, IOException currentException) {
+        if (suppressedException != null) {
+            currentException.addSuppressed(suppressedException);
+        }
+        return currentException;
+    }
+
+    private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) {
+        switch(method.toUpperCase(Locale.ROOT)) {
+            case HttpDeleteWithEntity.METHOD_NAME:
+                return addRequestBody(new HttpDeleteWithEntity(uri), entity);
+            case HttpGetWithEntity.METHOD_NAME:
+                return addRequestBody(new HttpGetWithEntity(uri), entity);
+            case HttpHead.METHOD_NAME:
+                return addRequestBody(new HttpHead(uri), entity);
+            case HttpOptions.METHOD_NAME:
+                return addRequestBody(new HttpOptions(uri), entity);
+            case HttpPatch.METHOD_NAME:
+                return addRequestBody(new HttpPatch(uri), entity);
+            case HttpPost.METHOD_NAME:
+                return addRequestBody(new HttpPost(uri), entity);
+            case HttpPut.METHOD_NAME:
+                return addRequestBody(new HttpPut(uri), entity);
+            case HttpTrace.METHOD_NAME:
+                return addRequestBody(new HttpTrace(uri), entity);
+            default:
+                throw new UnsupportedOperationException("http method not supported: " + method);
+        }
+    }
+
+    private static HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) {
+        if (entity != null) {
+            if (httpRequest instanceof HttpEntityEnclosingRequestBase) {
+                ((HttpEntityEnclosingRequestBase)httpRequest).setEntity(entity);
+            } else {
+                throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported");
+            }
+        }
+        return httpRequest;
+    }
+
+    private static URI buildUri(String path, Map<String, String> params) {
+        Objects.requireNonNull(params, "params must not be null");
+        try {
+            URIBuilder uriBuilder = new URIBuilder(path);
+            for (Map.Entry<String, String> param : params.entrySet()) {
+                uriBuilder.addParameter(param.getKey(), param.getValue());
+            }
+            return uriBuilder.build();
+        } catch(URISyntaxException e) {
+            throw new IllegalArgumentException(e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Returns a new {@link Builder} to help with {@link RestClient} creation.
+     */
+    public static Builder builder(HttpHost... hosts) {
+        return new Builder(hosts);
+    }
+
+    /**
+     * Rest client builder. Helps create a new {@link RestClient}.
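+     * For illustration only, a build sequence using the available options could look as follows (hosts, timeout
+     * and header values are made-up examples):
+     * <pre>{@code
+     * RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200), new HttpHost("localhost", 9201))
+     *         .setMaxRetryTimeoutMillis(30000)
+     *         .setDefaultHeaders(new Header[]{new BasicHeader("X-Example", "value")})
+     *         .build();
+     * }</pre>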
+     */
+    public static final class Builder {
+        public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
+        public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
+        public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
+        public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
+
+        private static final Header[] EMPTY_HEADERS = new Header[0];
+
+        private final HttpHost[] hosts;
+        private CloseableHttpClient httpClient;
+        private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
+        private Header[] defaultHeaders = EMPTY_HEADERS;
+        private FailureListener failureListener;
+
+        /**
+         * Creates a new builder instance and sets the hosts that the client will send requests to.
+         */
+        private Builder(HttpHost... hosts) {
+            if (hosts == null || hosts.length == 0) {
+                throw new IllegalArgumentException("no hosts provided");
+            }
+            this.hosts = hosts;
+        }
+
+        /**
+         * Sets the http client. If not set, a default one will be created by calling
+         * {@link #createDefaultHttpClient(Registry)}.
+         *
+         * @see CloseableHttpClient
+         */
+        public Builder setHttpClient(CloseableHttpClient httpClient) {
+            this.httpClient = httpClient;
+            return this;
+        }
+
+        /**
+         * Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
+         * Defaults to {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
+         *
+         * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
+         */
+        public Builder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
+            if (maxRetryTimeoutMillis <= 0) {
+                throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0");
+            }
+            this.maxRetryTimeout = maxRetryTimeoutMillis;
+            return this;
+        }
+
+        /**
+         * Sets the default request headers, which will be sent along with each request.
+         * A request-time header with the same name overrides the corresponding default header.
+         */
+        public Builder setDefaultHeaders(Header[] defaultHeaders) {
+            Objects.requireNonNull(defaultHeaders, "default headers must not be null");
+            for (Header defaultHeader : defaultHeaders) {
+                Objects.requireNonNull(defaultHeader, "default header must not be null");
+            }
+            this.defaultHeaders = defaultHeaders;
+            return this;
+        }
+
+        /**
+         * Sets the {@link FailureListener} to be notified for each request failure.
+         */
+        public Builder setFailureListener(FailureListener failureListener) {
+            Objects.requireNonNull(failureListener, "failure listener must not be null");
+            this.failureListener = failureListener;
+            return this;
+        }
+
+        /**
+         * Creates a new {@link RestClient} based on the provided configuration.
+         */
+        public RestClient build() {
+            if (httpClient == null) {
+                httpClient = createDefaultHttpClient(null);
+            }
+            if (failureListener == null) {
+                failureListener = new FailureListener();
+            }
+            return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
+        }
+
+        /**
+         * Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
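+         * The defaults are deliberately conservative: at most 10 connections per route and 30 in total, a connect timeout
+         * of {@link #DEFAULT_CONNECT_TIMEOUT_MILLIS}, a socket timeout of {@link #DEFAULT_SOCKET_TIMEOUT_MILLIS} and a
+         * connection request timeout of {@link #DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS}.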
+         *
+         * @see CloseableHttpClient
+         */
+        public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
+            PoolingHttpClientConnectionManager connectionManager;
+            if (socketFactoryRegistry == null) {
+                connectionManager = new PoolingHttpClientConnectionManager();
+            } else {
+                connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
+            }
+            //the connection manager defaults (2 connections per route, 20 total) are too constraining
+            connectionManager.setDefaultMaxPerRoute(10);
+            connectionManager.setMaxTotal(30);
+
+            //default timeouts are all infinite
+            RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
+                    .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
+                    .setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
+            return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
+        }
+    }
+
+    /**
+     * Listener that gets notified whenever a request failure happens. Useful when sniffing is enabled, so that we can sniff on failure.
+     * The default implementation is a no-op.
+     */
+    public static class FailureListener {
+        /**
+         * Notifies that the host provided as argument has just failed.
+         */
+        public void onFailure(HttpHost host) throws IOException {
+
+        }
+    }
+}
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java b/client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
similarity index 57%
rename from modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java
rename to client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
index 38d48b98f4e..dd866bac541 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
@@ -16,26 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.script.mustache;
-import com.fasterxml.jackson.core.io.JsonStringEncoder;
-import com.github.mustachejava.DefaultMustacheFactory;
-import com.github.mustachejava.MustacheException;
+package org.elasticsearch.client;
+
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.message.BasicHttpResponse;
 
 import java.io.IOException;
-import java.io.Writer;
 
 /**
- * A MustacheFactory that does simple JSON escaping.
+ * Simple {@link CloseableHttpResponse} implementation needed to easily create http responses that are closeable, given that
+ * org.apache.http.impl.execchain.HttpResponseProxy is not public.
  */
-final class JsonEscapingMustacheFactory extends DefaultMustacheFactory {
+class CloseableBasicHttpResponse extends BasicHttpResponse implements CloseableHttpResponse {
+
+    public CloseableBasicHttpResponse(StatusLine statusline) {
+        super(statusline);
+    }
 
     @Override
-    public void encode(String value, Writer writer) {
-        try {
-            writer.write(JsonStringEncoder.getInstance().quoteAsString(value));
-        } catch (IOException e) {
-            throw new MustacheException("Failed to encode value: " + value);
-        }
+    public void close() throws IOException {
+        //nothing to close
     }
-}
+}
\ No newline at end of file
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java
new file mode 100644
index 00000000000..4d3ad75b5e8
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpHost;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpOptions;
+import org.apache.http.client.methods.HttpPatch;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.methods.HttpTrace;
+import org.apache.http.entity.InputStreamEntity;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHttpResponse;
+import org.apache.http.message.BasicStatusLine;
+import org.apache.http.util.EntityUtils;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+public class RequestLoggerTests extends RestClientTestCase {
+
+    public void testTraceRequest() throws IOException, URISyntaxException {
+        HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
+
+        String expectedEndpoint = "/index/type/_api";
+        URI uri;
+        if (randomBoolean()) {
+            uri = new URI(expectedEndpoint);
+        } else {
+            uri = new URI("index/type/_api");
+        }
+
+        HttpRequestBase request;
+        int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);
+        switch(requestType) {
+            case 0:
+                request = new HttpGetWithEntity(uri);
+                break;
+            case 1:
+                request = new HttpPost(uri);
+                break;
+            case 2:
+                request = new HttpPut(uri);
+                break;
+            case 3:
+                request = new HttpDeleteWithEntity(uri);
+                break;
+            case 4:
+                request = new HttpHead(uri);
+                break;
+            case 5:
+                request = new HttpTrace(uri);
+                break;
+            case 6:
+                request = new HttpOptions(uri);
+                break;
+            case 7:
+                request = new HttpPatch(uri);
+                break;
+            default:
+                throw new UnsupportedOperationException();
+        }
+
+        String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
+        boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
+        String requestBody = "{ \"field\": \"value\" }";
+        if (hasBody) {
+            expected += " -d '" + requestBody + "'";
+            HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
+            HttpEntity entity;
+            if (getRandom().nextBoolean()) {
+                entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
+            } else {
+                entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
+            }
+            enclosingRequest.setEntity(entity);
+        }
+
+        String traceRequest = RequestLogger.buildTraceRequest(request, host);
+        assertThat(traceRequest, equalTo(expected));
+        if (hasBody) {
+            //check that the body is still readable as most entities are not repeatable
+            String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8);
+            assertThat(body, equalTo(requestBody));
+        }
+    }
+
+    public void testTraceResponse() throws IOException {
+        ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
+        int statusCode = RandomInts.randomIntBetween(getRandom(), 200, 599);
+        String reasonPhrase = "REASON";
+        BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
+        String expected = "# " + statusLine.toString();
+        BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
+        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
+        for (int i = 0; i < numHeaders; i++) {
+            httpResponse.setHeader("header" + i, "value");
+            expected += "\n# header" + i + ": value";
+        }
+        expected += "\n#";
+        boolean hasBody = getRandom().nextBoolean();
+        String responseBody = "{\n \"field\": \"value\"\n}";
+        if (hasBody) {
+            expected += "\n# {";
+            expected += "\n# \"field\": \"value\"";
+            expected += "\n# }";
+            HttpEntity entity;
+            if (getRandom().nextBoolean()) {
+                entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
+            } else {
+                entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
+            }
+            httpResponse.setEntity(entity);
+        }
+        String traceResponse = RequestLogger.buildTraceResponse(httpResponse);
+        assertThat(traceResponse, equalTo(expected));
+        if (hasBody) {
+            //check that the body is still readable as most entities are not repeatable
+            String body = EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8);
+            assertThat(body, equalTo(responseBody));
+        }
+    }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
new file mode 100644
index 00000000000..a16e961fd28
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.message.BasicHeader;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class RestClientBuilderTests extends RestClientTestCase {
+
+    public void testBuild() throws IOException {
+        try {
+            RestClient.builder((HttpHost[])null);
+            fail("should have failed");
+        } catch(IllegalArgumentException e) {
+            assertEquals("no hosts provided", e.getMessage());
+        }
+
+        try {
+            RestClient.builder();
+            fail("should have failed");
+        } catch(IllegalArgumentException e) {
+            assertEquals("no hosts provided", e.getMessage());
+        }
+
+        try {
+            RestClient.builder(new HttpHost[]{new HttpHost("localhost", 9200), null}).build();
+            fail("should have failed");
+        } catch(NullPointerException e) {
+            assertEquals("host cannot be null", e.getMessage());
+        }
+
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200))
+                    .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+            fail("should have failed");
+        } catch(IllegalArgumentException e) {
+            assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage());
+        }
+
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
+            fail("should have failed");
+        } catch(NullPointerException e) {
+            assertEquals("default headers must not be null", e.getMessage());
+        }
+
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(new Header[]{null});
+            fail("should have failed");
+        } catch(NullPointerException e) {
+            assertEquals("default header must not be null", e.getMessage());
+        }
+
+        try {
+            RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
+            fail("should have failed");
+        } catch(NullPointerException e) {
+            assertEquals("failure listener must not be null", e.getMessage());
+        }
+
+        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        HttpHost[] hosts = new HttpHost[numNodes];
+        for (int i = 0; i < numNodes; i++) {
+            hosts[i] = new HttpHost("localhost", 9200 + i);
+        }
+        RestClient.Builder builder = RestClient.builder(hosts);
+        if (getRandom().nextBoolean()) {
+            builder.setHttpClient(HttpClientBuilder.create().build());
+        }
+        if (getRandom().nextBoolean()) {
+            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+            Header[] headers = new Header[numHeaders];
+            for (int i = 0; i < numHeaders; i++) {
+                headers[i] = new BasicHeader("header" + i, "value");
+            }
+            builder.setDefaultHeaders(headers);
+        }
+        if (getRandom().nextBoolean()) {
+            builder.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+        }
+        try (RestClient restClient = builder.build()) {
+            assertNotNull(restClient);
+        }
+    }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
new file mode 100644
index 00000000000..4a14c174353
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.sun.net.httpserver.Headers;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.Consts;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
+import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
+import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
+ * Works against a real http server, one single host.
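+ * The server used is the JDK's built-in {@code com.sun.net.httpserver.HttpServer}, which keeps the test free of
+ * external dependencies; it is also the reason for the animal-sniffer suppressions below.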
+ */
+//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+@IgnoreJRERequirement
+public class RestClientIntegTests extends RestClientTestCase {
+
+    private static HttpServer httpServer;
+    private static RestClient restClient;
+    private static Header[] defaultHeaders;
+
+    @BeforeClass
+    public static void startHttpServer() throws Exception {
+        httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+        httpServer.start();
+        //returns a different status code depending on the path
+        for (int statusCode : getAllStatusCodes()) {
+            createStatusCodeContext(httpServer, statusCode);
+        }
+        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
+        defaultHeaders = new Header[numHeaders];
+        for (int i = 0; i < numHeaders; i++) {
+            String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
+            String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+            defaultHeaders[i] = new BasicHeader(headerName, headerValue);
+        }
+        restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
+                .setDefaultHeaders(defaultHeaders).build();
+    }
+
+    private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) {
+        httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode));
+    }
+
+    //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+    @IgnoreJRERequirement
+    private static class ResponseHandler implements HttpHandler {
+        private final int statusCode;
+
+        ResponseHandler(int statusCode) {
+            this.statusCode = statusCode;
+        }
+
+        @Override
+        public void handle(HttpExchange httpExchange) throws IOException {
+            StringBuilder body = new StringBuilder();
+            try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) {
+                char[] buffer = new char[256];
+                int read;
+                while ((read = reader.read(buffer)) != -1) {
+                    body.append(buffer, 0, read);
+                }
+            }
+            Headers requestHeaders = httpExchange.getRequestHeaders();
+            Headers responseHeaders = httpExchange.getResponseHeaders();
+            for (Map.Entry<String, List<String>> header : requestHeaders.entrySet()) {
+                responseHeaders.put(header.getKey(), header.getValue());
+            }
+            httpExchange.getRequestBody().close();
+            httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? -1 : body.length());
+            if (body.length() > 0) {
+                try (OutputStream out = httpExchange.getResponseBody()) {
+                    out.write(body.toString().getBytes(Consts.UTF_8));
+                }
+            }
+            httpExchange.close();
+        }
+    }
+
+    @AfterClass
+    public static void stopHttpServers() throws IOException {
+        restClient.close();
+        restClient = null;
+        httpServer.stop(0);
+        httpServer = null;
+    }
+
+    /**
+     * End to end test for headers. We test it explicitly against a real http client as there are different ways
+     * to set/add headers to the {@link org.apache.http.client.HttpClient}.
+     * Exercises the test http server's ability to send back whatever headers it received.
+     */
+    public void testHeaders() throws Exception {
+        for (String method : getHttpMethods()) {
+            Set<String> standardHeaders = new HashSet<>(
+                    Arrays.asList("Accept-encoding", "Connection", "Host", "User-agent", "Date"));
+            if (method.equals("HEAD") == false) {
+                standardHeaders.add("Content-length");
+            }
+            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+            Map<String, String> expectedHeaders = new HashMap<>();
+            for (Header defaultHeader : defaultHeaders) {
+                expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
+            }
+            Header[] headers = new Header[numHeaders];
+            for (int i = 0; i < numHeaders; i++) {
+                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
+                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+                headers[i] = new BasicHeader(headerName, headerValue);
+                expectedHeaders.put(headerName, headerValue);
+            }
+
+            int statusCode = randomStatusCode(getRandom());
+            Response esResponse;
+            try (Response response = restClient.performRequest(method, "/" + statusCode,
+                    Collections.<String, String>emptyMap(), null, headers)) {
+                esResponse = response;
+            } catch(ResponseException e) {
+                esResponse = e.getResponse();
+            }
+            assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
+            for (Header responseHeader : esResponse.getHeaders()) {
+                if (responseHeader.getName().startsWith("Header")) {
+                    String headerValue = expectedHeaders.remove(responseHeader.getName());
+                    assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+                } else {
+                    assertTrue("unknown header was returned " + responseHeader.getName(),
+                            standardHeaders.remove(responseHeader.getName()));
+                }
+            }
+            assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size());
+            assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size());
+        }
+    }
+
+    /**
+     * End to end test for delete with body. We test it explicitly as it is not supported
+     * out of the box by {@link org.apache.http.client.HttpClient}.
+     * Exercises the test http server's ability to send back whatever body it received.
+     */
+    public void testDeleteWithBody() throws Exception {
+        bodyTest("DELETE");
+    }
+
+    /**
+     * End to end test for get with body. We test it explicitly as it is not supported
+     * out of the box by {@link org.apache.http.client.HttpClient}.
+     * Exercises the test http server's ability to send back whatever body it received.
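+     * Elasticsearch APIs such as search accept a request body on GET and DELETE, which the stock
+     * {@link org.apache.http.client.HttpClient} request classes cannot express; that is why the custom
+     * HttpGetWithEntity and HttpDeleteWithEntity classes used by the client exist and are exercised here.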
+     */
+    public void testGetWithBody() throws Exception {
+        bodyTest("GET");
+    }
+
+    private void bodyTest(String method) throws Exception {
+        String requestBody = "{ \"field\": \"value\" }";
+        StringEntity entity = new StringEntity(requestBody);
+        Response esResponse;
+        String responseBody;
+        int statusCode = randomStatusCode(getRandom());
+        try (Response response = restClient.performRequest(method, "/" + statusCode,
+                Collections.<String, String>emptyMap(), entity)) {
+            responseBody = EntityUtils.toString(response.getEntity());
+            esResponse = response;
+        } catch(ResponseException e) {
+            responseBody = e.getResponseBody();
+            esResponse = e.getResponse();
+        }
+        assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
+        assertEquals(requestBody, responseBody);
+    }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
new file mode 100644
index 00000000000..5a43a8d4d9e
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.ProtocolVersion; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicStatusLine; +import org.junit.Before; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; +import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode; +import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; +import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blacklisting etc. + * Relies on a mock http client to intercept requests and return desired responses based on request path. + */ +public class RestClientMultipleHostsTests extends RestClientTestCase { + + private RestClient restClient; + private HttpHost[] httpHosts; + private TrackingFailureListener failureListener; + + @Before + public void createRestClient() throws IOException { + CloseableHttpClient httpClient = mock(CloseableHttpClient.class); + when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer() { + @Override + public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable { + HttpHost httpHost = (HttpHost) invocationOnMock.getArguments()[0]; + HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1]; + //return the desired status code or exception depending on the path + if (request.getURI().getPath().equals("/soe")) { + throw new SocketTimeoutException(httpHost.toString()); + } else if (request.getURI().getPath().equals("/coe")) { + throw new ConnectTimeoutException(httpHost.toString()); + } else if (request.getURI().getPath().equals("/ioe")) { + throw new IOException(httpHost.toString()); + } + int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); + StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + return new CloseableBasicHttpResponse(statusLine); + } + }); + + int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5); + httpHosts = new HttpHost[numHosts]; + for (int i = 0; i < numHosts; i++) { + httpHosts[i] = new HttpHost("localhost", 9200 + i); + } + failureListener = new TrackingFailureListener(); + restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build(); + } + + public void testRoundRobinOkStatusCodes() throws Exception { + int numIters = 
RandomInts.randomIntBetween(getRandom(), 1, 5); + for (int i = 0; i < numIters; i++) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j < httpHosts.length; j++) { + int statusCode = randomOkStatusCode(getRandom()); + try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + failureListener.assertNotCalled(); + } + + public void testRoundRobinNoRetryErrors() throws Exception { + int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5); + for (int i = 0; i < numIters; i++) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j < httpHosts.length; j++) { + String method = randomHttpMethod(getRandom()); + int statusCode = randomErrorNoRetryStatusCode(getRandom()); + try (Response response = restClient.performRequest(method, "/" + statusCode)) { + if (method.equals("HEAD") && statusCode == 404) { + //no exception gets thrown although we got a 404 + assertThat(response.getStatusLine().getStatusCode(), equalTo(404)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } else { + fail("request should have failed"); + } + } catch(ResponseException e) { + if (method.equals("HEAD") && statusCode == 404) { + throw e; + } + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + failureListener.assertNotCalled(); + } + + public void testRoundRobinRetryErrors() throws Exception { + String retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + fail("request should have failed"); + } catch(ResponseException e) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(httpHosts); + do { + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost())); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(ResponseException.class)); + e = (ResponseException)suppressed; + } else { + e = null; + } + } while(e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } catch(IOException e) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(httpHosts); + do { + HttpHost httpHost = 
HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(IOException.class)); + e = (IOException) suppressed; + } else { + e = null; + } + } while(e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + + int numIters = RandomInts.randomIntBetween(getRandom(), 2, 5); + for (int i = 1; i <= numIters; i++) { + //check that one different host is resurrected at each new attempt + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j < httpHosts.length; j++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + fail("request should have failed"); + } catch(ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost())); + //after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(response.getHost()); + assertEquals(0, e.getSuppressed().length); + } catch(IOException e) { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + //after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(httpHost); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + if (getRandom().nextBoolean()) { + //mark one host back alive through a successful request and check that all requests after that are sent to it + HttpHost selectedHost = null; + int iters = RandomInts.randomIntBetween(getRandom(), 2, 10); + for (int y = 0; y < iters; y++) { + int statusCode = randomErrorNoRetryStatusCode(getRandom()); + Response response; + try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) { + response = esResponse; + } + catch(ResponseException e) { + response = e.getResponse(); + } + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + if (selectedHost == null) { + selectedHost = response.getHost(); + } else { + assertThat(response.getHost(), equalTo(selectedHost)); + } + } + failureListener.assertNotCalled(); + //let the selected host catch up on number of failures, it gets selected a consecutive number of times as it's the one + //selected to be retried earlier (due to lower number of failures) till all the hosts have the same number of failures + for (int y = 0; y < i + 1; y++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); + fail("request should have failed"); + } catch(ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertThat(response.getHost(), equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } catch(IOException e) 
{ + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertThat(httpHost, equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } + } + } + } + } + + private static String randomErrorRetryEndpoint() { + switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) { + case 0: + return "/" + randomErrorRetryStatusCode(getRandom()); + case 1: + return "/coe"; + case 2: + return "/soe"; + case 3: + return "/ioe"; + } + throw new UnsupportedOperationException(); + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java new file mode 100644 index 00000000000..b250614b91b --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -0,0 +1,450 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.ProtocolVersion; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpOptions; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpTrace; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicStatusLine; +import org.apache.http.util.EntityUtils; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; +import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; +import static 
org.elasticsearch.client.RestClientTestUtil.randomStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, + * body, different status codes and corresponding responses/exceptions. + * Relies on a mock http client to intercept requests and return desired responses based on request path. + */ +public class RestClientSingleHostTests extends RestClientTestCase { + + private RestClient restClient; + private Header[] defaultHeaders; + private HttpHost httpHost; + private CloseableHttpClient httpClient; + private TrackingFailureListener failureListener; + + @Before + public void createRestClient() throws IOException { + httpClient = mock(CloseableHttpClient.class); + when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer() { + @Override + public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable { + HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1]; + //return the desired status code or exception depending on the path + if (request.getURI().getPath().equals("/soe")) { + throw new SocketTimeoutException(); + } else if (request.getURI().getPath().equals("/coe")) { + throw new ConnectTimeoutException(); + } + int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); + StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + + CloseableHttpResponse httpResponse = new CloseableBasicHttpResponse(statusLine); + //return the same body that was sent + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); + } + } + //return the same headers that were sent + httpResponse.setHeaders(request.getAllHeaders()); + return httpResponse; + } + }); + int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3); + defaultHeaders = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header-default" + (getRandom().nextBoolean() ? 
i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10); + defaultHeaders[i] = new BasicHeader(headerName, headerValue); + } + httpHost = new HttpHost("localhost", 9200); + failureListener = new TrackingFailureListener(); + restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders) + .setFailureListener(failureListener).build(); + } + + /** + * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client + */ + public void testInternalHttpRequest() throws Exception { + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpUriRequest.class); + int times = 0; + for (String httpMethod : getHttpMethods()) { + HttpUriRequest expectedRequest = performRandomRequest(httpMethod); + verify(httpClient, times(++times)).execute(any(HttpHost.class), requestArgumentCaptor.capture()); + HttpUriRequest actualRequest = requestArgumentCaptor.getValue(); + assertEquals(expectedRequest.getURI(), actualRequest.getURI()); + assertEquals(expectedRequest.getClass(), actualRequest.getClass()); + assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); + if (expectedRequest instanceof HttpEntityEnclosingRequest) { + HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); + if (expectedEntity != null) { + HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); + } + } + } + } + + public void testSetHosts() throws IOException { + try { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + + /** + * End to end test for ok status codes + */ + public void testOkStatusCodes() throws Exception { + for (String method : getHttpMethods()) { + for (int okStatusCode : getOkStatusCodes()) { + Response response = performRequest(method, "/" + okStatusCode); + assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); + } + } + failureListener.assertNotCalled(); + } + + /** + * End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests + */ + public void testErrorStatusCodes() throws Exception { + for (String method : getHttpMethods()) { + //error status codes should cause an exception to be thrown + for (int errorStatusCode : getAllErrorStatusCodes()) { + try (Response response = performRequest(method, "/" + errorStatusCode)) { + if (method.equals("HEAD") && errorStatusCode == 404) { + //no exception gets thrown although we got a 404 + assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + } else { + fail("request should have failed"); + } + } catch(ResponseException e) { + if 
(method.equals("HEAD") && errorStatusCode == 404) { + throw e; + } + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + } + if (errorStatusCode <= 500) { + failureListener.assertNotCalled(); + } else { + failureListener.assertCalled(httpHost); + } + } + } + } + + public void testIOExceptions() throws IOException { + for (String method : getHttpMethods()) { + //IOExceptions should be let bubble up + try { + performRequest(method, "/coe"); + fail("request should have failed"); + } catch(IOException e) { + assertThat(e, instanceOf(ConnectTimeoutException.class)); + } + failureListener.assertCalled(httpHost); + try { + performRequest(method, "/soe"); + fail("request should have failed"); + } catch(IOException e) { + assertThat(e, instanceOf(SocketTimeoutException.class)); + } + failureListener.assertCalled(httpHost); + } + } + + /** + * End to end test for request and response body. Exercises the mock http client ability to send back + * whatever body it has received. + */ + public void testBody() throws Exception { + String body = "{ \"field\": \"value\" }"; + StringEntity entity = new StringEntity(body); + for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { + for (int okStatusCode : getOkStatusCodes()) { + try (Response response = restClient.performRequest(method, "/" + okStatusCode, + Collections.emptyMap(), entity)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + } + } + for (int errorStatusCode : getAllErrorStatusCodes()) { + try { + restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), entity); + fail("request should have failed"); + } catch(ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + } + } + } + for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + try { + restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.emptyMap(), entity); + fail("request should have failed"); + } catch(UnsupportedOperationException e) { + assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } + } + } + + public void testNullHeaders() throws Exception { + String method = randomHttpMethod(getRandom()); + int statusCode = randomStatusCode(getRandom()); + try { + performRequest(method, "/" + statusCode, (Header[])null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("request headers must not be null", e.getMessage()); + } + try { + performRequest(method, "/" + statusCode, (Header)null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("request header must not be null", e.getMessage()); + } + } + + public void testNullParams() throws Exception { + String method = randomHttpMethod(getRandom()); + int statusCode = randomStatusCode(getRandom()); + try { + restClient.performRequest(method, "/" + statusCode, (Map)null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("params must not be null", e.getMessage()); + } + try { + restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("params must not be null", e.getMessage()); + } + } + + /** + * End to end test 
for request and response headers. Exercises the mock http client's ability to send back + * whatever headers it has received. + */ + public void testHeaders() throws Exception { + for (String method : getHttpMethods()) { + Map<String, String> expectedHeaders = new HashMap<>(); + for (Header defaultHeader : defaultHeaders) { + expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue()); + } + int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5); + Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header" + (getRandom().nextBoolean() ? i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10); + headers[i] = new BasicHeader(headerName, headerValue); + expectedHeaders.put(headerName, headerValue); + } + + int statusCode = randomStatusCode(getRandom()); + Response esResponse; + try (Response response = restClient.performRequest(method, "/" + statusCode, + Collections.<String, String>emptyMap(), null, headers)) { + esResponse = response; + } catch(ResponseException e) { + esResponse = e.getResponse(); + } + assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); + for (Header responseHeader : esResponse.getHeaders()) { + String headerValue = expectedHeaders.remove(responseHeader.getName()); + assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue); + } + assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size()); + } + } + + private HttpUriRequest performRandomRequest(String method) throws IOException, URISyntaxException { + String uriAsString = "/" + randomStatusCode(getRandom()); + URIBuilder uriBuilder = new URIBuilder(uriAsString); + Map<String, String> params = Collections.emptyMap(); + boolean hasParams = randomBoolean(); + if (hasParams) { + int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3); + params = new HashMap<>(numParams); + for (int i = 0; i < numParams; i++) { + String paramKey = "param-" + i; + String paramValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10); + params.put(paramKey, paramValue); + uriBuilder.addParameter(paramKey, paramValue); + } + } + URI uri = uriBuilder.build(); + + HttpUriRequest request; + switch(method) { + case "DELETE": + request = new HttpDeleteWithEntity(uri); + break; + case "GET": + request = new HttpGetWithEntity(uri); + break; + case "HEAD": + request = new HttpHead(uri); + break; + case "OPTIONS": + request = new HttpOptions(uri); + break; + case "PATCH": + request = new HttpPatch(uri); + break; + case "POST": + request = new HttpPost(uri); + break; + case "PUT": + request = new HttpPut(uri); + break; + case "TRACE": + request = new HttpTrace(uri); + break; + default: + throw new UnsupportedOperationException("method not supported: " + method); + } + + HttpEntity entity = null; + boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean(); + if (hasBody) { + entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100)); + ((HttpEntityEnclosingRequest) request).setEntity(entity); + } + + Header[] headers = new Header[0]; + for (Header defaultHeader : defaultHeaders) { + //default headers are expected on each request, although the caller doesn't send them explicitly + request.setHeader(defaultHeader); + } + if (getRandom().nextBoolean()) { + int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5); + headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName =
"Header" + (getRandom().nextBoolean() ? i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10); + BasicHeader basicHeader = new BasicHeader(headerName, headerValue); + headers[i] = basicHeader; + request.setHeader(basicHeader); + } + } + + try { + if (hasParams == false && hasBody == false && randomBoolean()) { + restClient.performRequest(method, uriAsString, headers); + } else if (hasBody == false && randomBoolean()) { + restClient.performRequest(method, uriAsString, params, headers); + } else { + restClient.performRequest(method, uriAsString, params, entity, headers); + } + } catch(ResponseException e) { + //all good + } + return request; + } + + private Response performRequest(String method, String endpoint, Header... headers) throws IOException { + switch(randomIntBetween(0, 2)) { + case 0: + return restClient.performRequest(method, endpoint, headers); + case 1: + return restClient.performRequest(method, endpoint, Collections.emptyMap(), headers); + case 2: + return restClient.performRequest(method, endpoint, Collections.emptyMap(), null, headers); + default: + throw new UnsupportedOperationException(); + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java new file mode 100644 index 00000000000..35842823923 --- /dev/null +++ b/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; + +/** + * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called + */ +class TrackingFailureListener extends RestClient.FailureListener { + private Set hosts = new HashSet<>(); + + @Override + public void onFailure(HttpHost host) throws IOException { + hosts.add(host); + } + + void assertCalled(HttpHost... hosts) { + assertEquals(hosts.length, this.hosts.size()); + assertThat(this.hosts, containsInAnyOrder(hosts)); + this.hosts.clear(); + } + + void assertNotCalled() { + assertEquals(0, hosts.size()); + } +} \ No newline at end of file diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle new file mode 100644 index 00000000000..7cf16ee85d8 --- /dev/null +++ b/client/sniffer/build.gradle @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.JavaVersion + +apply plugin: 'elasticsearch.build' +apply plugin: 'ru.vyarus.animalsniffer' + +targetCompatibility = JavaVersion.VERSION_1_7 +sourceCompatibility = JavaVersion.VERSION_1_7 + +group = 'org.elasticsearch.client' + +dependencies { + compile "org.elasticsearch.client:rest:${version}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + + testCompile "org.elasticsearch.client:test:${version}" + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" + signature "org.codehaus.mojo.signature:java17:1.0@signature" +} + +forbiddenApisMain { + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +//JarHell is part of es core, which we don't want to pull in +jarHell.enabled=false + +namingConventions { + testClass = 'org.elasticsearch.client.RestClientTestCase' + //we don't have integration tests + skipIntegTestInDisguise = true +} + +dependencyLicenses { + dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } +} + +thirdPartyAudit.excludes = [ + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Category', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' +] diff --git a/client/sniffer/licenses/commons-codec-1.10.jar.sha1 b/client/sniffer/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 00000000000..3fe8682a1b0 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ 
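[Editor's aside] The build file above wires the new sniffer module against the rest client; judging from its name and dependencies, its job is to keep a client's node list fresh. The manual counterpart of that job is RestClient#setHosts, whose argument validation testSetHosts pins down. A sketch under those assumptions, with illustrative addresses and a hypothetical class name:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.RestClient;

    public class SetHostsSketch {
        // Points an existing client at a fresh set of nodes. Per testSetHosts above,
        // the varargs array must be non-null and non-empty, and no host may be null.
        static void updateNodes(RestClient client) {
            client.setHosts(new HttpHost("localhost", 9200), new HttpHost("localhost", 9201));
        }
    }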
+4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/distribution/src/main/resources/LICENSE.txt b/client/sniffer/licenses/commons-codec-LICENSE.txt similarity index 100% rename from distribution/src/main/resources/LICENSE.txt rename to client/sniffer/licenses/commons-codec-LICENSE.txt diff --git a/client/sniffer/licenses/commons-codec-NOTICE.txt b/client/sniffer/licenses/commons-codec-NOTICE.txt new file mode 100644 index 00000000000..1da9af50f60 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 00000000000..5b8f029e582 --- /dev/null +++ b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/discovery-azure/LICENSE.txt b/client/sniffer/licenses/commons-logging-LICENSE.txt similarity index 100% rename from plugins/discovery-azure/LICENSE.txt rename to client/sniffer/licenses/commons-logging-LICENSE.txt diff --git a/client/sniffer/licenses/commons-logging-NOTICE.txt b/client/sniffer/licenses/commons-logging-NOTICE.txt new file mode 100644 index 00000000000..556bd03951d --- /dev/null +++ b/client/sniffer/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,6 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/sniffer/licenses/httpclient-4.5.2.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 00000000000..6937112a09f --- /dev/null +++ b/client/sniffer/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient-LICENSE.txt b/client/sniffer/licenses/httpclient-LICENSE.txt new file mode 100644 index 00000000000..32f01eda18f --- /dev/null +++ b/client/sniffer/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/sniffer/licenses/httpclient-NOTICE.txt b/client/sniffer/licenses/httpclient-NOTICE.txt new file mode 100644 index 00000000000..91e5c40c4c6 --- /dev/null +++ b/client/sniffer/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/sniffer/licenses/httpcore-4.4.4.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.4.jar.sha1 new file mode 100644 index 00000000000..ef0c257e012 --- /dev/null +++ b/client/sniffer/licenses/httpcore-4.4.4.jar.sha1 @@ -0,0 +1 @@ +b31526a230871fbe285fbcbe2813f9c0839ae9b0 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-LICENSE.txt b/client/sniffer/licenses/httpcore-LICENSE.txt new file mode 100644 index 00000000000..32f01eda18f --- /dev/null +++ b/client/sniffer/licenses/httpcore-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. 
"License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/sniffer/licenses/httpcore-NOTICE.txt b/client/sniffer/licenses/httpcore-NOTICE.txt new file mode 100644 index 00000000000..91e5c40c4c6 --- /dev/null +++ b/client/sniffer/licenses/httpcore-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 new file mode 100644 index 00000000000..73831ed2d51 --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 @@ -0,0 +1 @@ +4127b62db028f981e81caa248953c0899d720f98 \ No newline at end of file diff --git a/plugins/discovery-azure/licenses/jackson-LICENSE b/client/sniffer/licenses/jackson-core-LICENSE similarity index 100% rename from plugins/discovery-azure/licenses/jackson-LICENSE rename to client/sniffer/licenses/jackson-core-LICENSE diff --git a/plugins/discovery-azure/licenses/jackson-NOTICE b/client/sniffer/licenses/jackson-core-NOTICE similarity index 100% rename from plugins/discovery-azure/licenses/jackson-NOTICE rename to client/sniffer/licenses/jackson-core-NOTICE diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java new file mode 100644 index 00000000000..bfe21f5e7d1 --- /dev/null +++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them.
+ * Compatible with elasticsearch 5.x and 2.x.
+ */
+public class HostsSniffer {
+
+    private static final Log logger = LogFactory.getLog(HostsSniffer.class);
+
+    private final RestClient restClient;
+    private final Map<String, String> sniffRequestParams;
+    private final Scheme scheme;
+    private final JsonFactory jsonFactory = new JsonFactory();
+
+    protected HostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
+        this.restClient = restClient;
+        this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms");
+        this.scheme = scheme;
+    }
+
+    /**
+     * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
+     */
+    public List<HttpHost> sniffHosts() throws IOException {
+        try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams)) {
+            return readHosts(response.getEntity());
+        }
+    }
+
+    private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
+        try (InputStream inputStream = entity.getContent()) {
+            JsonParser parser = jsonFactory.createParser(inputStream);
+            if (parser.nextToken() != JsonToken.START_OBJECT) {
+                throw new IOException("expected data to start with an object");
+            }
+            List<HttpHost> hosts = new ArrayList<>();
+            while (parser.nextToken() != JsonToken.END_OBJECT) {
+                if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+                    if ("nodes".equals(parser.getCurrentName())) {
+                        while (parser.nextToken() != JsonToken.END_OBJECT) {
+                            JsonToken token = parser.nextToken();
+                            assert token == JsonToken.START_OBJECT;
+                            String nodeId = parser.getCurrentName();
+                            HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
+                            if (sniffedHost != null) {
+                                logger.trace("adding node [" + nodeId + "]");
+                                hosts.add(sniffedHost);
+                            }
+                        }
+                    } else {
+                        parser.skipChildren();
+                    }
+                }
+            }
+            return hosts;
+        }
+    }
+
+    private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
+        HttpHost httpHost = null;
+        String fieldName = null;
+        while (parser.nextToken() != JsonToken.END_OBJECT) {
+            if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
+                fieldName = parser.getCurrentName();
+            } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+                if ("http".equals(fieldName)) {
+                    while (parser.nextToken() != JsonToken.END_OBJECT) {
+                        if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
+                            URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
+                            httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
+                                    boundAddressAsURI.getScheme());
+                        } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+                            parser.skipChildren();
+                        }
+                    }
+                } else {
+                    parser.skipChildren();
+                }
+            }
+        }
+        //http section is not present if http is not enabled on the node, ignore such nodes
+        if (httpHost == null) {
+            logger.debug("skipping node [" + nodeId + "] with http disabled");
+            return null;
+        }
+        return httpHost;
+    }
+
+    /**
+     * Returns a new {@link Builder} to help with {@link HostsSniffer} creation.
+     */
+    public static Builder builder(RestClient restClient) {
+        return new Builder(restClient);
+    }
+
+    public enum Scheme {
+        HTTP("http"), HTTPS("https");
+
+        private final String name;
+
+        Scheme(String name) {
+            this.name = name;
+        }
+
+        @Override
+        public String toString() {
+            return name;
+        }
+    }
+
+    /**
+     * HostsSniffer builder. Helps create a new {@link HostsSniffer}.
+     */
+    public static class Builder {
+        public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
+
+        private final RestClient restClient;
+        private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
+        private Scheme scheme = Scheme.HTTP;
+
+        private Builder(RestClient restClient) {
+            Objects.requireNonNull(restClient, "restClient cannot be null");
+            this.restClient = restClient;
+        }
+
+        /**
+         * Sets the sniff request timeout (in milliseconds) to be passed in as a query string parameter to elasticsearch.
+         * This allows the request to be halted without failing, as only the nodes that have responded within this timeout
+         * will be returned.
+         */
+        public Builder setSniffRequestTimeoutMillis(int sniffRequestTimeoutMillis) {
+            if (sniffRequestTimeoutMillis <= 0) {
+                throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
+            }
+            this.sniffRequestTimeoutMillis = sniffRequestTimeoutMillis;
+            return this;
+        }
+
+        /**
+         * Sets the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
+         */
+        public Builder setScheme(Scheme scheme) {
+            Objects.requireNonNull(scheme, "scheme cannot be null");
+            this.scheme = scheme;
+            return this;
+        }
+
+        /**
+         * Creates a new {@link HostsSniffer} instance given the provided configuration
+         */
+        public HostsSniffer build() {
+            return new HostsSniffer(restClient, sniffRequestTimeoutMillis, scheme);
+        }
+    }
+}
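
For illustration only, not part of the diff: a minimal sketch of how the HostsSniffer above is meant to be used, assuming an Elasticsearch node reachable on localhost:9200. The host, port, and timeout values are made up; every call shown is introduced in this change.

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
    HostsSniffer hostsSniffer = HostsSniffer.builder(restClient)
            .setSniffRequestTimeoutMillis(2000)        // sent to elasticsearch as ?timeout=2000ms
            .setScheme(HostsSniffer.Scheme.HTTP)       // scheme attached to each sniffed host
            .build();
    List<HttpHost> hosts = hostsSniffer.sniffHosts();  // GET /_nodes/http, parses each publish_address
    restClient.setHosts(hosts.toArray(new HttpHost[hosts.size()]));
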
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
new file mode 100644
index 00000000000..76350057141
--- /dev/null
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * {@link org.elasticsearch.client.RestClient.FailureListener} implementation that performs sniffing on failure.
+ * It gets notified whenever a failure happens, and uses a {@link Sniffer} instance to reload the hosts and set
+ * them back on the {@link RestClient}. The {@link Sniffer} instance needs to be lazily set through
+ * {@link #setSniffer(Sniffer)}.
+ */
+public class SniffOnFailureListener extends RestClient.FailureListener {
+
+    private volatile Sniffer sniffer;
+    private final AtomicBoolean set;
+
+    public SniffOnFailureListener() {
+        this.set = new AtomicBoolean(false);
+    }
+
+    /**
+     * Sets the {@link Sniffer} instance used to perform sniffing
+     * @throws IllegalStateException if the sniffer was already set, as it can only be set once
+     */
+    public void setSniffer(Sniffer sniffer) {
+        Objects.requireNonNull(sniffer, "sniffer must not be null");
+        if (set.compareAndSet(false, true)) {
+            this.sniffer = sniffer;
+        } else {
+            throw new IllegalStateException("sniffer can only be set once");
+        }
+    }
+
+    @Override
+    public void onFailure(HttpHost host) throws IOException {
+        if (sniffer == null) {
+            throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
+        }
+        //re-sniff immediately but take out the node that failed
+        sniffer.sniffOnFailure(host);
+    }
+}
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
new file mode 100644
index 00000000000..74a28cdd222
--- /dev/null
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Class responsible for sniffing nodes from an elasticsearch cluster and setting them on a provided instance of {@link RestClient}.
+ * Must be created via {@link Builder}, which allows setting all of the different options or relying on defaults.
+ * A background task fetches the nodes through the {@link HostsSniffer} and sets them on the {@link RestClient} instance.
+ * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
+ * {@link org.elasticsearch.client.RestClient.Builder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation
+ * needs to be lazily set to the previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
+ */
+public final class Sniffer implements Closeable {
+
+    private static final Log logger = LogFactory.getLog(Sniffer.class);
+
+    private final Task task;
+
+    private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
+        this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay);
+    }
+
+    /**
+     * Triggers a new sniffing round and explicitly takes out the failed host provided as argument
+     */
+    public void sniffOnFailure(HttpHost failedHost) {
+        this.task.sniffOnFailure(failedHost);
+    }
+
+    @Override
+    public void close() throws IOException {
+        task.shutdown();
+    }
+
+    private static class Task implements Runnable {
+        private final HostsSniffer hostsSniffer;
+        private final RestClient restClient;
+
+        private final long sniffIntervalMillis;
+        private final long sniffAfterFailureDelayMillis;
+        private final ScheduledExecutorService scheduledExecutorService;
+        private final AtomicBoolean running = new AtomicBoolean(false);
+        private ScheduledFuture<?> scheduledFuture;
+
+        private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) {
+            this.hostsSniffer = hostsSniffer;
+            this.restClient = restClient;
+            this.sniffIntervalMillis = sniffIntervalMillis;
+            this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
+            this.scheduledExecutorService = Executors.newScheduledThreadPool(1);
+            scheduleNextRun(0);
+        }
+
+        synchronized void scheduleNextRun(long delayMillis) {
+            if (scheduledExecutorService.isShutdown() == false) {
+                try {
+                    if (scheduledFuture != null) {
+                        //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay
+                        this.scheduledFuture.cancel(false);
+                    }
+                    logger.debug("scheduling next sniff in " + delayMillis + " ms");
+                    this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS);
+                } catch(Exception e) {
+                    logger.error("error while scheduling next sniffer task", e);
+                }
+            }
+        }
+
+        @Override
+        public void run() {
+            sniff(null, sniffIntervalMillis);
+        }
+
+        void sniffOnFailure(HttpHost failedHost) {
+            sniff(failedHost, sniffAfterFailureDelayMillis);
+        }
+
+        void sniff(HttpHost excludeHost, long nextSniffDelayMillis) {
+            if (running.compareAndSet(false, true)) {
+                try {
+                    List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
+                    logger.debug("sniffed hosts: " + sniffedHosts);
+                    if (excludeHost != null) {
+                        sniffedHosts.remove(excludeHost);
+                    }
+                    if (sniffedHosts.isEmpty()) {
+                        logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
+                    } else {
+                        this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
+                    }
+                } catch (Exception e) {
+                    logger.error("error while sniffing nodes", e);
+                } finally {
+                    scheduleNextRun(nextSniffDelayMillis);
+                    running.set(false);
+                }
+            }
+        }
+
+        synchronized void shutdown() {
+            scheduledExecutorService.shutdown();
+            try {
+                if (scheduledExecutorService.awaitTermination(1000,
+                        TimeUnit.MILLISECONDS)) {
+                    return;
+                }
+                scheduledExecutorService.shutdownNow();
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+
+    /**
+     * Returns a new {@link Builder} to help with {@link Sniffer} creation.
+     */
+    public static Builder builder(RestClient restClient, HostsSniffer hostsSniffer) {
+        return new Builder(restClient, hostsSniffer);
+    }
+
+    /**
+     * Sniffer builder. Helps create a new {@link Sniffer}.
+     */
+    public static final class Builder {
+        public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5);
+        public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1);
+
+        private final RestClient restClient;
+        private final HostsSniffer hostsSniffer;
+        private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
+        private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
+
+        /**
+         * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch,
+         * and the {@link HostsSniffer} that will be used to fetch the hosts.
+         */
+        private Builder(RestClient restClient, HostsSniffer hostsSniffer) {
+            Objects.requireNonNull(restClient, "restClient cannot be null");
+            this.restClient = restClient;
+            Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
+            this.hostsSniffer = hostsSniffer;
+        }
+
+        /**
+         * Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when
+         * sniffOnFailure is disabled or when there are no failures between consecutive sniff executions.
+         * @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0
+         */
+        public Builder setSniffIntervalMillis(int sniffIntervalMillis) {
+            if (sniffIntervalMillis <= 0) {
+                throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0");
+            }
+            this.sniffIntervalMillis = sniffIntervalMillis;
+            return this;
+        }
+
+        /**
+         * Sets the delay of a sniff execution scheduled after a failure (in milliseconds)
+         */
+        public Builder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) {
+            if (sniffAfterFailureDelayMillis <= 0) {
+                throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0");
+            }
+            this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
+            return this;
+        }
+
+        /**
+         * Creates the {@link Sniffer} based on the provided configuration.
+         */
+        public Sniffer build() {
+            return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
+        }
+    }
+}
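
For illustration only, not part of the diff: a sketch wiring the three classes together as the Sniffer javadoc above describes. The listener has to be created first so it can be passed to the RestClient builder, and the Sniffer is set on it afterwards; the host, port, and interval values are arbitrary example values.

    SniffOnFailureListener listener = new SniffOnFailureListener();
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
            .setFailureListener(listener)              // triggers sniffing as soon as a node fails
            .build();
    HostsSniffer hostsSniffer = HostsSniffer.builder(restClient).build();
    Sniffer sniffer = Sniffer.builder(restClient, hostsSniffer)
            .setSniffIntervalMillis(60000)             // ordinary sniffing round every minute
            .setSniffAfterFailureDelayMillis(30000)    // faster follow-up round after a failure
            .build();
    listener.setSniffer(sniffer);                      // lazily set; it can only be set once
    // ... use restClient ...
    sniffer.close();                                   // stops the background task
    restClient.close();
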
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java
new file mode 100644
index 00000000000..c167a3a104b
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class HostsSnifferBuilderTests extends RestClientTestCase {
+
+    public void testBuild() throws Exception {
+        try {
+            HostsSniffer.builder(null);
+            fail("should have failed");
+        } catch(NullPointerException e) {
+            assertEquals(e.getMessage(), "restClient cannot be null");
+        }
+
+        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        HttpHost[] hosts = new HttpHost[numNodes];
+        for (int i = 0; i < numNodes; i++) {
+            hosts[i] = new HttpHost("localhost", 9200 + i);
+        }
+
+        try (RestClient client = RestClient.builder(hosts).build()) {
+            try {
+                HostsSniffer.builder(client).setScheme(null);
+                fail("should have failed");
+            } catch(NullPointerException e) {
+                assertEquals(e.getMessage(), "scheme cannot be null");
+            }
+
+            try {
+                HostsSniffer.builder(client).setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+                fail("should have failed");
+            } catch(IllegalArgumentException e) {
+                assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0");
+            }
+
+            HostsSniffer.Builder builder = HostsSniffer.builder(client);
+            if (getRandom().nextBoolean()) {
+                builder.setScheme(RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values()));
+            }
+            if (getRandom().nextBoolean()) {
+                builder.setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+            }
+            assertNotNull(builder.build());
+        }
+    }
+}
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java
new file mode 100644
index 00000000000..6e0c3a728d5
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.Consts;
+import org.apache.http.HttpHost;
+import org.apache.http.client.methods.HttpGet;
+import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+@IgnoreJRERequirement
+public class HostsSnifferTests extends RestClientTestCase {
+
+    private int sniffRequestTimeout;
+    private HostsSniffer.Scheme scheme;
+    private SniffResponse sniffResponse;
+    private HttpServer httpServer;
+
+    @Before
+    public void startHttpServer() throws IOException {
+        this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000);
+        this.scheme = RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values());
+        if (rarely()) {
+            this.sniffResponse = SniffResponse.buildFailure();
+        } else {
+            this.sniffResponse = buildSniffResponse(scheme);
+        }
+        this.httpServer = createHttpServer(sniffResponse, sniffRequestTimeout);
+        this.httpServer.start();
+    }
+
+    @After
+    public void stopHttpServer() throws IOException {
+        httpServer.stop(0);
+    }
+
+    public void testSniffNodes() throws IOException, URISyntaxException {
+        HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
+        try (RestClient restClient = RestClient.builder(httpHost).build()) {
+            HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout);
+            if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) {
+                builder.setScheme(scheme);
+            }
+            HostsSniffer sniffer = builder.build();
+            try {
+                List<HttpHost> sniffedHosts = sniffer.sniffHosts();
+                if (sniffResponse.isFailure) {
+                    fail("sniffNodes should have failed");
+                }
+                assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size()));
+                Iterator<HttpHost> responseHostsIterator = sniffResponse.hosts.iterator();
+                for (HttpHost sniffedHost : sniffedHosts) {
+                    assertEquals(sniffedHost, responseHostsIterator.next());
+                }
+            } catch(ResponseException e) {
+                Response response = e.getResponse();
+                if (sniffResponse.isFailure) {
+                    assertThat(e.getMessage(), containsString("GET " + httpHost + "/_nodes/http?timeout=" + sniffRequestTimeout + "ms"));
+                    assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode)));
+                    assertThat(response.getHost(), equalTo(httpHost));
+                    assertThat(response.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode));
+                    assertThat(response.getRequestLine().toString(),
+                            equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1"));
+                } else {
+                    fail("sniffNodes should have succeeded: " + response.getStatusLine());
+                }
+            }
+        }
+    }
+
+    private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException {
+        HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+        httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse));
+        return httpServer;
+    }
+
+    //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+    @IgnoreJRERequirement
+    private static class ResponseHandler implements HttpHandler {
+        private final int sniffTimeoutMillis;
+        private final SniffResponse sniffResponse;
+
+        ResponseHandler(int sniffTimeoutMillis, SniffResponse sniffResponse) {
+            this.sniffTimeoutMillis = sniffTimeoutMillis;
+            this.sniffResponse = sniffResponse;
+        }
+
+        @Override
+        public void handle(HttpExchange httpExchange) throws IOException {
+            if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) {
+                if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeoutMillis + "ms")) {
+                    String nodesInfoBody = sniffResponse.nodesInfoBody;
+                    httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length());
+                    try (OutputStream out = httpExchange.getResponseBody()) {
+                        out.write(nodesInfoBody.getBytes(Consts.UTF_8));
+                        return;
+                    }
+                }
+            }
+            httpExchange.sendResponseHeaders(404, 0);
+            httpExchange.close();
+        }
+    }
+
+    private static SniffResponse buildSniffResponse(HostsSniffer.Scheme scheme) throws IOException {
+        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        List<HttpHost> hosts = new ArrayList<>(numNodes);
+        JsonFactory jsonFactory = new JsonFactory();
+        StringWriter writer = new StringWriter();
+        JsonGenerator generator = jsonFactory.createGenerator(writer);
+        generator.writeStartObject();
+        if (getRandom().nextBoolean()) {
+            generator.writeStringField("cluster_name", "elasticsearch");
+        }
+        if (getRandom().nextBoolean()) {
+            generator.writeObjectFieldStart("bogus_object");
+            generator.writeEndObject();
+        }
+        generator.writeObjectFieldStart("nodes");
+        for (int i = 0; i < numNodes; i++) {
+            String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10);
+            generator.writeObjectFieldStart(nodeId);
+            if (getRandom().nextBoolean()) {
+                generator.writeObjectFieldStart("bogus_object");
+                generator.writeEndObject();
+            }
+            if (getRandom().nextBoolean()) {
+                generator.writeArrayFieldStart("bogus_array");
+                generator.writeStartObject();
+                generator.writeEndObject();
+                generator.writeEndArray();
+            }
+            boolean isHttpEnabled = rarely() == false;
+            if (isHttpEnabled) {
+                String host = "host" + i;
+                int port = RandomInts.randomIntBetween(getRandom(), 9200, 9299);
+                HttpHost httpHost = new HttpHost(host, port, scheme.toString());
+                hosts.add(httpHost);
+                generator.writeObjectFieldStart("http");
+                if (getRandom().nextBoolean()) {
+                    generator.writeArrayFieldStart("bound_address");
+                    generator.writeString("[fe80::1]:" + port);
+                    generator.writeString("[::1]:" + port);
+                    generator.writeString("127.0.0.1:" + port);
+                    generator.writeEndArray();
+                }
+                if (getRandom().nextBoolean()) {
+                    generator.writeObjectFieldStart("bogus_object");
+                    generator.writeEndObject();
+                }
+                generator.writeStringField("publish_address", httpHost.toHostString());
+                if (getRandom().nextBoolean()) {
+                    generator.writeNumberField("max_content_length_in_bytes", 104857600);
+                }
+                generator.writeEndObject();
+            }
+            if (getRandom().nextBoolean()) {
+                String[] roles = {"master", "data", "ingest"};
+                int numRoles = RandomInts.randomIntBetween(getRandom(), 0, 3);
+                Set<String> nodeRoles = new HashSet<>(numRoles);
+                for (int j = 0; j < numRoles; j++) {
+                    String role;
+                    do {
+                        role = RandomPicks.randomFrom(getRandom(), roles);
+                    } while(nodeRoles.add(role) == false);
+                }
+                generator.writeArrayFieldStart("roles");
+                for (String nodeRole : nodeRoles) {
+                    generator.writeString(nodeRole);
+                }
+                generator.writeEndArray();
+            }
+            int numAttributes = RandomInts.randomIntBetween(getRandom(), 0, 3);
+            Map<String, String> attributes = new HashMap<>(numAttributes);
+            for (int j = 0; j < numAttributes; j++) {
+                attributes.put("attr" + j, "value" + j);
+            }
+            if (numAttributes > 0) {
+                generator.writeObjectFieldStart("attributes");
+            }
+            for (Map.Entry<String, String> entry : attributes.entrySet()) {
+                generator.writeStringField(entry.getKey(), entry.getValue());
+            }
+            if (numAttributes > 0) {
+                generator.writeEndObject();
+            }
+            generator.writeEndObject();
+        }
+        generator.writeEndObject();
+        generator.writeEndObject();
+        generator.close();
+        return SniffResponse.buildResponse(writer.toString(), hosts);
+    }
+
+    private static class SniffResponse {
+        private final String nodesInfoBody;
+        private final int nodesInfoResponseCode;
+        private final List<HttpHost> hosts;
+        private final boolean isFailure;
+
+        SniffResponse(String nodesInfoBody, List<HttpHost> hosts, boolean isFailure) {
+            this.nodesInfoBody = nodesInfoBody;
+            this.hosts = hosts;
+            this.isFailure = isFailure;
+            if (isFailure) {
+                this.nodesInfoResponseCode = randomErrorResponseCode();
+            } else {
+                this.nodesInfoResponseCode = 200;
+            }
+        }
+
+        static SniffResponse buildFailure() {
+            return new SniffResponse("", Collections.<HttpHost>emptyList(), true);
+        }
+
+        static SniffResponse buildResponse(String nodesInfoBody, List<HttpHost> hosts) {
+            return new SniffResponse(nodesInfoBody, hosts, false);
+        }
+    }
+
+    private static int randomErrorResponseCode() {
+        return RandomInts.randomIntBetween(getRandom(), 400, 599);
+    }
+}
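
For reference, not part of the diff: an abbreviated sketch of the /_nodes/http response shape that the test generator above produces and that HostsSniffer parses. The field names come from the code above; the node id and values are made up, and the optional bogus_* fields exist only to exercise the parser's skipChildren paths.

    {
      "cluster_name": "elasticsearch",
      "nodes": {
        "some_node_id": {
          "http": {
            "bound_address": ["127.0.0.1:9200"],
            "publish_address": "host0:9200"
          },
          "roles": ["master", "data"],
          "attributes": {"attr0": "value0"}
        }
      }
    }
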
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsModule.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
similarity index 65%
rename from core/src/main/java/org/elasticsearch/plugins/PluginsModule.java
rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
index 04e468cdd6c..bdc052d07c8 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginsModule.java
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
@@ -17,20 +17,23 @@ * under the License.
 */
 
-package org.elasticsearch.plugins;
+package org.elasticsearch.client.sniff;
 
-import org.elasticsearch.common.inject.AbstractModule;
+import org.apache.http.HttpHost;
 
-public class PluginsModule extends AbstractModule {
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
-    private final PluginsService pluginsService;
-
-    public PluginsModule(PluginsService pluginsService) {
-        this.pluginsService = pluginsService;
+class MockHostsSniffer extends HostsSniffer {
+    MockHostsSniffer() {
+        super(null, -1, null);
     }
 
     @Override
-    protected void configure() {
-        bind(PluginsService.class).toInstance(pluginsService);
+    public List<HttpHost> sniffHosts() throws IOException {
+        List<HttpHost> hosts = new ArrayList<>();
+        hosts.add(new HttpHost("localhost", 9200));
+        return hosts;
     }
 }
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java
new file mode 100644
index 00000000000..6a71d72f60e
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientTestCase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class SniffOnFailureListenerTests extends RestClientTestCase { + + public void testSetSniffer() throws Exception { + SniffOnFailureListener listener = new SniffOnFailureListener(); + + try { + listener.onFailure(null); + fail("should have failed"); + } catch(IllegalStateException e) { + assertEquals("sniffer was not set, unable to sniff on failure", e.getMessage()); + } + + try { + listener.setSniffer(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals("sniffer must not be null", e.getMessage()); + } + + RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build(); + try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) { + listener.setSniffer(sniffer); + try { + listener.setSniffer(sniffer); + fail("should have failed"); + } catch(IllegalStateException e) { + assertEquals("sniffer can only be set once", e.getMessage()); + } + listener.onFailure(new HttpHost("localhost", 9200)); + } + } +} diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java new file mode 100644 index 00000000000..defa83554a4 --- /dev/null +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientTestCase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +public class SnifferBuilderTests extends RestClientTestCase { + + public void testBuild() throws Exception { + int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + + HostsSniffer hostsSniffer = new MockHostsSniffer(); + + try (RestClient client = RestClient.builder(hosts).build()) { + try { + Sniffer.builder(null, hostsSniffer).build(); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals("restClient cannot be null", e.getMessage()); + } + + try { + Sniffer.builder(client, null).build(); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals("hostsSniffer cannot be null", e.getMessage()); + } + + try { + Sniffer.builder(client, hostsSniffer) + .setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); + } + + try { + Sniffer.builder(client, hostsSniffer) + .setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); + } + + try (Sniffer sniffer = Sniffer.builder(client, hostsSniffer).build()) { + assertNotNull(sniffer); + } + + Sniffer.Builder builder = Sniffer.builder(client, hostsSniffer); + if (getRandom().nextBoolean()) { + builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + } + if (getRandom().nextBoolean()) { + builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); + } + try (Sniffer sniffer = builder.build()) { + assertNotNull(sniffer); + } + } + } +} diff --git a/client/test/build.gradle b/client/test/build.gradle new file mode 100644 index 00000000000..05d044504ec --- /dev/null +++ b/client/test/build.gradle @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.JavaVersion + +apply plugin: 'elasticsearch.build' +apply plugin: 'ru.vyarus.animalsniffer' + +targetCompatibility = JavaVersion.VERSION_1_7 +sourceCompatibility = JavaVersion.VERSION_1_7 + +install.enabled = false +uploadArchives.enabled = false + +dependencies { + compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + compile "junit:junit:${versions.junit}" + compile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + compile "org.codehaus.mojo:animal-sniffer-annotations:1.15" + signature "org.codehaus.mojo.signature:java17:1.0@signature" +} + +forbiddenApisMain { + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +//JarHell is part of es core, which we don't want to pull in +jarHell.enabled=false + +// TODO: should we have licenses for our test deps? +dependencyLicenses.enabled = false + +namingConventions.enabled = false + +//we aren't releasing this jar +thirdPartyAudit.enabled = false +test.enabled = false \ No newline at end of file diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java new file mode 100644 index 00000000000..8c506beb5ac --- /dev/null +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.JUnit3MethodProvider; +import com.carrotsearch.randomizedtesting.MixWithSuiteName; +import com.carrotsearch.randomizedtesting.RandomizedTest; +import com.carrotsearch.randomizedtesting.annotations.SeedDecorators; +import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + +@TestMethodProviders({ + JUnit3MethodProvider.class +}) +@SeedDecorators({MixWithSuiteName.class}) // See LUCENE-3995 for rationale. +@ThreadLeakScope(ThreadLeakScope.Scope.SUITE) +@ThreadLeakGroup(ThreadLeakGroup.Group.MAIN) +@ThreadLeakAction({ThreadLeakAction.Action.WARN, ThreadLeakAction.Action.INTERRUPT}) +@ThreadLeakZombies(ThreadLeakZombies.Consequence.IGNORE_REMAINING_TESTS) +@ThreadLeakLingering(linger = 5000) // 5 sec lingering +@TimeoutSuite(millis = 2 * 60 * 60 * 1000) +public abstract class RestClientTestCase extends RandomizedTest { + +} diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java new file mode 100644 index 00000000000..4d4aa00f492 --- /dev/null +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +final class RestClientTestUtil { + + private static final String[] HTTP_METHODS = new String[]{"DELETE", "HEAD", "GET", "OPTIONS", "PATCH", "POST", "PUT", "TRACE"}; + private static final List<Integer> ALL_STATUS_CODES; + private static final List<Integer> OK_STATUS_CODES = Arrays.asList(200, 201); + private static final List<Integer> ALL_ERROR_STATUS_CODES; + private static List<Integer> ERROR_NO_RETRY_STATUS_CODES = Arrays.asList(400, 401, 403, 404, 405, 500); + private static List<Integer> ERROR_RETRY_STATUS_CODES = Arrays.asList(502, 503, 504); + + static { + ALL_ERROR_STATUS_CODES = new ArrayList<>(ERROR_RETRY_STATUS_CODES); + ALL_ERROR_STATUS_CODES.addAll(ERROR_NO_RETRY_STATUS_CODES); + ALL_STATUS_CODES = new ArrayList<>(ALL_ERROR_STATUS_CODES); + ALL_STATUS_CODES.addAll(OK_STATUS_CODES); + } + + private RestClientTestUtil() { + + } + + static String[] getHttpMethods() { + return HTTP_METHODS; + } + + static String randomHttpMethod(Random random) { + return RandomPicks.randomFrom(random, HTTP_METHODS); + } + + static int randomStatusCode(Random random) { + return RandomPicks.randomFrom(random, ALL_ERROR_STATUS_CODES); + } + + static int randomOkStatusCode(Random random) { + return RandomPicks.randomFrom(random, OK_STATUS_CODES); + } + + static int randomErrorNoRetryStatusCode(Random random) { + return RandomPicks.randomFrom(random, ERROR_NO_RETRY_STATUS_CODES); + } + + static int randomErrorRetryStatusCode(Random random) { + return RandomPicks.randomFrom(random, ERROR_RETRY_STATUS_CODES); + } + + static List<Integer> getOkStatusCodes() { + return OK_STATUS_CODES; + } + + static List<Integer> getAllErrorStatusCodes() { + return ALL_ERROR_STATUS_CODES; + } + + static List<Integer> getAllStatusCodes() { + return ALL_STATUS_CODES; + } +} diff --git a/core/README.textile b/core/README.textile deleted file mode 100644 index daaf5ecb70e..00000000000 --- a/core/README.textile +++ /dev/null @@ -1,235 +0,0 @@ -h1. Elasticsearch - -h2. A Distributed RESTful Search Engine - -h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch - -Elasticsearch is a distributed RESTful search engine built for the cloud. Features include: - -* Distributed and Highly Available Search Engine. -** Each index is fully sharded with a configurable number of shards. -** Each shard can have one or more replicas. -** Read / Search operations performed on either one of the replica shard. -* Multi Tenant with Multi Types. -** Support for more than one index. -** Support for more than one type per index. -** Index level configuration (number of shards, index storage, ...). -* Various set of APIs -** HTTP RESTful API -** Native Java API. -** All APIs perform automatic node operation rerouting. -* Document oriented -** No need for upfront schema definition. -** Schema can be defined per type for customization of the indexing process. -* Reliable, Asynchronous Write Behind for long term persistency. -* (Near) Real Time Search. -* Built on top of Lucene -** Each shard is a fully functional Lucene index -** All the power of Lucene easily exposed through simple configuration / plugins. -* Per operation consistency -** Single document level operations are atomic, consistent, isolated and durable. -* Open Source under the Apache License, version 2 ("ALv2") - -h2. Getting Started - -First of all, DON'T PANIC.
It will take 5 minutes to get the gist of what Elasticsearch is all about. - -h3. Requirements - -You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information. - -h3. Installation - -* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution. -* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows. -* Run @curl -X GET http://localhost:9200/@. -* Start more servers ... - -h3. Indexing - -Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically): - -
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
-    "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
-    "message": "Another tweet, will it be indexed?"
-}'
-
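The same PUT can be issued from Java with nothing beyond the JDK. The sketch below mirrors the second curl above; the endpoint and document come from that example, while the class name and the lack of error handling are illustrative only:

<pre>
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class IndexTweet {
    public static void main(String[] args) throws Exception {
        // Mirrors: curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '{...}'
        String doc = "{\"user\": \"kimchy\", \"postDate\": \"2009-11-15T13:12:00\","
                + " \"message\": \"Trying out Elasticsearch, so far so good?\"}";
        HttpURLConnection conn =
                (HttpURLConnection) new URL("http://localhost:9200/twitter/tweet/1").openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/json");
        try (OutputStream out = conn.getOutputStream()) {
            out.write(doc.getBytes(StandardCharsets.UTF_8));
        }
        // 201 (created) on the first run, 200 (updated) on reruns
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
    }
}
</pre>

Note that 200 and 201 are exactly the OK status codes the client test utilities earlier in this change treat as success.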
- -Now, let's see if the information was added by GETting it: - -
-curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
-
- -h3. Searching - -Mmm search..., shouldn't it be elastic? -Let's find all the tweets that @kimchy@ posted: - -
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
-
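When the @q=user:kimchy@ parameter is built programmatically rather than typed into curl, it should be URL-encoded. A minimal JDK-only sketch, assuming the same local node on port 9200; the class name is ours:

<pre>
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class SearchTweets {
    public static void main(String[] args) throws Exception {
        // Encode the Lucene query string before putting it on the URL
        String q = URLEncoder.encode("user:kimchy", StandardCharsets.UTF_8.name());
        URL url = new URL("http://localhost:9200/twitter/tweet/_search?pretty=true&q=" + q);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // same JSON hits the curl above prints
            }
        }
        conn.disconnect();
    }
}
</pre>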
- -We can also use the JSON query language Elasticsearch provides instead of a query string: - -
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
-{
-    "query" : {
-        "match" : { "user": "kimchy" }
-    }
-}'
-
- -Just for kicks, let's get all the documents stored (we should see the user as well): - -
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-
- -We can also do range search (the @postDate@ was automatically identified as date) - -
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
-    "query" : {
-        "range" : {
-            "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
-        }
-    }
-}'
-
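The bounds in the range query are plain ISO-8601 date-times, which is why the auto-detected @postDate@ field can be filtered this way. A small sketch of producing such bounds with @java.time@; the formatter pattern and class name are ours, chosen to reproduce the strings in the curl example:

<pre>
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class RangeBounds {
    public static void main(String[] args) {
        // "yyyy-MM-dd'T'HH:mm:ss" reproduces the bounds used in the curl example
        DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss");
        LocalDateTime from = LocalDateTime.of(2009, 11, 15, 13, 0, 0);
        LocalDateTime to = from.plusHours(1);
        String body = "{ \"query\" : { \"range\" : { \"postDate\" : { \"from\" : \""
                + fmt.format(from) + "\", \"to\" : \"" + fmt.format(to) + "\" } } } }";
        System.out.println(body); // send as the -d payload of the search request
    }
}
</pre>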
- -There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser. - -h3. Multi Tenant - Indices and Types - -Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data. - -Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@. - -Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case: - -
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T13:12:00",
-    "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
-{
-    "user": "kimchy",
-    "postDate": "2009-11-15T14:12:12",
-    "message": "Another tweet, will it be indexed?"
-}'
-
- -The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index. - -Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well): - -
-curl -XPUT http://localhost:9200/another_user/ -d '
-{
-    "index" : {
-        "numberOfShards" : 1,
-        "numberOfReplicas" : 1
-    }
-}'
-
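Creating one such index per user is easy to script. This sketch reuses the JDK-only PUT pattern from the indexing example; the settings body mirrors the curl call above, while the user names and class name are placeholders of ours:

<pre>
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class CreateUserIndices {
    public static void main(String[] args) throws Exception {
        String settings = "{ \"index\" : { \"numberOfShards\" : 1, \"numberOfReplicas\" : 1 } }";
        for (String user : new String[]{"another_user", "yet_another_user"}) {
            HttpURLConnection conn =
                    (HttpURLConnection) new URL("http://localhost:9200/" + user + "/").openConnection();
            conn.setRequestMethod("PUT");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "application/json");
            try (OutputStream out = conn.getOutputStream()) {
                out.write(settings.getBytes(StandardCharsets.UTF_8));
            }
            // one shard / one replica per twitter user; creating an index that
            // already exists returns an error response rather than succeeding
            System.out.println(user + ": HTTP " + conn.getResponseCode());
            conn.disconnect();
        }
    }
}
</pre>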
- -Search (and similar operations) are multi index aware. This means that we can easily search on more than one -index (twitter user), for example: - -
-curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-
- -Or on all the indices: - -
-curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
-{
-    "query" : {
-        "matchAll" : {}
-    }
-}'
-
- -{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends). - -h3. Distributed, Highly Available - -Let's face it, things will fail.... - -Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards). - -In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed. - -h3. Where to go from here? - -We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website. - -h3. Building from Source - -Elasticsearch uses "Maven":http://maven.apache.org for its build system. - -In order to create a distribution, simply run the @mvn clean package --DskipTests@ command in the cloned directory. - -The distribution will be created under @target/releases@. - -See the "TESTING":TESTING.asciidoc file for more information about -running the Elasticsearch test suite. - -h3. Upgrading to Elasticsearch 1.x? - -In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process. - -h1. License - -
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch 
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-
diff --git a/core/build.gradle b/core/build.gradle index ab3754e72ff..6fd8c62af3e 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -24,6 +24,16 @@ import org.elasticsearch.gradle.BuildPlugin apply plugin: 'elasticsearch.build' apply plugin: 'com.bmuschko.nexus' apply plugin: 'nebula.optional-base' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +publishing { + publications { + nebula { + artifactId 'elasticsearch' + } + } +} archivesBaseName = 'elasticsearch' @@ -46,14 +56,14 @@ dependencies { compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}" compile "org.apache.lucene:lucene-suggest:${versions.lucene}" - compile 'org.elasticsearch:securesm:1.0' + compile 'org.elasticsearch:securesm:1.1' // utilities - compile 'net.sf.jopt-simple:jopt-simple:4.9' + compile 'net.sf.jopt-simple:jopt-simple:5.0.2' compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.8.2' + compile 'joda-time:joda-time:2.9.4' // joda 2.0 moved to using volatile fields for datetime // When updating to a new version, make sure to update our copy of BaseDateTime compile 'org.joda:joda-convert:1.2' @@ -65,7 +75,7 @@ dependencies { compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" // network stack - compile 'io.netty:netty:3.10.5.Final' + compile 'io.netty:netty:3.10.6.Final' // percentiles aggregation compile 'com.tdunning:t-digest:3.0' // precentil ranks aggregation @@ -79,7 +89,7 @@ dependencies { compile "log4j:log4j:${versions.log4j}", optional compile "log4j:apache-log4j-extras:${versions.log4j}", optional - compile "net.java.dev.jna:jna:${versions.jna}", optional + compile "net.java.dev.jna:jna:${versions.jna}" if (isEclipse == false || project.path == ":core-tests") { testCompile("org.elasticsearch.test:framework:${version}") { @@ -111,6 +121,36 @@ forbiddenPatterns { exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt' } +task generateModulesList { + List modules = project(':modules').subprojects.collect { it.name } + File modulesFile = new File(buildDir, 'generated-resources/modules.txt') + processResources.from(modulesFile) + inputs.property('modules', modules) + outputs.file(modulesFile) + doLast { + modulesFile.parentFile.mkdirs() + modulesFile.setText(modules.join('\n'), 'UTF-8') + } +} + +task generatePluginsList { + List plugins = project(':plugins').subprojects + .findAll { it.name.contains('example') == false } + .collect { it.name } + File pluginsFile = new File(buildDir, 'generated-resources/plugins.txt') + processResources.from(pluginsFile) + inputs.property('plugins', plugins) + outputs.file(pluginsFile) + doLast { + pluginsFile.parentFile.mkdirs() + pluginsFile.setText(plugins.join('\n'), 'UTF-8') + } +} + +processResources { + dependsOn generateModulesList, generatePluginsList +} + thirdPartyAudit.excludes = [ // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) 'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java b/core/src/main/java/org/apache/log4j/Java9Hack.java similarity index 63% rename from core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java rename to core/src/main/java/org/apache/log4j/Java9Hack.java index 094a29cd6b1..831cf5b35ae 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java +++ b/core/src/main/java/org/apache/log4j/Java9Hack.java @@ -17,19 +17,21 @@ 
* under the License. */ -package org.elasticsearch.search.query; +package org.apache.log4j; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchParseElement; -import org.elasticsearch.search.internal.SearchContext; +import org.apache.log4j.helpers.ThreadLocalMap; /** + * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning). * + * This hack fixes up the pkg private members as if it had detected the java version correctly. */ -public class QueryParseElement implements SearchParseElement { +public class Java9Hack { - @Override - public void parse(XContentParser parser, SearchContext context) throws Exception { - context.parsedQuery(context.getQueryShardContext().parse(parser)); + public static void fixLog4j() { + if (MDC.mdc.tlm == null) { + MDC.mdc.java1 = false; + MDC.mdc.tlm = new ThreadLocalMap(); + } } } diff --git a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java b/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java deleted file mode 100644 index 580b875ce2c..00000000000 --- a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.lucene.document; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; - -import org.apache.lucene.search.Query; -import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.common.SuppressForbidden; - -/** - * Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and - * LUCENE-7234 are released. - */ -// TODO: remove me when we upgrade to Lucene 6.1 -@SuppressForbidden(reason="uses InetAddress.getHostAddress") -public final class XInetAddressPoint { - - private XInetAddressPoint() {} - - /** The minimum value that an ip address can hold. */ - public static final InetAddress MIN_VALUE; - /** The maximum value that an ip address can hold. */ - public static final InetAddress MAX_VALUE; - static { - MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]); - byte[] maxValueBytes = new byte[InetAddressPoint.BYTES]; - Arrays.fill(maxValueBytes, (byte) 0xFF); - MAX_VALUE = InetAddressPoint.decode(maxValueBytes); - } - - /** - * Return the {@link InetAddress} that compares immediately greater than - * {@code address}. 
- * @throws ArithmeticException if the provided address is the - * {@link #MAX_VALUE maximum ip address} - */ - public static InetAddress nextUp(InetAddress address) { - if (address.equals(MAX_VALUE)) { - throw new ArithmeticException("Overflow: there is no greater InetAddress than " - + address.getHostAddress()); - } - byte[] delta = new byte[InetAddressPoint.BYTES]; - delta[InetAddressPoint.BYTES-1] = 1; - byte[] nextUpBytes = new byte[InetAddressPoint.BYTES]; - NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes); - return InetAddressPoint.decode(nextUpBytes); - } - - /** - * Return the {@link InetAddress} that compares immediately less than - * {@code address}. - * @throws ArithmeticException if the provided address is the - * {@link #MIN_VALUE minimum ip address} - */ - public static InetAddress nextDown(InetAddress address) { - if (address.equals(MIN_VALUE)) { - throw new ArithmeticException("Underflow: there is no smaller InetAddress than " - + address.getHostAddress()); - } - byte[] delta = new byte[InetAddressPoint.BYTES]; - delta[InetAddressPoint.BYTES-1] = 1; - byte[] nextDownBytes = new byte[InetAddressPoint.BYTES]; - NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes); - return InetAddressPoint.decode(nextDownBytes); - } - - /** - * Create a prefix query for matching a CIDR network range. - * - * @param field field name. must not be {@code null}. - * @param value any host address - * @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4 - * addresses. - * @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid. - * @return a query matching documents with addresses contained within this network - */ - // TODO: remove me when we upgrade to Lucene 6.0.1 - public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) { - if (value == null) { - throw new IllegalArgumentException("InetAddress must not be null"); - } - if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) { - throw new IllegalArgumentException("illegal prefixLength '" + prefixLength - + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges"); - } - // create the lower value by zeroing out the host portion, upper value by filling it with all ones. 
- byte lower[] = value.getAddress(); - byte upper[] = value.getAddress(); - for (int i = prefixLength; i < 8 * lower.length; i++) { - int m = 1 << (7 - (i & 7)); - lower[i >> 3] &= ~m; - upper[i >> 3] |= m; - } - try { - return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper)); - } catch (UnknownHostException e) { - throw new AssertionError(e); // values are coming from InetAddress - } - } -} diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 564f780b8ed..a4b94b007fd 100644 --- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -283,7 +283,7 @@ public abstract class BlendedTermQuery extends Query { @Override public boolean equals(Object o) { if (this == o) return true; - if (!super.equals(o)) return false; + if (sameClassAs(o) == false) return false; BlendedTermQuery that = (BlendedTermQuery) o; return Arrays.equals(equalsTerms(), that.equalsTerms()); @@ -291,7 +291,7 @@ public abstract class BlendedTermQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms())); + return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) { diff --git a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java index 86982bfc949..a8b7dc9299f 100644 --- a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java @@ -44,12 +44,12 @@ public final class MinDocQuery extends Query { @Override public int hashCode() { - return Objects.hash(super.hashCode(), minDoc); + return Objects.hash(classHash(), minDoc); } @Override public boolean equals(Object obj) { - if (super.equals(obj) == false) { + if (sameClassAs(obj) == false) { return false; } MinDocQuery that = (MinDocQuery) obj; diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java index a9327d785e1..c65f962dbb8 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java @@ -63,9 +63,6 @@ import org.elasticsearch.common.io.PathUtils; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -622,8 +619,12 @@ public long ramBytesUsed() { Set<BytesRef> seenSurfaceForms = new HashSet<>(); int dedup = 0; - while (reader.read(scratch)) { - input.reset(scratch.bytes(), 0, scratch.length()); + while (true) { + BytesRef bytes = reader.next(); + if (bytes == null) { + break; + } + input.reset(bytes.bytes, bytes.offset, bytes.length); short analyzedLength = input.readShort(); analyzed.grow(analyzedLength+2); input.readBytes(analyzed.bytes(), 0, analyzedLength); @@ -631,13 +632,13 @@ public long ramBytesUsed() { long cost = input.readInt(); - surface.bytes = scratch.bytes(); + surface.bytes = bytes.bytes; if (hasPayloads) { surface.length = input.readShort(); surface.offset =
input.getPosition(); } else { surface.offset = input.getPosition(); - surface.length = scratch.length() - surface.offset; + surface.length = bytes.length - surface.offset; } if (previousAnalyzed == null) { @@ -679,11 +680,11 @@ public long ramBytesUsed() { builder.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface))); } else { int payloadOffset = input.getPosition() + surface.length; - int payloadLength = scratch.length() - payloadOffset; + int payloadLength = bytes.length - payloadOffset; BytesRef br = new BytesRef(surface.length + 1 + payloadLength); System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length); br.bytes[surface.length] = (byte) payloadSep; - System.arraycopy(scratch.bytes(), payloadOffset, br.bytes, surface.length+1, payloadLength); + System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength); br.length = br.bytes.length; builder.add(scratchInts.get(), outputs.newPair(cost, br)); } @@ -1109,7 +1110,7 @@ public long ramBytesUsed() { this.analyzed.copyBytes(analyzed); } - private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> { + private static final class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> { BytesRef payload; long weight; diff --git a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index ea504f7688c..e1ae7b938b3 100644 --- a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; */ public class StoreRateLimiting { - public static interface Provider { + public interface Provider { StoreRateLimiting rateLimiting(); } diff --git a/core/src/main/java/org/elasticsearch/Build.java b/core/src/main/java/org/elasticsearch/Build.java index f844e3b4040..25da5f28166 100644 --- a/core/src/main/java/org/elasticsearch/Build.java +++ b/core/src/main/java/org/elasticsearch/Build.java @@ -19,16 +19,11 @@ package org.elasticsearch; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; -import java.net.URISyntaxException; import java.net.URL; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.jar.JarInputStream; import java.util.jar.Manifest; @@ -47,9 +42,9 @@ public class Build { final String date; final boolean isSnapshot; - Path path = getElasticsearchCodebase(); - if (path.toString().endsWith(".jar")) { - try (JarInputStream jar = new JarInputStream(Files.newInputStream(path))) { + final URL url = getElasticsearchCodebase(); + if (url.toString().endsWith(".jar")) { + try (JarInputStream jar = new JarInputStream(url.openStream())) { Manifest manifest = jar.getManifest(); shortHash = manifest.getMainAttributes().getValue("Change"); date = manifest.getMainAttributes().getValue("Build-Date"); @@ -80,14 +75,8 @@ public class Build { /** * Returns path to elasticsearch codebase path */ - @SuppressForbidden(reason = "looks up path of elasticsearch.jar directly") - static Path getElasticsearchCodebase() { - URL url = Build.class.getProtectionDomain().getCodeSource().getLocation(); - try { - return PathUtils.get(url.toURI()); - } catch (URISyntaxException bogus) { - throw new RuntimeException(bogus); - } + static URL getElasticsearchCodebase() { + return
Build.class.getProtectionDomain().getCodeSource().getLocation(); } private String shortHash; diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index 3332bfed0c3..54bbfc851d2 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -21,18 +21,18 @@ package org.elasticsearch; import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TcpTransport; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -47,7 +47,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VAL /** * A base class for all elasticsearch exceptions. */ -public class ElasticsearchException extends RuntimeException implements ToXContent { +public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable { public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip"; @@ -99,18 +99,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte } public ElasticsearchException(StreamInput in) throws IOException { - super(in.readOptionalString(), in.readThrowable()); + super(in.readOptionalString(), in.readException()); readStackTrace(this, in); - int numKeys = in.readVInt(); - for (int i = 0; i < numKeys; i++) { - final String key = in.readString(); - final int numValues = in.readVInt(); - final ArrayList<String> values = new ArrayList<>(numValues); - for (int j = 0; j < numValues; j++) { - values.add(in.readString()); - } - headers.put(key, values); - } + headers.putAll(in.readMapOfLists()); } /** @@ -161,7 +152,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte * Unwraps the actual cause from the exception for cases when the exception is a * {@link ElasticsearchWrapperException}. * - * @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable) + * @see ExceptionsHelper#unwrapCause(Throwable) */ public Throwable unwrapCause() { return ExceptionsHelper.unwrapCause(this); @@ -200,53 +191,12 @@ public class ElasticsearchException extends RuntimeException implements ToXConte return rootCause; } - /** - * Check whether this exception contains an exception of the given type: - * either it is of the given class itself or it contains a nested cause - * of the given type.
- * - * @param exType the exception type to look for - * @return whether there is a nested exception of the specified type - */ - public boolean contains(Class exType) { - if (exType == null) { - return false; - } - if (exType.isInstance(this)) { - return true; - } - Throwable cause = getCause(); - if (cause == this) { - return false; - } - if (cause instanceof ElasticsearchException) { - return ((ElasticsearchException) cause).contains(exType); - } else { - while (cause != null) { - if (exType.isInstance(cause)) { - return true; - } - if (cause.getCause() == cause) { - break; - } - cause = cause.getCause(); - } - return false; - } - } - + @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(this.getMessage()); - out.writeThrowable(this.getCause()); + out.writeException(this.getCause()); writeStackTraces(this, out); - out.writeVInt(headers.size()); - for (Map.Entry<String, List<String>> entry : headers.entrySet()) { - out.writeString(entry.getKey()); - out.writeVInt(entry.getValue().size()); - for (String v : entry.getValue()) { - out.writeString(v); - } - } + out.writeMapOfLists(headers); } public static ElasticsearchException readException(StreamInput input, int id) throws IOException { @@ -448,7 +398,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte int numSuppressed = in.readVInt(); for (int i = 0; i < numSuppressed; i++) { - throwable.addSuppressed(in.readThrowable()); + throwable.addSuppressed(in.readException()); } return throwable; } @@ -468,7 +418,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte Throwable[] suppressed = throwable.getSuppressed(); out.writeVInt(suppressed.length); for (Throwable t : suppressed) { - out.writeThrowable(t); + out.writeException(t); } return throwable; } @@ -530,7 +480,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.shard.IndexShardStartedException::new, 23), SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class, org.elasticsearch.search.SearchContextMissingException::new, 24), - SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25), + GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class, + org.elasticsearch.script.GeneralScriptException::new, 25), BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class, org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26), SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class, @@ -679,8 +630,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106), REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class, org.elasticsearch.repositories.RepositoryMissingException::new, 107), - PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class, - org.elasticsearch.index.percolator.PercolatorException::new, 108), DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class, org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109), FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class, @@ -711,8 +660,8 @@ public class ElasticsearchException extends RuntimeException
implements ToXConte org.elasticsearch.indices.IndexAlreadyExistsException::new, 123), SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class, org.elasticsearch.script.Script.ScriptParseException::new, 124), - HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class, - org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125), + HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class, + TcpTransport.HttpOnTransportException::new, 125), MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class, org.elasticsearch.index.mapper.MapperParsingException::new, 126), SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class, @@ -742,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class, org.elasticsearch.index.query.QueryShardException::new, 141), NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class, - ShardStateAction.NoLongerPrimaryShardException::new, 142); + ShardStateAction.NoLongerPrimaryShardException::new, 142), + SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143); final Class exceptionClass; @@ -827,9 +777,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte return null; } - public static void renderThrowable(XContentBuilder builder, Params params, Throwable t) throws IOException { + public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException { builder.startObject("error"); - final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t); + final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e); builder.field("root_cause"); builder.startArray(); for (ElasticsearchException rootCause : rootCauses) { @@ -839,7 +789,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte builder.endObject(); } builder.endArray(); - ElasticsearchException.toXContent(builder, params, t); + ElasticsearchException.toXContent(builder, params, e); builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java b/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java index f4878fe6f9b..b6cd420c856 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java @@ -36,7 +36,7 @@ public class ElasticsearchSecurityException extends ElasticsearchException { this.status = status ; } - public ElasticsearchSecurityException(String msg, Throwable cause, Object... args) { + public ElasticsearchSecurityException(String msg, Exception cause, Object... 
args) { this(msg, ExceptionsHelper.status(cause), cause, args); } diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java index 3842ab4e3bf..e2af52ccd2c 100644 --- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -37,25 +37,22 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -/** - * - */ public final class ExceptionsHelper { private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class); - public static RuntimeException convertToRuntime(Throwable t) { - if (t instanceof RuntimeException) { - return (RuntimeException) t; + public static RuntimeException convertToRuntime(Exception e) { + if (e instanceof RuntimeException) { + return (RuntimeException) e; } - return new ElasticsearchException(t); + return new ElasticsearchException(e); } - public static ElasticsearchException convertToElastic(Throwable t) { - if (t instanceof ElasticsearchException) { - return (ElasticsearchException) t; + public static ElasticsearchException convertToElastic(Exception e) { + if (e instanceof ElasticsearchException) { + return (ElasticsearchException) e; } - return new ElasticsearchException(t); + return new ElasticsearchException(e); } public static RestStatus status(Throwable t) { @@ -89,15 +86,14 @@ public final class ExceptionsHelper { return result; } + /** + * @deprecated Don't swallow exceptions, allow them to propagate. + */ + @Deprecated public static String detailedMessage(Throwable t) { - return detailedMessage(t, false, 0); - } - - public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) { if (t == null) { return "Unknown"; } - int counter = initialCounter + 1; if (t.getCause() != null) { StringBuilder sb = new StringBuilder(); while (t != null) { @@ -107,21 +103,11 @@ public final class ExceptionsHelper { sb.append(t.getMessage()); sb.append("]"); } - if (!newLines) { - sb.append("; "); - } + sb.append("; "); t = t.getCause(); if (t != null) { - if (newLines) { - sb.append("\n"); - for (int i = 0; i < counter; i++) { - sb.append("\t"); - } - } else { - sb.append("nested: "); - } + sb.append("nested: "); } - counter++; } return sb.toString(); } else { @@ -175,8 +161,8 @@ public final class ExceptionsHelper { } public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, + return (IOException) unwrap(t, CorruptIndexException.class, + IndexFormatTooOldException.class, IndexFormatTooNewException.class); } @@ -220,7 +206,6 @@ public final class ExceptionsHelper { return true; } - /** * Deduplicate the failures by exception message and index. */ diff --git a/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java index d38de2e3bc1..d408fdef033 100644 --- a/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java +++ b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java @@ -32,7 +32,7 @@ public class ResourceNotFoundException extends ElasticsearchException { super(msg, args); } - protected ResourceNotFoundException(String msg, Throwable cause, Object... args) { + public ResourceNotFoundException(String msg, Throwable cause, Object... 
args) { super(msg, cause, args); } diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 56d245ddc51..da876730b9a 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -22,7 +22,6 @@ package org.elasticsearch; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -32,7 +31,6 @@ import java.io.IOException; /** */ -@SuppressWarnings("deprecation") public class Version { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA @@ -69,11 +67,21 @@ public class Version { public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_2_3_2_ID = 2030299; public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); + public static final int V_2_3_3_ID = 2030399; + public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); + public static final int V_2_3_4_ID = 2030499; + public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final Version CURRENT = V_5_0_0_alpha2; + public static final int V_5_0_0_alpha3_ID = 5000003; + public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); + public static final int V_5_0_0_alpha4_ID = 5000004; + public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); + public static final int V_5_0_0_alpha5_ID = 5000005; + public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0); + public static final Version CURRENT = V_5_0_0_alpha5; static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" @@ -86,10 +94,20 @@ public class Version { public static Version fromId(int id) { switch (id) { + case V_5_0_0_alpha5_ID: + return V_5_0_0_alpha5; + case V_5_0_0_alpha4_ID: + return V_5_0_0_alpha4; + case V_5_0_0_alpha3_ID: + return V_5_0_0_alpha3; case V_5_0_0_alpha2_ID: return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; + case V_2_3_4_ID: + return V_2_3_4; + case V_2_3_3_ID: + return V_2_3_3; case V_2_3_2_ID: return V_2_3_2; case V_2_3_1_ID: @@ -318,18 +336,4 @@ public class Version { public boolean isRC() { return build > 50 && build < 99; } - - public static class Module extends AbstractModule { - - private final Version version; - - public Module(Version version) { - this.version = version; - } - - @Override - protected void configure() { - bind(Version.class).toInstance(version); - } - } } diff --git 
a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java index 8447d6cef08..e7d5ecd8d64 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java @@ -32,5 +32,5 @@ public interface ActionListener<Response> { /** * A failure caused by an exception at some phase of the task. */ - void onFailure(Throwable e); + void onFailure(Exception e); } diff --git a/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java index 6cdc1c3194f..e0e04652315 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java +++ b/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java @@ -20,7 +20,7 @@ package org.elasticsearch.action; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; @@ -31,7 +31,7 @@ import java.util.function.Supplier; * A simple base class for action response listeners, defaulting to using the SAME executor (as its * very common on response handlers). */ -public class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> { +public class ActionListenerResponseHandler<Response extends TransportResponse> implements TransportResponseHandler<Response> { private final ActionListener<Response> listener; private final Supplier<Response> responseSupplier; diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 3e93f699645..46494f4ebdc 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -19,6 +19,12 @@ package org.elasticsearch.action; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; @@ -32,6 +38,8 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; +import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -62,10 +70,14 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; +import 
org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction; import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction; -import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction; -import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction; import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction; @@ -107,6 +119,8 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.TransportIndicesSegmentsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; @@ -115,6 +129,8 @@ import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettin import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; +import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -139,7 +155,7 @@ import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; import org.elasticsearch.action.fieldstats.FieldStatsAction; -import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction; +import org.elasticsearch.action.fieldstats.TransportFieldStatsAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.get.TransportGetAction; @@ -147,28 +163,18 @@ import org.elasticsearch.action.get.TransportMultiGetAction; import org.elasticsearch.action.get.TransportShardMultiGetAction; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.TransportIndexAction; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; -import 
org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction; -import org.elasticsearch.action.ingest.IngestActionFilter; -import org.elasticsearch.action.ingest.IngestProxyActionFilter; import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineTransportAction; import org.elasticsearch.action.ingest.GetPipelineAction; import org.elasticsearch.action.ingest.GetPipelineTransportAction; +import org.elasticsearch.action.ingest.IngestActionFilter; +import org.elasticsearch.action.ingest.IngestProxyActionFilter; import org.elasticsearch.action.ingest.PutPipelineAction; import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; import org.elasticsearch.action.main.MainAction; import org.elasticsearch.action.main.TransportMainAction; -import org.elasticsearch.action.percolate.MultiPercolateAction; -import org.elasticsearch.action.percolate.PercolateAction; -import org.elasticsearch.action.percolate.TransportMultiPercolateAction; -import org.elasticsearch.action.percolate.TransportPercolateAction; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.SearchAction; @@ -189,189 +195,470 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction import org.elasticsearch.action.termvectors.TransportTermVectorsAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.Multibinder; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.ActionPlugin.ActionHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.action.admin.cluster.allocation.RestClusterAllocationExplainAction; +import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; +import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; +import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; +import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; +import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestCancelTasksAction; +import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestGetTaskAction; +import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; +import 
org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; +import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; +import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; +import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; +import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; +import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction; +import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; +import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; +import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; +import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; +import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; +import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; +import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; +import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; +import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; +import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; +import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; +import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; +import 
org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; +import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; +import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; +import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; +import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; +import org.elasticsearch.rest.action.bulk.RestBulkAction; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestAliasAction; +import org.elasticsearch.rest.action.cat.RestAllocationAction; +import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestFielddataAction; +import org.elasticsearch.rest.action.cat.RestHealthAction; +import org.elasticsearch.rest.action.cat.RestIndicesAction; +import org.elasticsearch.rest.action.cat.RestMasterAction; +import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.cat.RestPluginsAction; +import org.elasticsearch.rest.action.cat.RestRepositoriesAction; +import org.elasticsearch.rest.action.cat.RestSegmentsAction; +import org.elasticsearch.rest.action.cat.RestShardsAction; +import org.elasticsearch.rest.action.cat.RestSnapshotAction; +import org.elasticsearch.rest.action.cat.RestTasksAction; +import org.elasticsearch.rest.action.cat.RestThreadPoolAction; +import org.elasticsearch.rest.action.delete.RestDeleteAction; +import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; +import org.elasticsearch.rest.action.get.RestGetAction; +import org.elasticsearch.rest.action.get.RestGetSourceAction; +import org.elasticsearch.rest.action.get.RestHeadAction; +import org.elasticsearch.rest.action.get.RestMultiGetAction; +import org.elasticsearch.rest.action.index.RestIndexAction; +import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction; +import org.elasticsearch.rest.action.ingest.RestGetPipelineAction; +import org.elasticsearch.rest.action.ingest.RestPutPipelineAction; +import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.rest.action.search.RestClearScrollAction; +import org.elasticsearch.rest.action.search.RestMultiSearchAction; +import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.rest.action.suggest.RestSuggestAction; +import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; +import 
org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; +import org.elasticsearch.rest.action.update.RestUpdateAction; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; /** - * + * Builds and binds the generic action map, all {@link TransportAction}s, and {@link ActionFilters}. */ public class ActionModule extends AbstractModule { - private final Map actions = new HashMap<>(); - private final List> actionFilters = new ArrayList<>(); + private final boolean transportClient; + private final Settings settings; + private final List actionPlugins; + private final Map> actions; + private final List> actionFilters; + private final AutoCreateIndex autoCreateIndex; + private final DestructiveOperations destructiveOperations; + private final RestController restController; - static class ActionEntry, Response extends ActionResponse> { - public final GenericAction action; - public final Class> transportAction; - public final Class[] supportTransportActions; + public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver, + ClusterSettings clusterSettings, List actionPlugins) { + this.transportClient = transportClient; + this.settings = settings; + this.actionPlugins = actionPlugins; + actions = setupActions(actionPlugins); + actionFilters = setupActionFilters(actionPlugins, ingestEnabled); + autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver); + destructiveOperations = new DestructiveOperations(settings, clusterSettings); + restController = new RestController(settings); + } - ActionEntry(GenericAction action, Class> transportAction, Class... supportTransportActions) { - this.action = action; - this.transportAction = transportAction; - this.supportTransportActions = supportTransportActions; + public Map> getActions() { + return actions; + } + + static Map> setupActions(List actionPlugins) { + // Subclass NamedRegistry for easy registration + class ActionRegistry extends NamedRegistry> { + public ActionRegistry() { + super("action"); + } + + public void register(ActionHandler handler) { + register(handler.getAction().name(), handler); + } + + public , Response extends ActionResponse> void register( + GenericAction action, Class> transportAction, + Class... 
supportTransportActions) { + register(new ActionHandler<>(action, transportAction, supportTransportActions)); + } } + ActionRegistry actions = new ActionRegistry(); + + actions.register(MainAction.INSTANCE, TransportMainAction.class); + actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); + actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); + actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); + actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class); + actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class); + actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); + + actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); + actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); + actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); + actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); + actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); + actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); + actions.register(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); + actions.register(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class); + actions.register(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class); + actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); + actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); + actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); + actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); + actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); + actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); + actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); + actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); + + actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); + actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); + actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); + actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); + actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); + actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); + actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); + actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class); + actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); + actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); + actions.register(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class); + actions.register(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class); + actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); + actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, + TransportGetFieldMappingsIndexAction.class); + actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class); + 
actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); + actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); + actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); + actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); + actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); + actions.register(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class); + actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); + actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class); + actions.register(FlushAction.INSTANCE, TransportFlushAction.class); + actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); + actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); + actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class); + actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); + actions.register(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); + actions.register(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); + actions.register(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); + actions.register(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); + actions.register(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); + + actions.register(IndexAction.INSTANCE, TransportIndexAction.class); + actions.register(GetAction.INSTANCE, TransportGetAction.class); + actions.register(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class); + actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, + TransportShardMultiTermsVectorAction.class); + actions.register(DeleteAction.INSTANCE, TransportDeleteAction.class); + actions.register(UpdateAction.INSTANCE, TransportUpdateAction.class); + actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class, + TransportShardMultiGetAction.class); + actions.register(BulkAction.INSTANCE, TransportBulkAction.class, + TransportShardBulkAction.class); + actions.register(SearchAction.INSTANCE, TransportSearchAction.class); + actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); + actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); + actions.register(ExplainAction.INSTANCE, TransportExplainAction.class); + actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); + actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); + + //Indexed scripts + actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); + actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); + actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); + + actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class); + + actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); + actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); + actions.register(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class); + actions.register(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class); + + actionPlugins.stream().flatMap(p -> p.getActions().stream()).forEach(actions::register); + + return 
unmodifiableMap(actions.getRegistry()); } - private final boolean ingestEnabled; - private final boolean proxy; + private List> setupActionFilters(List actionPlugins, boolean ingestEnabled) { + List> filters = new ArrayList<>(); + if (transportClient == false) { + if (ingestEnabled) { + filters.add(IngestActionFilter.class); + } else { + filters.add(IngestProxyActionFilter.class); + } + } - public ActionModule(boolean ingestEnabled, boolean proxy) { - this.ingestEnabled = ingestEnabled; - this.proxy = proxy; + for (ActionPlugin plugin : actionPlugins) { + filters.addAll(plugin.getActionFilters()); + } + return unmodifiableList(filters); } - /** - * Registers an action. - * - * @param action The action type. - * @param transportAction The transport action implementing the actual action. - * @param supportTransportActions Any support actions that are needed by the transport action. - * @param The request type. - * @param The response type. - */ - public , Response extends ActionResponse> void registerAction(GenericAction action, Class> transportAction, Class... supportTransportActions) { - actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions)); + static Set> setupRestHandlers(List actionPlugins) { + Set> handlers = new HashSet<>(); + registerRestHandler(handlers, RestMainAction.class); + registerRestHandler(handlers, RestNodesInfoAction.class); + registerRestHandler(handlers, RestNodesStatsAction.class); + registerRestHandler(handlers, RestNodesHotThreadsAction.class); + registerRestHandler(handlers, RestClusterAllocationExplainAction.class); + registerRestHandler(handlers, RestClusterStatsAction.class); + registerRestHandler(handlers, RestClusterStateAction.class); + registerRestHandler(handlers, RestClusterHealthAction.class); + registerRestHandler(handlers, RestClusterUpdateSettingsAction.class); + registerRestHandler(handlers, RestClusterGetSettingsAction.class); + registerRestHandler(handlers, RestClusterRerouteAction.class); + registerRestHandler(handlers, RestClusterSearchShardsAction.class); + registerRestHandler(handlers, RestPendingClusterTasksAction.class); + registerRestHandler(handlers, RestPutRepositoryAction.class); + registerRestHandler(handlers, RestGetRepositoriesAction.class); + registerRestHandler(handlers, RestDeleteRepositoryAction.class); + registerRestHandler(handlers, RestVerifyRepositoryAction.class); + registerRestHandler(handlers, RestGetSnapshotsAction.class); + registerRestHandler(handlers, RestCreateSnapshotAction.class); + registerRestHandler(handlers, RestRestoreSnapshotAction.class); + registerRestHandler(handlers, RestDeleteSnapshotAction.class); + registerRestHandler(handlers, RestSnapshotsStatusAction.class); + + registerRestHandler(handlers, RestIndicesExistsAction.class); + registerRestHandler(handlers, RestTypesExistsAction.class); + registerRestHandler(handlers, RestGetIndicesAction.class); + registerRestHandler(handlers, RestIndicesStatsAction.class); + registerRestHandler(handlers, RestIndicesSegmentsAction.class); + registerRestHandler(handlers, RestIndicesShardStoresAction.class); + registerRestHandler(handlers, RestGetAliasesAction.class); + registerRestHandler(handlers, RestAliasesExistAction.class); + registerRestHandler(handlers, RestIndexDeleteAliasesAction.class); + registerRestHandler(handlers, RestIndexPutAliasAction.class); + registerRestHandler(handlers, RestIndicesAliasesAction.class); + registerRestHandler(handlers, RestCreateIndexAction.class); + registerRestHandler(handlers, 
RestShrinkIndexAction.class); + registerRestHandler(handlers, RestRolloverIndexAction.class); + registerRestHandler(handlers, RestDeleteIndexAction.class); + registerRestHandler(handlers, RestCloseIndexAction.class); + registerRestHandler(handlers, RestOpenIndexAction.class); + + registerRestHandler(handlers, RestUpdateSettingsAction.class); + registerRestHandler(handlers, RestGetSettingsAction.class); + + registerRestHandler(handlers, RestAnalyzeAction.class); + registerRestHandler(handlers, RestGetIndexTemplateAction.class); + registerRestHandler(handlers, RestPutIndexTemplateAction.class); + registerRestHandler(handlers, RestDeleteIndexTemplateAction.class); + registerRestHandler(handlers, RestHeadIndexTemplateAction.class); + + registerRestHandler(handlers, RestPutMappingAction.class); + registerRestHandler(handlers, RestGetMappingAction.class); + registerRestHandler(handlers, RestGetFieldMappingAction.class); + + registerRestHandler(handlers, RestRefreshAction.class); + registerRestHandler(handlers, RestFlushAction.class); + registerRestHandler(handlers, RestSyncedFlushAction.class); + registerRestHandler(handlers, RestForceMergeAction.class); + registerRestHandler(handlers, RestUpgradeAction.class); + registerRestHandler(handlers, RestClearIndicesCacheAction.class); + + registerRestHandler(handlers, RestIndexAction.class); + registerRestHandler(handlers, RestGetAction.class); + registerRestHandler(handlers, RestGetSourceAction.class); + registerRestHandler(handlers, RestHeadAction.Document.class); + registerRestHandler(handlers, RestHeadAction.Source.class); + registerRestHandler(handlers, RestMultiGetAction.class); + registerRestHandler(handlers, RestDeleteAction.class); + registerRestHandler(handlers, org.elasticsearch.rest.action.count.RestCountAction.class); + registerRestHandler(handlers, RestSuggestAction.class); + registerRestHandler(handlers, RestTermVectorsAction.class); + registerRestHandler(handlers, RestMultiTermVectorsAction.class); + registerRestHandler(handlers, RestBulkAction.class); + registerRestHandler(handlers, RestUpdateAction.class); + + registerRestHandler(handlers, RestSearchAction.class); + registerRestHandler(handlers, RestSearchScrollAction.class); + registerRestHandler(handlers, RestClearScrollAction.class); + registerRestHandler(handlers, RestMultiSearchAction.class); + + registerRestHandler(handlers, RestValidateQueryAction.class); + + registerRestHandler(handlers, RestExplainAction.class); + + registerRestHandler(handlers, RestRecoveryAction.class); + + // Scripts API + registerRestHandler(handlers, RestGetStoredScriptAction.class); + registerRestHandler(handlers, RestPutStoredScriptAction.class); + registerRestHandler(handlers, RestDeleteStoredScriptAction.class); + + registerRestHandler(handlers, RestFieldStatsAction.class); + + // Tasks API + registerRestHandler(handlers, RestListTasksAction.class); + registerRestHandler(handlers, RestGetTaskAction.class); + registerRestHandler(handlers, RestCancelTasksAction.class); + + // Ingest API + registerRestHandler(handlers, RestPutPipelineAction.class); + registerRestHandler(handlers, RestGetPipelineAction.class); + registerRestHandler(handlers, RestDeletePipelineAction.class); + registerRestHandler(handlers, RestSimulatePipelineAction.class); + + // CAT API + registerRestHandler(handlers, RestCatAction.class); + registerRestHandler(handlers, RestAllocationAction.class); + registerRestHandler(handlers, RestShardsAction.class); + registerRestHandler(handlers, RestMasterAction.class); + 
registerRestHandler(handlers, RestNodesAction.class); + registerRestHandler(handlers, RestTasksAction.class); + registerRestHandler(handlers, RestIndicesAction.class); + registerRestHandler(handlers, RestSegmentsAction.class); + // Fully qualified to prevent interference with rest.action.count.RestCountAction + registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestCountAction.class); + // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction + registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestRecoveryAction.class); + registerRestHandler(handlers, RestHealthAction.class); + registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class); + registerRestHandler(handlers, RestAliasAction.class); + registerRestHandler(handlers, RestThreadPoolAction.class); + registerRestHandler(handlers, RestPluginsAction.class); + registerRestHandler(handlers, RestFielddataAction.class); + registerRestHandler(handlers, RestNodeAttrsAction.class); + registerRestHandler(handlers, RestRepositoriesAction.class); + registerRestHandler(handlers, RestSnapshotAction.class); + for (ActionPlugin plugin : actionPlugins) { + for (Class handler : plugin.getRestHandlers()) { + registerRestHandler(handlers, handler); + } + } + return handlers; } - public ActionModule registerFilter(Class actionFilter) { - actionFilters.add(actionFilter); - return this; + private static void registerRestHandler(Set> handlers, Class handler) { + if (handlers.contains(handler)) { + throw new IllegalArgumentException("can't register the same [rest_handler] more than once for [" + handler.getName() + "]"); + } + handlers.add(handler); } @Override protected void configure() { - if (proxy == false) { - if (ingestEnabled) { - registerFilter(IngestActionFilter.class); - } else { - registerFilter(IngestProxyActionFilter.class); - } - } - Multibinder actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class); for (Class actionFilter : actionFilters) { actionFilterMultibinder.addBinding().to(actionFilter); } bind(ActionFilters.class).asEagerSingleton(); - bind(AutoCreateIndex.class).asEagerSingleton(); - bind(DestructiveOperations.class).asEagerSingleton(); - registerAction(MainAction.INSTANCE, TransportMainAction.class); - registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); - registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); - registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); - registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class); - registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class); + bind(DestructiveOperations.class).toInstance(destructiveOperations); - registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class); - registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); - registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class); - registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class); - registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class); - registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class); - registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class); - registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class); - 
registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class); - registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); - registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); - registerAction(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); - registerAction(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); - registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); - registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); - registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class); - registerAction(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class); - - registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); - registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); - registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); - registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); - registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); - registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class); - registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class); - registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class); - registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class); - registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class); - registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class); - registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, TransportGetFieldMappingsIndexAction.class); - registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class); - registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class); - registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class); - registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class); - registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class); - registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class); - registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class); - registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); - registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class); - registerAction(FlushAction.INSTANCE, TransportFlushAction.class); - registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); - registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); - registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); - registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); - registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); - registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); - registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class); - registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class); - registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class); - - registerAction(IndexAction.INSTANCE, TransportIndexAction.class); - registerAction(GetAction.INSTANCE, TransportGetAction.class); - registerAction(TermVectorsAction.INSTANCE, 
TransportTermVectorsAction.class); - registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, - TransportShardMultiTermsVectorAction.class); - registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class); - registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class); - registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class, - TransportShardMultiGetAction.class); - registerAction(BulkAction.INSTANCE, TransportBulkAction.class, - TransportShardBulkAction.class); - registerAction(SearchAction.INSTANCE, TransportSearchAction.class); - registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); - registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); - registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class); - registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class); - registerAction(ExplainAction.INSTANCE, TransportExplainAction.class); - registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); - registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class); - registerAction(RenderSearchTemplateAction.INSTANCE, TransportRenderSearchTemplateAction.class); - - //Indexed scripts - registerAction(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); - registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); - registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); - - registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class); - - registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); - registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); - registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class); - registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class); - - // register Name -> GenericAction Map that can be injected to instances. - MapBinder actionsBinder - = MapBinder.newMapBinder(binder(), String.class, GenericAction.class); - - for (Map.Entry entry : actions.entrySet()) { - actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action); - } - // register GenericAction -> transportAction Map that can be injected to instances. 
-           // also register any supporting classes
-           if (!proxy) {
+       if (false == transportClient) {
+           // Supporting classes only used when not a transport client
+           bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
            bind(TransportLivenessAction.class).asEagerSingleton();
+
+           // register GenericAction -> transportAction Map used by NodeClient
+           @SuppressWarnings("rawtypes")
            MapBinder transportActionsBinder = MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
-           for (Map.Entry entry : actions.entrySet()) {
+           for (ActionHandler action : actions.values()) {
                // bind the action as eager singleton, so the map binder one will reuse it
-               bind(entry.getValue().transportAction).asEagerSingleton();
-               transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton();
-               for (Class supportAction : entry.getValue().supportTransportActions) {
+               bind(action.getTransportAction()).asEagerSingleton();
+               transportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton();
+               for (Class supportAction : action.getSupportTransportActions()) {
                    bind(supportAction).asEagerSingleton();
                }
            }
+
+           // Bind the RestController which is required (by Node) even if rest isn't enabled.
+           bind(RestController.class).toInstance(restController);
+
+           // Setup the RestHandlers
+           if (NetworkModule.HTTP_ENABLED.get(settings)) {
+               Multibinder<RestHandler> restHandlers = Multibinder.newSetBinder(binder(), RestHandler.class);
+               Multibinder<AbstractCatAction> catHandlers = Multibinder.newSetBinder(binder(), AbstractCatAction.class);
+               for (Class<? extends RestHandler> handler : setupRestHandlers(actionPlugins)) {
+                   bind(handler).asEagerSingleton();
+                   if (AbstractCatAction.class.isAssignableFrom(handler)) {
+                       catHandlers.addBinding().to(handler.asSubclass(AbstractCatAction.class));
+                   } else {
+                       restHandlers.addBinding().to(handler);
+                   }
+               }
+           }
        }
    }
}
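To make the new extension point concrete, here is a minimal sketch of a plugin contributing a custom action through the hooks consumed above. MyAction, TransportMyAction, and RestMyAction are hypothetical names, and the generic bounds on getActions() are approximate for this era of the plugin API; treat the exact signatures as assumptions.

public class MyPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        // Picked up by ActionModule.setupActions(...) and bound exactly like core actions.
        return Collections.singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }

    @Override
    public List<Class<? extends RestHandler>> getRestHandlers() {
        // Fed into setupRestHandlers(...), which rejects duplicate registrations.
        return Collections.singletonList(RestMyAction.class);
    }
}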
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequest.java b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
index 7955855bc0d..bc052895a6f 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -39,6 +39,13 @@ public abstract class ActionRequest<Request extends ActionRequest<Request>> extends TransportRequest {
    public abstract ActionRequestValidationException validate();

+   /**
+    * Should this task persist its result after it has finished?
+    */
+   public boolean getShouldPersistResult() {
+       return false;
+   }
+
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRunnable.java b/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
index 36c3f4f17fa..78e2249d6f4 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
@@ -22,11 +22,11 @@ package org.elasticsearch.action;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

/**
- * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Throwable)} in case an uncaught
+ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
 * exception or error is thrown while the actual action is run.
 */
public abstract class ActionRunnable<Response> extends AbstractRunnable {
-
+
    protected final ActionListener<Response> listener;

    public ActionRunnable(ActionListener<Response> listener) {
@@ -34,11 +34,11 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {
    }

    /**
-     * Calls the action listener's {@link ActionListener#onFailure(Throwable)} method with the given exception.
+     * Calls the action listener's {@link ActionListener#onFailure(Exception)} method with the given exception.
     * This method is invoked for all exceptions thrown by {@link #doRun()}
     */
    @Override
-   public void onFailure(Throwable t) {
-       listener.onFailure(t);
+   public void onFailure(Exception e) {
+       listener.onFailure(e);
    }
}
diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
index 4df43b75401..0925c744144 100644
--- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
@@ -18,10 +18,15 @@
 */
package org.elasticsearch.action;

+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.WriteResponse;
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

@@ -30,12 +35,13 @@ import java.io.IOException;
/**
 * A base class for the response of a write operation that involves a single doc
 */
-public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
+public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {

    private ShardId shardId;
    private String id;
    private String type;
    private long version;
+   private boolean forcedRefresh;

    public DocWriteResponse(ShardId shardId, String type, String id, long version) {
        this.shardId = shardId;
@@ -84,6 +90,20 @@ public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
        return this.version;
    }

+   /**
+    * Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
+    * {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
+    * only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
+    */
+   public boolean forcedRefresh() {
+       return forcedRefresh;
+   }
+
+   @Override
+   public void setForcedRefresh(boolean forcedRefresh) {
+       this.forcedRefresh = forcedRefresh;
+   }
+
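As an aside, a hedged sketch of what the refresh-policy contract documented above looks like from a caller's perspective; the client and request types reflect the Java API of this era, so treat the exact call shapes as assumptions rather than part of this patch.

IndexRequest request = new IndexRequest("twitter", "tweet", "1")
        .source("{\"user\": \"kimchy\"}");
// WAIT_UNTIL parks the response until a refresh makes the document visible.
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
IndexResponse response = client.index(request).actionGet();
// forcedRefresh() is the new flag: true when the shard refreshed immediately,
// for example because the refresh listener slots were exhausted.
boolean forced = response.forcedRefresh();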
    /** returns the rest status for this response (based on {@link ShardInfo#status()}) */
    public RestStatus status() {
        return getShardInfo().status();
@@ -97,6 +117,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
        type = in.readString();
        id = in.readString();
        version = in.readZLong();
+       forcedRefresh = in.readBoolean();
    }

    @Override
@@ -106,6 +127,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
        out.writeString(type);
        out.writeString(id);
        out.writeZLong(version);
+       out.writeBoolean(forcedRefresh);
    }

    static final class Fields {
@@ -121,7 +143,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
        builder.field(Fields._INDEX, shardId.getIndexName())
            .field(Fields._TYPE, type)
            .field(Fields._ID, id)
-           .field(Fields._VERSION, version);
+           .field(Fields._VERSION, version)
+           .field("forced_refresh", forcedRefresh);
        shardInfo.toXContent(builder, params);
        return builder;
    }
diff --git a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
index 4c62a7e849b..3ef699818b6 100644
--- a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
@@ -40,7 +40,7 @@ public interface IndicesRequest {
     */
    IndicesOptions indicesOptions();

-   static interface Replaceable extends IndicesRequest {
+   interface Replaceable extends IndicesRequest {
        /**
         * Sets the indices that the action relates to.
         */
diff --git a/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java b/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
index fb0fd81a7be..e5e0af93072 100644
--- a/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
@@ -45,7 +45,7 @@ public class LatchedActionListener<T> implements ActionListener<T> {
    }

    @Override
-   public void onFailure(Throwable e) {
+   public void onFailure(Exception e) {
        try {
            delegate.onFailure(e);
        } finally {
diff --git a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
index 67436f31772..6704f610ec0 100644
--- a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
+++ b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
@@ -43,15 +43,15 @@ public final class TaskOperationFailure implements Writeable, ToXContent {

    private final long taskId;

-   private final Throwable reason;
+   private final Exception reason;

    private final RestStatus status;

-   public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
+   public TaskOperationFailure(String nodeId, long taskId, Exception e) {
        this.nodeId = nodeId;
        this.taskId = taskId;
-       this.reason = t;
-       status = ExceptionsHelper.status(t);
+       this.reason = e;
+       status = ExceptionsHelper.status(e);
    }

    /**
@@ -60,7 +60,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
    public TaskOperationFailure(StreamInput in) throws IOException {
        nodeId = in.readString();
        taskId = in.readLong();
-       reason = in.readThrowable();
+       reason = in.readException();
        status = RestStatus.readFrom(in);
    }

@@ -68,7 +68,7 @@ public final class TaskOperationFailure implements Writeable,
ToXContent { public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); out.writeLong(taskId); - out.writeThrowable(reason); + out.writeException(reason); RestStatus.writeTo(out, status); } diff --git a/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index b12af818c2e..7d57e5bd60a 100644 --- a/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -35,7 +35,6 @@ public class TransportActionNodeProxy action; private final TransportRequestOptions transportOptions; - @Inject public TransportActionNodeProxy(Settings settings, GenericAction action, TransportService transportService) { super(settings); this.action = action; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index dbabe681c7a..e007929faf2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -42,17 +42,22 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable private final ShardId shard; private final boolean primary; + private final boolean hasPendingAsyncFetch; private final String assignedNodeId; private final UnassignedInfo unassignedInfo; + private final long allocationDelayMillis; private final long remainingDelayMillis; private final Map nodeExplanations; - public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis, - @Nullable UnassignedInfo unassignedInfo, Map nodeExplanations) { + public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis, + long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch, + Map nodeExplanations) { this.shard = shard; this.primary = primary; + this.hasPendingAsyncFetch = hasPendingAsyncFetch; this.assignedNodeId = assignedNodeId; this.unassignedInfo = unassignedInfo; + this.allocationDelayMillis = allocationDelayMillis; this.remainingDelayMillis = remainingDelayMillis; this.nodeExplanations = nodeExplanations; } @@ -60,8 +65,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable public ClusterAllocationExplanation(StreamInput in) throws IOException { this.shard = ShardId.readShardId(in); this.primary = in.readBoolean(); + this.hasPendingAsyncFetch = in.readBoolean(); this.assignedNodeId = in.readOptionalString(); this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new); + this.allocationDelayMillis = in.readVLong(); this.remainingDelayMillis = in.readVLong(); int mapSize = in.readVInt(); @@ -77,8 +84,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable public void writeTo(StreamOutput out) throws IOException { this.getShard().writeTo(out); out.writeBoolean(this.isPrimary()); + out.writeBoolean(this.isStillFetchingShardData()); out.writeOptionalString(this.getAssignedNodeId()); out.writeOptionalWriteable(this.getUnassignedInfo()); + out.writeVLong(allocationDelayMillis); out.writeVLong(remainingDelayMillis); out.writeVInt(this.nodeExplanations.size()); @@ 
-97,6 +106,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        return this.primary;
    }

+   /** Return true if shard data is still being fetched for the allocation */
+   public boolean isStillFetchingShardData() {
+       return this.hasPendingAsyncFetch;
+   }
+
    /** Return true if the shard is assigned to a node */
    public boolean isAssigned() {
        return this.assignedNodeId != null;
@@ -114,7 +128,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        return this.unassignedInfo;
    }

-   /** Return the remaining allocation delay for this shard in millisocends */
+   /** Return the configured delay before the shard can be allocated in milliseconds */
+   public long getAllocationDelayMillis() {
+       return this.allocationDelayMillis;
+   }
+
+   /** Return the remaining allocation delay for this shard in milliseconds */
    public long getRemainingDelayMillis() {
        return this.remainingDelayMillis;
    }
@@ -138,11 +157,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        if (assignedNodeId != null) {
            builder.field("assigned_node_id", this.assignedNodeId);
        }
+       builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
        // If we have unassigned info, show that
        if (unassignedInfo != null) {
            unassignedInfo.toXContent(builder, params);
-           long delay = unassignedInfo.getLastComputedLeftDelayNanos();
-           builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
+           builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
            builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
        }
        builder.startObject("nodes");
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
index 8f467402bc4..e564711d418 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
@@ -43,7 +43,7 @@ public class NodeExplanation implements Writeable, ToXContent {
    private final String finalExplanation;

    public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
-                          final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
+                          @Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
                           final ClusterAllocationExplanation.FinalDecision finalDecision,
                           final String finalExplanation,
                           final ClusterAllocationExplanation.StoreCopy storeCopy) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
index 08e899be4df..d63a7ff8968 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.allocation;

-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -30,24 +29,18 @@ import
org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStores import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -56,15 +49,17 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; + /** * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the * master node in the cluster. 
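For orientation, a hedged sketch of how the explanation produced here can be requested and how the newly added fields surface to a caller; the builder method names are assumptions drawn from the public admin client of this era.

ClusterAllocationExplainResponse response = client.admin().cluster()
        .prepareAllocationExplain()
        .setIndex("twitter").setShard(0).setPrimary(true)
        .get();
ClusterAllocationExplanation explanation = response.getExplanation();
// Fields introduced by this change:
long configuredDelayMillis = explanation.getAllocationDelayMillis(); // index.unassigned.node_left.delayed_timeout
boolean fetchPending = explanation.isStillFetchingShardData();       // async shard-state fetch still running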
@@ -72,26 +67,26 @@ import java.util.Set; public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction { - private final AllocationService allocationService; private final ClusterInfoService clusterInfoService; private final AllocationDeciders allocationDeciders; private final ShardsAllocator shardAllocator; private final TransportIndicesShardStoresAction shardStoresAction; + private final GatewayAllocator gatewayAllocator; @Inject public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - AllocationService allocationService, ClusterInfoService clusterInfoService, - AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator, - TransportIndicesShardStoresAction shardStoresAction) { + ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders, + ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction, + GatewayAllocator gatewayAllocator) { super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterAllocationExplainRequest::new); - this.allocationService = allocationService; this.clusterInfoService = clusterInfoService; this.allocationDeciders = allocationDeciders; this.shardAllocator = shardAllocator; this.shardStoresAction = shardStoresAction; + this.gatewayAllocator = gatewayAllocator; } @Override @@ -140,7 +135,8 @@ public class TransportClusterAllocationExplainAction Float nodeWeight, IndicesShardStoresResponse.StoreStatus storeStatus, String assignedNodeId, - Set activeAllocationIds) { + Set activeAllocationIds, + boolean hasPendingAsyncFetch) { final ClusterAllocationExplanation.FinalDecision finalDecision; final ClusterAllocationExplanation.StoreCopy storeCopy; final String finalExplanation; @@ -149,7 +145,7 @@ public class TransportClusterAllocationExplainAction // No copies of the data storeCopy = ClusterAllocationExplanation.StoreCopy.NONE; } else { - final Throwable storeErr = storeStatus.getStoreException(); + final Exception storeErr = storeStatus.getStoreException(); if (storeErr != null) { if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) { storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT; @@ -171,6 +167,19 @@ public class TransportClusterAllocationExplainAction if (node.getId().equals(assignedNodeId)) { finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED; finalExplanation = "the shard is already assigned to this node"; + } else if (hasPendingAsyncFetch && + shard.primary() == false && + shard.unassigned() && + shard.allocatedPostIndexCreate(indexMetaData) && + nodeDecision.type() != Decision.Type.YES) { + finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() + + " decision and the shard's state is still being fetched"; + finalDecision = ClusterAllocationExplanation.FinalDecision.NO; + } else if (hasPendingAsyncFetch && + shard.unassigned() && + shard.allocatedPostIndexCreate(indexMetaData)) { + finalExplanation = "the shard's state is still being fetched so it cannot be allocated"; + finalDecision = ClusterAllocationExplanation.FinalDecision.NO; } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) && storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) 
{ finalExplanation = "the copy of the shard is stale, allocation ids do not match"; @@ -190,6 +199,7 @@ public class TransportClusterAllocationExplainAction finalDecision = ClusterAllocationExplanation.FinalDecision.NO; finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision"; } else { + // TODO: handle throttling decision better here finalDecision = ClusterAllocationExplanation.FinalDecision.YES; if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) { finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data"; @@ -208,16 +218,15 @@ public class TransportClusterAllocationExplainAction */ public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes, boolean includeYesDecisions, ShardsAllocator shardAllocator, - List shardStores) { + List shardStores, + GatewayAllocator gatewayAllocator) { // don't short circuit deciders, we want a full explanation allocation.debugDecision(true); // get the existing unassigned info if available UnassignedInfo ui = shard.unassignedInfo(); - RoutingNodesIterator iter = routingNodes.nodes(); Map nodeToDecision = new HashMap<>(); - while (iter.hasNext()) { - RoutingNode node = iter.next(); + for (RoutingNode node : routingNodes) { DiscoveryNode discoNode = node.node(); if (discoNode.isDataNode()) { Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions); @@ -227,9 +236,9 @@ public class TransportClusterAllocationExplainAction long remainingDelayMillis = 0; final MetaData metadata = allocation.metaData(); final IndexMetaData indexMetaData = metadata.index(shard.index()); - if (ui != null) { - final Settings indexSettings = indexMetaData.getSettings(); - long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings); + long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis(); + if (ui != null && ui.isDelayed()) { + long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings()); remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis(); } @@ -248,19 +257,21 @@ public class TransportClusterAllocationExplainAction Float weight = weights.get(node); IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node); NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight, - storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId())); + storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()), + allocation.hasPendingAsyncFetch()); explanations.put(node, nodeExplanation); } return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), - shard.currentNodeId(), remainingDelayMillis, ui, explanations); + shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui, + gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations); } @Override protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state, final ActionListener listener) { final RoutingNodes routingNodes = state.getRoutingNodes(); - final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(), - clusterInfoService.getClusterInfo(), System.nanoTime()); + final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, 
routingNodes, state, + clusterInfoService.getClusterInfo(), System.nanoTime(), false); ShardRouting foundShard = null; if (request.useAnyUnassignedShard()) { @@ -307,12 +318,12 @@ public class TransportClusterAllocationExplainAction shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName()); List shardStoreStatus = shardStatuses.get(shardRouting.id()); ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes, - request.includeYesDecisions(), shardAllocator, shardStoreStatus); + request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator); listener.onResponse(new ClusterAllocationExplainResponse(cae)); } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { listener.onFailure(e); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 59b426d8c31..27970f332fc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -33,8 +33,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.common.unit.TimeValue.readTimeValue; - /** * */ @@ -160,7 +158,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest { - private final ClusterName clusterName; private final GatewayAllocator gatewayAllocator; @Inject public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters, + ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) { - super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterHealthRequest::new); - this.clusterName = clusterName; + super(settings, ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, ClusterHealthRequest::new); this.gatewayAllocator = gatewayAllocator; } @@ -106,9 +104,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< } @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - listener.onFailure(t); + public void onFailure(String source, Exception e) { + logger.error("unexpected failure during [{}]", e, source); + listener.onFailure(e); } @Override @@ -283,14 +281,14 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); } catch (IndexNotFoundException e) { // one of the specified indices is not there - treat it as RED. 
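One behavioral consequence of the catch block just below, sketched from the caller's side (client calls assumed from the same era's API): asking for the health of an unknown index yields a RED response rather than an error.

ClusterHealthResponse health = client.admin().cluster()
        .prepareHealth("index-that-does-not-exist")
        .get();
// The IndexNotFoundException is swallowed and reported as RED status.
assert health.getStatus() == ClusterHealthStatus.RED;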
- ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState, + ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue); response.setStatus(ClusterHealthStatus.RED); return response; } - return new ClusterHealthResponse(clusterName.value(), concreteIndices, clusterState, numberOfPendingTasks, + return new ClusterHealthResponse(clusterState.getClusterName().value(), concreteIndices, clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 7c8f797fdcb..e3df7f57312 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -101,7 +101,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest { NodesHotThreadsResponse() { } - public NodesHotThreadsResponse(ClusterName clusterName, NodeHotThreads[] nodes) { - super(clusterName, nodes); + public NodesHotThreadsResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodes = new NodeHotThreads[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = NodeHotThreads.readNodeHotThreads(in); - } + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeHotThreads::readNodeHotThreads); } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(nodes.length); - for (NodeHotThreads node : nodes) { - node.writeTo(out); - } + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index d53f651da45..73403f40318 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; @@ -35,33 +36,28 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * */ -public class TransportNodesHotThreadsAction extends TransportNodesAction { +public class TransportNodesHotThreadsAction 
extends TransportNodesAction { @Inject - public TransportNodesHotThreadsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, + public TransportNodesHotThreadsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, NodesHotThreadsAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, - indexNameExpressionResolver, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC); + super(settings, NodesHotThreadsAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeHotThreads.class); } @Override - protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, AtomicReferenceArray responses) { - final List nodes = new ArrayList<>(); - for (int i = 0; i < responses.length(); i++) { - Object resp = responses.get(i); - if (resp instanceof NodeHotThreads) { - nodes.add((NodeHotThreads) resp); - } - } - return new NodesHotThreadsResponse(clusterName, nodes.toArray(new NodeHotThreads[nodes.size()])); + protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, + List responses, List failures) { + return new NodesHotThreadsResponse(clusterService.getClusterName(), responses, failures); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 87ec2d052ab..d7ce899792f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -27,8 +27,9 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.http.HttpInfo; -import org.elasticsearch.ingest.core.IngestInfo; +import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsInfo; import org.elasticsearch.monitor.process.ProcessInfo; @@ -45,8 +46,6 @@ import static java.util.Collections.unmodifiableMap; * Node information (static, does not change over time). 
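The hand-rolled `readVInt` loops that the nodes-level responses used to carry are replaced above by `StreamInput#readList` and `StreamOutput#writeStreamableList`. A sketch of the same round-trip pattern for a made-up `Streamable` element (`NodeThing` and its field are hypothetical):

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

import java.io.IOException;
import java.util.List;

// Hypothetical element type following the readNodeHotThreads(...) convention above.
class NodeThing implements Streamable {
    String detail;

    static NodeThing readNodeThing(StreamInput in) throws IOException {
        NodeThing thing = new NodeThing();
        thing.readFrom(in);
        return thing;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        detail = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(detail);
    }
}

class NodeThingListWire {
    static List<NodeThing> read(StreamInput in) throws IOException {
        return in.readList(NodeThing::readNodeThing); // length-prefixed under the hood
    }

    static void write(StreamOutput out, List<NodeThing> things) throws IOException {
        out.writeStreamableList(things); // writes the size, then each element
    }
}
```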
*/ public class NodeInfo extends BaseNodeResponse { - @Nullable - private Map serviceAttributes; private Version version; private Build build; @@ -78,16 +77,19 @@ public class NodeInfo extends BaseNodeResponse { @Nullable private IngestInfo ingest; + @Nullable + private ByteSizeValue totalIndexingBuffer; + public NodeInfo() { } - public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map serviceAttributes, @Nullable Settings settings, + public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings, @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool, - @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) { + @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest, + @Nullable ByteSizeValue totalIndexingBuffer) { super(node); this.version = version; this.build = build; - this.serviceAttributes = serviceAttributes; this.settings = settings; this.os = os; this.process = process; @@ -97,6 +99,7 @@ public class NodeInfo extends BaseNodeResponse { this.http = http; this.plugins = plugins; this.ingest = ingest; + this.totalIndexingBuffer = totalIndexingBuffer; } /** @@ -121,14 +124,6 @@ public class NodeInfo extends BaseNodeResponse { return this.build; } - /** - * The service attributes of the node. - */ - @Nullable - public Map getServiceAttributes() { - return this.serviceAttributes; - } - /** * The settings of the node. */ @@ -186,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse { return ingest; } + @Nullable + public ByteSizeValue getTotalIndexingBuffer() { + return totalIndexingBuffer; + } + public static NodeInfo readNodeInfo(StreamInput in) throws IOException { NodeInfo nodeInfo = new NodeInfo(); nodeInfo.readFrom(in); @@ -198,12 +198,9 @@ public class NodeInfo extends BaseNodeResponse { version = Version.readVersion(in); build = Build.readBuild(in); if (in.readBoolean()) { - Map builder = new HashMap<>(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(in.readString(), in.readString()); - } - serviceAttributes = unmodifiableMap(builder); + totalIndexingBuffer = new ByteSizeValue(in.readLong()); + } else { + totalIndexingBuffer = null; } if (in.readBoolean()) { settings = Settings.readSettingsFromStream(in); @@ -240,15 +237,11 @@ public class NodeInfo extends BaseNodeResponse { super.writeTo(out); out.writeVInt(version.id); Build.writeBuild(build, out); - if (getServiceAttributes() == null) { + if (totalIndexingBuffer == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeVInt(serviceAttributes.size()); - for (Map.Entry entry : serviceAttributes.entrySet()) { - out.writeString(entry.getKey()); - out.writeString(entry.getValue()); - } + out.writeLong(totalIndexingBuffer.bytes()); } if (settings == null) { out.writeBoolean(false); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java index 66c5cfd65d4..b547d1d7432 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -39,6 +39,7 @@ public class NodesInfoRequest extends BaseNodesRequest { private boolean http = true; private boolean plugins = 
true; private boolean ingest = true; + private boolean indices = true; public NodesInfoRequest() { } @@ -64,6 +65,7 @@ public class NodesInfoRequest extends BaseNodesRequest { http = false; plugins = false; ingest = false; + indices = false; return this; } @@ -80,6 +82,7 @@ public class NodesInfoRequest extends BaseNodesRequest { http = true; plugins = true; ingest = true; + indices = true; return this; } @@ -221,6 +224,22 @@ public class NodesInfoRequest extends BaseNodesRequest { return ingest; } + /** + * Should information about indices (currently just indexing buffers) be returned + * @param indices true if you want info + */ + public NodesInfoRequest indices(boolean indices) { + this.indices = indices; + return this; + } + + /** + * @return true if information about indices (currently just indexing buffers) + */ + public boolean indices() { + return indices; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -233,6 +252,7 @@ public class NodesInfoRequest extends BaseNodesRequest { http = in.readBoolean(); plugins = in.readBoolean(); ingest = in.readBoolean(); + indices = in.readBoolean(); } @Override @@ -247,5 +267,6 @@ public class NodesInfoRequest extends BaseNodesRequest { out.writeBoolean(http); out.writeBoolean(plugins); out.writeBoolean(ingest); + out.writeBoolean(indices); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java index fc484012379..16befb79aab 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java @@ -118,4 +118,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder implements To public NodesInfoResponse() { } - public NodesInfoResponse(ClusterName clusterName, NodeInfo[] nodes) { - super(clusterName, nodes); + public NodesInfoResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodes = new NodeInfo[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = NodeInfo.readNodeInfo(in); - } + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeInfo::readNodeInfo); } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(nodes.length); - for (NodeInfo node : nodes) { - node.writeTo(out); - } + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("cluster_name", getClusterName().value()); - builder.startObject("nodes"); - for (NodeInfo nodeInfo : this) { + for (NodeInfo nodeInfo : getNodes()) { builder.startObject(nodeInfo.getNode().getId()); builder.field("name", nodeInfo.getNode().getName()); @@ -77,11 +69,8 @@ public class NodesInfoResponse extends BaseNodesResponse implements To builder.field("version", nodeInfo.getVersion()); builder.field("build_hash", nodeInfo.getBuild().shortHash()); - - if (nodeInfo.getServiceAttributes() != null) { - for (Map.Entry nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) { - builder.field(nodeAttribute.getKey(), 
nodeAttribute.getValue()); - } + if (nodeInfo.getTotalIndexingBuffer() != null) { + builder.byteSizeField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer()); } builder.startArray("roles"); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index f52729faa4f..028198cf831 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.cluster.node.info; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -34,36 +34,32 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * */ -public class TransportNodesInfoAction extends TransportNodesAction { +public class TransportNodesInfoAction extends TransportNodesAction { private final NodeService nodeService; @Inject - public TransportNodesInfoAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, + public TransportNodesInfoAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - NodeService nodeService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, NodesInfoAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, - indexNameExpressionResolver, NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT); + NodeService nodeService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NodesInfoAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT, NodeInfo.class); this.nodeService = nodeService; } @Override - protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, AtomicReferenceArray responses) { - final List nodesInfos = new ArrayList<>(); - for (int i = 0; i < responses.length(); i++) { - Object resp = responses.get(i); - if (resp instanceof NodeInfo) { - nodesInfos.add((NodeInfo) resp); - } - } - return new NodesInfoResponse(clusterName, nodesInfos.toArray(new NodeInfo[nodesInfos.size()])); + protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, + List responses, List failures) { + return new NodesInfoResponse(clusterService.getClusterName(), responses, failures); } @Override @@ -80,7 +76,7 @@ public class TransportNodesInfoAction extends TransportNodesAction { private final ClusterService clusterService; - private final ClusterName clusterName; public static final String NAME = "cluster:monitor/nodes/liveness"; @Inject - public 
TransportLivenessAction(ClusterName clusterName, - ClusterService clusterService, TransportService transportService) { + public TransportLivenessAction(ClusterService clusterService, TransportService transportService) { this.clusterService = clusterService; - this.clusterName = clusterName; - transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME, this); + transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME, + false, false /*can not trip circuit breaker*/, this); } @Override public void messageReceived(LivenessRequest request, TransportChannel channel) throws Exception { - channel.sendResponse(new LivenessResponse(clusterName, clusterService.localNode())); + channel.sendResponse(new LivenessResponse(clusterService.getClusterName(), clusterService.localNode())); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java index af28c1fb5d5..1a9023ab93c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.cluster.node.stats; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; +import java.util.List; /** * @@ -37,34 +39,24 @@ public class NodesStatsResponse extends BaseNodesResponse implements NodesStatsResponse() { } - public NodesStatsResponse(ClusterName clusterName, NodeStats[] nodes) { - super(clusterName, nodes); + public NodesStatsResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodes = new NodeStats[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = NodeStats.readNodeStats(in); - } + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodeStats::readNodeStats); } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(nodes.length); - for (NodeStats node : nodes) { - node.writeTo(out); - } + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("cluster_name", getClusterName().value()); - builder.startObject("nodes"); - for (NodeStats nodeStats : this) { + for (NodeStats nodeStats : getNodes()) { builder.startObject(nodeStats.getNode().getId()); builder.field("timestamp", nodeStats.getTimestamp()); nodeStats.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 8ba3d00558b..5863e54d08f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -19,10 +19,10 @@ package org.elasticsearch.action.admin.cluster.node.stats; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -34,36 +34,31 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * */ -public class TransportNodesStatsAction extends TransportNodesAction { +public class TransportNodesStatsAction extends TransportNodesAction { private final NodeService nodeService; @Inject - public TransportNodesStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, + public TransportNodesStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - NodeService nodeService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, NodesStatsAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT); + NodeService nodeService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NodesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT, NodeStats.class); this.nodeService = nodeService; } @Override - protected NodesStatsResponse newResponse(NodesStatsRequest nodesInfoRequest, AtomicReferenceArray responses) { - final List nodeStats = new ArrayList<>(); - for (int i = 0; i < responses.length(); i++) { - Object resp = responses.get(i); - if (resp instanceof NodeStats) { - nodeStats.add((NodeStats) resp); - } - } - return new NodesStatsResponse(clusterName, nodeStats.toArray(new NodeStats[nodeStats.size()])); + protected NodesStatsResponse newResponse(NodesStatsRequest request, List responses, List failures) { + return new NodesStatsResponse(clusterService.getClusterName(), responses, failures); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 716e9027bf0..9bfeaecd78b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; +import org.elasticsearch.tasks.TaskInfo; import 
java.util.List; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index a52a0358983..6d5936db67a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -22,10 +22,8 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -36,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; import org.elasticsearch.transport.TransportChannel; @@ -63,10 +62,10 @@ public class TransportCancelTasksAction extends TransportTasksAction { +/** + * Action for retrieving a single task by its id + */ +public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> { - public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction(); - public static final String NAME = "cluster:admin/render/template/search"; + public static final GetTaskAction INSTANCE = new GetTaskAction(); + public static final String NAME = "cluster:monitor/task/get"; - public RenderSearchTemplateAction() { + private GetTaskAction() { super(NAME); } @Override - public RenderSearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new RenderSearchTemplateRequestBuilder(client, this); + public GetTaskResponse newResponse() { + return new GetTaskResponse(); } @Override - public RenderSearchTemplateResponse newResponse() { - return new RenderSearchTemplateResponse(); + public GetTaskRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetTaskRequestBuilder(client, this); } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java new file mode 100644 index 00000000000..efbc9679e71 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.tasks.get; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A request to get a single task by its {@link TaskId} + */ +public class GetTaskRequest extends ActionRequest { + private TaskId taskId = TaskId.EMPTY_TASK_ID; + private boolean waitForCompletion = false; + private TimeValue timeout = null; + + /** + * Get the TaskId to look up. + */ + public TaskId getTaskId() { + return taskId; + } + + /** + * Set the TaskId to look up. Required. + */ + public GetTaskRequest setTaskId(TaskId taskId) { + this.taskId = taskId; + return this; + } + + /** + * Should this request wait for all found tasks to complete? + */ + public boolean getWaitForCompletion() { + return waitForCompletion; + } + + /** + * Should this request wait for all found tasks to complete? + */ + public GetTaskRequest setWaitForCompletion(boolean waitForCompletion) { + this.waitForCompletion = waitForCompletion; + return this; + } + + /** + * Timeout to wait for any async actions this request must take. The request may take anywhere from 0 to 2 such actions. + */ + public TimeValue getTimeout() { + return timeout; + } + + /** + * Timeout to wait for any async actions this request must take. The request may take anywhere from 0 to 2 such actions. 
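+ * <p>
+ * A hypothetical usage sketch (node and task ids made up):
+ * <pre>
+ * GetTaskRequest request = new GetTaskRequest()
+ *         .setTaskId(new TaskId("oTUltX4IQMOUUVeiohTt8A", 123))
+ *         .setWaitForCompletion(true)
+ *         .setTimeout(TimeValue.timeValueSeconds(30));
+ * </pre>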
+ */ + public GetTaskRequest setTimeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + GetTaskRequest nodeRequest(String thisNodeId, long thisTaskId) { + GetTaskRequest copy = new GetTaskRequest(); + copy.setParentTask(thisNodeId, thisTaskId); + copy.setTaskId(taskId); + copy.setTimeout(timeout); + copy.setWaitForCompletion(waitForCompletion); + return copy; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (false == getTaskId().isSet()) { + validationException = addValidationError("task id is required", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + taskId = TaskId.readFromStream(in); + timeout = in.readOptionalWriteable(TimeValue::new); + waitForCompletion = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + taskId.writeTo(out); + out.writeOptionalWriteable(timeout); + out.writeBoolean(waitForCompletion); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java new file mode 100644 index 00000000000..e1042df2ac3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.tasks.get; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.TaskId; + +/** + * Builder for the request to retrieve a single task by its {@link TaskId} + */ +public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> { + public GetTaskRequestBuilder(ElasticsearchClient client, GetTaskAction action) { + super(client, action, new GetTaskRequest()); + } + + /** + * Set the TaskId to look up. Required. + */ + public final GetTaskRequestBuilder setTaskId(TaskId taskId) { + request.setTaskId(taskId); + return this; + } + + /** + * Should this request wait for all found tasks to complete? + */ + public final GetTaskRequestBuilder setWaitForCompletion(boolean waitForCompletion) { + request.setWaitForCompletion(waitForCompletion); + return this; + } + + /** + * Timeout to wait for any async actions this request must take. The request may take anywhere from 0 to 2 such actions. 
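+ * <p>
+ * An end-to-end sketch with the builder (client wiring assumed to exist; ids made up):
+ * <pre>
+ * GetTaskResponse response = new GetTaskRequestBuilder(client, GetTaskAction.INSTANCE)
+ *         .setTaskId(new TaskId("oTUltX4IQMOUUVeiohTt8A", 123))
+ *         .setWaitForCompletion(true)
+ *         .setTimeout(TimeValue.timeValueSeconds(30))
+ *         .get();
+ * </pre>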
+ */ + public final GetTaskRequestBuilder setTimeout(TimeValue timeout) { + request.setTimeout(timeout); + return this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java similarity index 61% rename from core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java rename to core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java index d14a9a4f06a..afb03a7c9dc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java @@ -17,52 +17,59 @@ * under the License. */ -package org.elasticsearch.action.admin.cluster.validate.template; +package org.elasticsearch.action.admin.cluster.node.tasks.get; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.PersistedTaskInfo; import java.io.IOException; -public class RenderSearchTemplateResponse extends ActionResponse implements ToXContent { +import static java.util.Objects.requireNonNull; - private BytesReference source; +/** + * Response for a {@link GetTaskRequest}, wrapping the single task that was retrieved + */ +public class GetTaskResponse extends ActionResponse implements ToXContent { + private PersistedTaskInfo task; - public BytesReference source() { - return source; + public GetTaskResponse() { } - - public void source(BytesReference source) { - this.source = source; + + public GetTaskResponse(PersistedTaskInfo task) { + this.task = requireNonNull(task, "task is required"); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - boolean hasSource = source != null; - out.writeBoolean(hasSource); - if (hasSource) { - out.writeBytesReference(source); - } - } - + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.readBoolean()) { - source = in.readBytesReference(); - } + task = in.readOptionalWriteable(PersistedTaskInfo::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(task); + } + + /** + * Get the actual result of the fetch. 
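+ * <p>
+ * A sketch of consuming it (accessor names assumed from the 5.x task classes, not shown in this patch):
+ * <pre>
+ * PersistedTaskInfo task = response.getTask();
+ * if (task.isCompleted()) {
+ *     // the task finished; its result was either persisted or snapshotted at completion
+ * }
+ * </pre>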
+ */ + public PersistedTaskInfo getTask() { + return task; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.rawField("template_output", source); - builder.endObject(); - return builder; + return task.innerToXContent(builder, params); + } + + @Override + public String toString() { + return Strings.toString(this); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java new file mode 100644 index 00000000000..430b07866c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -0,0 +1,245 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.node.tasks.get; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.tasks.PersistedTaskInfo; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskPersistenceService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout; + +/** + * Action to get a single task. If the task isn't running then it'll try to request the status from request index. + * + * The general flow is: + *
+ * <ul>
+ * <li>If this isn't being executed on the node to which the requested TaskId belongs then move to that node.</li>
+ * <li>Look up the task and return it if it exists.</li>
+ * <li>If it doesn't then look up the task from the results index.</li>
+ * </ul>
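+ * <p>
+ * Task ids name the owning node plus the task's id on that node, and the string form parses as
+ * {@code nodeId:taskId}. A hypothetical sketch (values made up):
+ * <pre>
+ * TaskId id = new TaskId("oTUltX4IQMOUUVeiohTt8A:123");
+ * </pre>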
+ */ +public class TransportGetTaskAction extends HandledTransportAction { + private final ClusterService clusterService; + private final TransportService transportService; + private final Client client; + + @Inject + public TransportGetTaskAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, Client client) { + super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetTaskRequest::new); + this.clusterService = clusterService; + this.transportService = transportService; + this.client = client; + } + + @Override + protected void doExecute(GetTaskRequest request, ActionListener listener) { + throw new UnsupportedOperationException("Task is required"); + } + + @Override + protected void doExecute(Task thisTask, GetTaskRequest request, ActionListener listener) { + if (clusterService.localNode().getId().equals(request.getTaskId().getNodeId())) { + getRunningTaskFromNode(thisTask, request, listener); + } else { + runOnNodeWithTaskIfPossible(thisTask, request, listener); + } + } + + /** + * Executed on the coordinating node to forward execution of the remaining work to the node that matches that requested + * {@link TaskId#getNodeId()}. If the node isn't in the cluster then this will just proceed to + * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} on this node. + */ + private void runOnNodeWithTaskIfPossible(Task thisTask, GetTaskRequest request, ActionListener listener) { + TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); + if (request.getTimeout() != null) { + builder.withTimeout(request.getTimeout()); + } + builder.withCompress(false); + DiscoveryNode node = clusterService.state().nodes().get(request.getTaskId().getNodeId()); + if (node == null) { + // Node is no longer part of the cluster! Try and look the task up from the results index. + getFinishedTaskFromIndex(thisTask, request, listener); + return; + } + GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId()); + taskManager.registerChildTask(thisTask, node.getId()); + transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(), + new TransportResponseHandler() { + @Override + public GetTaskResponse newInstance() { + return new GetTaskResponse(); + } + + @Override + public void handleResponse(GetTaskResponse response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } + + /** + * Executed on the node that should be running the task to find and return the running task. Falls back to + * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} if the task isn't still running. + */ + void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListener listener) { + Task runningTask = taskManager.getTask(request.getTaskId().getId()); + if (runningTask == null) { + // Task isn't running, go look in the task index + getFinishedTaskFromIndex(thisTask, request, listener); + } else { + if (request.getWaitForCompletion()) { + // Shift to the generic thread pool and let it wait for the task to complete so we don't block any important threads. 
threadPool.generic().execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + taskManager.waitForTaskCompletion(runningTask, waitForCompletionTimeout(request.getTimeout())); + waitedForCompletion(thisTask, request, runningTask.taskInfo(clusterService.localNode(), true), listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } else { + TaskInfo info = runningTask.taskInfo(clusterService.localNode(), true); + listener.onResponse(new GetTaskResponse(new PersistedTaskInfo(false, info))); + } + } + } + + /** + * Called after waiting for the task to complete. Attempts to load the results of the task from the tasks index. If it isn't in the + * index then returns a snapshot of the task taken shortly after completion. + */ + void waitedForCompletion(Task thisTask, GetTaskRequest request, TaskInfo snapshotOfRunningTask, + ActionListener<GetTaskResponse> listener) { + getFinishedTaskFromIndex(thisTask, request, new ActionListener<GetTaskResponse>() { + @Override + public void onResponse(GetTaskResponse response) { + // We were able to load the task from the task index. Let's send that back. + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + /* + * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If + * the error isn't a 404 then we'll just throw it back to the user. + */ + if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) { + listener.onResponse(new GetTaskResponse(new PersistedTaskInfo(true, snapshotOfRunningTask))); + } else { + listener.onFailure(e); + } + } + }); + } + + /** + * Send a {@link GetRequest} to the tasks index looking for a persisted copy of the completed task. It'll only be found if the + * task's result was persisted. Called on the node that once had the task if that node is still part of the cluster or on the + * coordinating node if the node is no longer part of the cluster. + */ + void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) { + GetRequest get = new GetRequest(TaskPersistenceService.TASK_INDEX, TaskPersistenceService.TASK_TYPE, + request.getTaskId().toString()); + get.setParentTask(clusterService.localNode().getId(), thisTask.getId()); + client.get(get, new ActionListener<GetResponse>() { + @Override + public void onResponse(GetResponse getResponse) { + try { + onGetFinishedTaskFromIndex(getResponse, listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { + // We haven't yet created the index for the task results so it can't be found. + listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", e, request.getTaskId())); + } else { + listener.onFailure(e); + } + } + }); + } + + /** + * Called with the {@linkplain GetResponse} from loading the task from the results index. Called on the node that once had the task if + * that node is part of the cluster or on the coordinating node if the node wasn't part of the cluster. 
+ */ + void onGetFinishedTaskFromIndex(GetResponse response, ActionListener listener) throws IOException { + if (false == response.isExists()) { + listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", response.getId())); + return; + } + if (response.isSourceEmpty()) { + listener.onFailure(new ElasticsearchException("Stored task status for [{}] didn't contain any source!", response.getId())); + return; + } + try (XContentParser parser = XContentHelper.createParser(response.getSourceAsBytesRef())) { + PersistedTaskInfo result = PersistedTaskInfo.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT); + listener.onResponse(new GetTaskResponse(result)); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 446ae3affb7..6ab0bafb2fb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -23,21 +23,21 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.stream.Collectors; /** @@ -47,10 +47,12 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { private List tasks; - private Map> nodes; + private Map> perNodeTasks; private List groups; + private DiscoveryNodes discoveryNodes; + public ListTasksResponse() { } @@ -75,28 +77,11 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { /** * Returns the list of tasks by node */ - public Map> getPerNodeTasks() { - if (nodes != null) { - return nodes; + public Map> getPerNodeTasks() { + if (perNodeTasks == null) { + perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.getTaskId().getNodeId())); } - Map> nodeTasks = new HashMap<>(); - - Set nodes = new HashSet<>(); - for (TaskInfo shard : tasks) { - nodes.add(shard.getNode()); - } - - for (DiscoveryNode node : nodes) { - List tasks = new ArrayList<>(); - for (TaskInfo taskInfo : this.tasks) { - if (taskInfo.getNode().equals(node)) { - tasks.add(taskInfo); - } - } - nodeTasks.put(node, tasks); - } - this.nodes = nodeTasks; - return nodeTasks; + return perNodeTasks; } public List getTaskGroups() { @@ -138,6 +123,14 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent { return tasks; } + /** + * Set a reference to the {@linkplain DiscoveryNodes}. Used for calling {@link #toXContent(XContentBuilder, ToXContent.Params)} with + * {@code group_by=nodes}. 
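+ * <p>
+ * A hypothetical caller-side sketch (e.g. from a REST handler):
+ * <pre>
+ * ListTasksResponse response = ...; // produced by the list-tasks action
+ * response.setDiscoveryNodes(clusterService.state().nodes());
+ * // toXContent(builder, params) can now resolve node names and addresses for group_by=nodes
+ * </pre>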
+ */ + public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) { + this.discoveryNodes = discoveryNodes; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (getTaskFailures() != null && getTaskFailures().size() > 0) { @@ -161,43 +154,48 @@ } String groupBy = params.param("group_by", "nodes"); if ("nodes".equals(groupBy)) { + if (discoveryNodes == null) { + throw new IllegalStateException("discoveryNodes must be set before calling toXContent with group_by=nodes"); + } builder.startObject("nodes"); - for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) { - DiscoveryNode node = entry.getKey(); - builder.startObject(node.getId()); - builder.field("name", node.getName()); - builder.field("transport_address", node.getAddress().toString()); - builder.field("host", node.getHostName()); - builder.field("ip", node.getAddress()); + for (Map.Entry<String, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) { + DiscoveryNode node = discoveryNodes.get(entry.getKey()); + builder.startObject(entry.getKey()); + if (node != null) { + // If the node is no longer part of the cluster, oh well, we'll just skip its useful information. + builder.field("name", node.getName()); + builder.field("transport_address", node.getAddress().toString()); + builder.field("host", node.getHostName()); + builder.field("ip", node.getAddress()); - builder.startArray("roles"); - for (DiscoveryNode.Role role : node.getRoles()) { - builder.value(role.getRoleName()); - } - builder.endArray(); - - if (!node.getAttributes().isEmpty()) { - builder.startObject("attributes"); - for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) { - builder.field(attrEntry.getKey(), attrEntry.getValue()); + builder.startArray("roles"); + for (DiscoveryNode.Role role : node.getRoles()) { + builder.value(role.getRoleName()); + } + builder.endArray(); + + if (!node.getAttributes().isEmpty()) { + builder.startObject("attributes"); + for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) { + builder.field(attrEntry.getKey(), attrEntry.getValue()); + } + builder.endObject(); } - builder.endObject(); } builder.startObject("tasks"); for(TaskInfo task : entry.getValue()) { - builder.startObject(task.getTaskId().toString()); + builder.field(task.getTaskId().toString()); task.toXContent(builder, params); - builder.endObject(); } builder.endObject(); builder.endObject(); } + builder.endObject(); } else if ("parents".equals(groupBy)) { builder.startObject("tasks"); for (TaskGroup group : getTaskGroups()) { - builder.startObject(group.getTaskInfo().getTaskId().toString()); + builder.field(group.getTaskInfo().getTaskId().toString()); group.toXContent(builder, params); - builder.endObject(); } builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java index aa9bfd6b720..b254137163d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskGroup.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.ArrayList; @@ -79,16 +80,15 
@@ public class TaskGroup implements ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - task.toXContent(builder, params); + builder.startObject(); + task.innerToXContent(builder, params); if (childTasks.isEmpty() == false) { builder.startArray("children"); for (TaskGroup taskGroup : childTasks) { - builder.startObject(); taskGroup.toXContent(builder, params); - builder.endObject(); } builder.endArray(); } - return builder; + return builder.endObject(); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index b05049f6776..26158800346 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -19,13 +19,10 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -33,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,20 +38,25 @@ import java.io.IOException; import java.util.List; import java.util.function.Consumer; -import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; /** * */ public class TransportListTasksAction extends TransportTasksAction { - private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); + public static long waitForCompletionTimeout(TimeValue timeout) { + if (timeout == null) { + timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT; + } + return System.nanoTime() + timeout.nanos(); + } + private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); @Inject - public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, + public TransportListTasksAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ListTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + super(settings, ListTasksAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT); } @@ -75,35 +78,18 @@ public class TransportListTasksAction extends TransportTasksAction operation) { - if (false == request.getWaitForCompletion()) { - 
super.processTasks(request, operation); - return; - } - // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager. - TimeValue timeout = request.getTimeout(); - if (timeout == null) { - timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT; - } - long timeoutTime = System.nanoTime() + timeout.nanos(); - super.processTasks(request, operation.andThen((Task t) -> { - while (System.nanoTime() - timeoutTime < 0) { - Task task = taskManager.getTask(t.getId()); - if (task == null) { - return; - } + if (request.getWaitForCompletion()) { + long timeoutNanos = waitForCompletionTimeout(request.getTimeout()); + operation = operation.andThen(task -> { if (task.getAction().startsWith(ListTasksAction.NAME)) { // It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting - // for itself of one of its child tasks + // for itself or one of its child tasks return; } - try { - Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); - } catch (InterruptedException e) { - throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t); - } - } - throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t); - })); + taskManager.waitForTaskCompletion(task, timeoutNanos); + }); + } + super.processTasks(request, operation); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index a17d2aac892..6baf834278b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -76,7 +76,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction { - AllocationCommands commands = new AllocationCommands(); - boolean dryRun; - boolean explain; + private AllocationCommands commands = new AllocationCommands(); + private boolean dryRun; + private boolean explain; + private boolean retryFailed; public ClusterRerouteRequest() { } @@ -81,6 +77,15 @@ public class ClusterRerouteRequest extends AcknowledgedRequestfalse). If true, the + * request will retry allocating shards that can't currently be allocated due to too many allocation failures. + */ + public ClusterRerouteRequest setRetryFailed(boolean retryFailed) { + this.retryFailed = retryFailed; + return this; + } + /** * Returns the current explain flag */ @@ -88,41 +93,27 @@ public class ClusterRerouteRequest extends AcknowledgedRequest { - +public class ClusterRerouteRequestBuilder + extends AcknowledgedRequestBuilder { public ClusterRerouteRequestBuilder(ElasticsearchClient client, ClusterRerouteAction action) { super(client, action, new ClusterRerouteRequest()); } @@ -61,10 +60,11 @@ public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilderfalse). If true, the + * request will retry allocating shards that can't currently be allocated due to too many allocation failures. */ - public ClusterRerouteRequestBuilder setCommands(AllocationCommand... 
commands) throws Exception { - request.commands(commands); + public ClusterRerouteRequestBuilder setRetryFailed(boolean retryFailed) { + request.setRetryFailed(retryFailed); return this; } -} +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index e6116dbfbc4..875562ad64a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -68,38 +69,55 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener) { - clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { - - private volatile ClusterState clusterStateToSend; - private volatile RoutingExplanations explanations; - - @Override - protected ClusterRerouteResponse newResponse(boolean acknowledged) { - return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations); - } - - @Override - public void onAckTimeout() { - listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations())); - } - - @Override - public void onFailure(String source, Throwable t) { - logger.debug("failed to perform [{}]", t, source); - super.onFailure(source, t); - } - - @Override - public ClusterState execute(ClusterState currentState) { - RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.commands, request.explain()); - ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build(); - clusterStateToSend = newState; - explanations = routingResult.explanations(); - if (request.dryRun) { - return currentState; - } - return newState; - } - }); + clusterService.submitStateUpdateTask("cluster_reroute (api)", new ClusterRerouteResponseAckedClusterStateUpdateTask(logger, + allocationService, request, listener)); } -} \ No newline at end of file + + static class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask { + + private final ClusterRerouteRequest request; + private final ActionListener listener; + private final ESLogger logger; + private final AllocationService allocationService; + private volatile ClusterState clusterStateToSend; + private volatile RoutingExplanations explanations; + + ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request, + ActionListener listener) { + super(Priority.IMMEDIATE, request, listener); + this.request = request; + this.listener = listener; + this.logger = logger; + this.allocationService = allocationService; + } + + @Override + protected ClusterRerouteResponse newResponse(boolean acknowledged) { + return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations); + } + + @Override + public void 
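[Editor's note] The new retry_failed flag is exposed on both ClusterRerouteRequest and its builder, letting a client ask the allocator for another round of attempts on shards that exhausted their allocation retries. A hypothetical usage sketch, assuming an already-connected client:

import org.elasticsearch.client.Client;

class RetryFailedSketch {
    // Illustrative only; presumes the cluster has shards stuck after too many
    // allocation failures.
    static void retryFailedAllocations(Client client) {
        client.admin().cluster()
                .prepareReroute()
                .setRetryFailed(true) // the flag added in this change
                .get();
    }
}
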
onAckTimeout() { + listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations())); + } + + @Override + public void onFailure(String source, Exception e) { + logger.debug("failed to perform [{}]", e, source); + super.onFailure(source, e); + } + + @Override + public ClusterState execute(ClusterState currentState) { + RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(), + request.isRetryFailed()); + ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build(); + clusterStateToSend = newState; + explanations = routingResult.explanations(); + if (request.dryRun()) { + return currentState; + } + return newState; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java index 652401194bb..575fbcd3b98 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java @@ -77,7 +77,7 @@ final class SettingsUpdater { Settings settings = build.metaData().settings(); // now we try to apply things and if they are invalid we fail // this dryRun will validate & parse settings but won't actually apply them. - clusterSettings.dryRun(settings); + clusterSettings.validateUpdate(settings); return build; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 1abca63794b..4464b5d793f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -93,11 +93,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { if (changed) { reroute(true); } else { - super.onAllNodesAcked(t); + super.onAllNodesAcked(e); } } @@ -146,10 +146,10 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { //if the reroute fails we only log - logger.debug("failed to perform [{}]", t, source); - listener.onFailure(new ElasticsearchException("reroute after update settings failed", t)); + logger.debug("failed to perform [{}]", e, source); + listener.onFailure(new ElasticsearchException("reroute after update settings failed", e)); } @Override @@ -165,9 +165,9 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct } @Override - public void onFailure(String source, Throwable t) { - logger.debug("failed to perform [{}]", t, source); - super.onFailure(source, t); + public void onFailure(String source, Exception e) { + logger.debug("failed to perform [{}]", e, source); + super.onFailure(source, e); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 1b329d17289..ccb4d32465e 100644 --- 
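[Editor's note] Note the dry-run handling in execute() above: the routing result is always computed and captured for the API response, but when dryRun is set the original state is returned to the cluster service, so nothing is published. A condensed sketch of that pattern with stand-in types:

class DryRunSketch {
    static class State { /* immutable cluster state stand-in */ }

    interface Allocator { State reroute(State current); }

    static class RerouteTask {
        private volatile State stateToSend; // captured for the API response

        State execute(State current, Allocator allocator, boolean dryRun) {
            State proposed = allocator.reroute(current);
            stateToSend = proposed;              // the caller sees what *would* happen
            return dryRun ? current : proposed;  // but only a real run is published
        }
    }
}

Extracting the anonymous update task into the named static class also makes this dry-run behavior unit-testable in isolation.
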
a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -33,16 +34,14 @@ import java.io.IOException; */ public class ClusterSearchShardsGroup implements Streamable, ToXContent { - private Index index; - private int shardId; + private ShardId shardId; ShardRouting[] shards; ClusterSearchShardsGroup() { } - public ClusterSearchShardsGroup(Index index, int shardId, ShardRouting[] shards) { - this.index = index; + public ClusterSearchShardsGroup(ShardId shardId, ShardRouting[] shards) { this.shardId = shardId; this.shards = shards; } @@ -54,11 +53,11 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent { } public String getIndex() { - return index.getName(); + return shardId.getIndexName(); } public int getShardId() { - return shardId; + return shardId.id(); } public ShardRouting[] getShards() { @@ -67,18 +66,16 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) throws IOException { - index = new Index(in); - shardId = in.readVInt(); + shardId = ShardId.readShardId(in); shards = new ShardRouting[in.readVInt()]; for (int i = 0; i < shards.length; i++) { - shards[i] = ShardRouting.readShardRoutingEntry(in, index, shardId); + shards[i] = new ShardRouting(shardId, in); } } @Override public void writeTo(StreamOutput out) throws IOException { - index.writeTo(out); - out.writeVInt(shardId); + shardId.writeTo(out); out.writeVInt(shards.length); for (ShardRouting shardRouting : shards) { shardRouting.writeToThin(out); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 8b26fd6c04f..2f9a6e7dede 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -78,8 +79,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 0; for (ShardIterator shardIt : groupShardsIterator) { - Index index = shardIt.shardId().getIndex(); - int shardId = shardIt.shardId().getId(); + ShardId shardId = shardIt.shardId(); ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()]; int currentShard = 0; shardIt.reset(); @@ -87,7 +87,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA shardRoutings[currentShard++] = shard; nodeIds.add(shard.currentNodeId()); } - groupResponses[currentGroup++] = new 
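[Editor's note] Carrying a full ShardId instead of a separate Index plus int also keeps the wire format compact: the group writes the ShardId once, and each routing entry is then written "thin", without repeating it (writeToThin / new ShardRouting(shardId, in) above). A minimal sketch of the prefix-sharing idea using java.io primitives:

import java.io.*;

class ThinSerializationSketch {
    record ShardId(String index, int id) {
        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(index);
            out.writeInt(id);
        }
        static ShardId readFrom(DataInput in) throws IOException {
            return new ShardId(in.readUTF(), in.readInt());
        }
    }

    record Routing(ShardId shardId, String nodeId) {
        // "Thin" write: the shared ShardId prefix is written once by the caller.
        void writeThin(DataOutput out) throws IOException {
            out.writeUTF(nodeId);
        }
        static Routing readThin(DataInput in, ShardId shardId) throws IOException {
            return new Routing(shardId, in.readUTF());
        }
    }
}
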
ClusterSearchShardsGroup(index, shardId, shardRoutings); + groupResponses[currentGroup++] = new ClusterSearchShardsGroup(shardId, shardRoutings); } DiscoveryNode[] nodes = new DiscoveryNode[nodeIds.size()]; int currentNode = 0; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index 0be07c703f1..efc2fbeb5b5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -57,13 +57,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - snapshotInfo = SnapshotInfo.readOptionalSnapshotInfo(in); + snapshotInfo = in.readOptionalWriteable(SnapshotInfo::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalStreamable(snapshotInfo); + out.writeOptionalWriteable(snapshotInfo); } /** @@ -81,18 +81,13 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent return snapshotInfo.status(); } - static final class Fields { - static final String SNAPSHOT = "snapshot"; - static final String ACCEPTED = "accepted"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (snapshotInfo != null) { - builder.field(Fields.SNAPSHOT); + builder.field("snapshot"); snapshotInfo.toXContent(builder, params); } else { - builder.field(Fields.ACCEPTED, true); + builder.field("accepted", true); } return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 2654ac0c269..269edfc401b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -26,10 +26,10 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; @@ -72,7 +72,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction listener) { SnapshotsService.SnapshotRequest snapshotRequest = - new SnapshotsService.SnapshotRequest("create_snapshot [" + request.snapshot() + "]", request.snapshot(), request.repository()) + new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]") .indices(request.indices()) .indicesOptions(request.indicesOptions()) .partial(request.partial()) @@ -84,20 +84,20 @@ public class TransportCreateSnapshotAction 
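[Editor's note] The readOptionalWriteable/writeOptionalWriteable pair used in CreateSnapshotResponse above presumably encodes a presence flag followed by the value, replacing the older Streamable helpers. A rough equivalent with java.io types; the real StreamInput API takes a Writeable.Reader, and these names are stand-ins:

import java.io.*;

class OptionalWriteableSketch {
    interface Writeable { void writeTo(DataOutput out) throws IOException; }

    interface Reader<T> { T read(DataInput in) throws IOException; }

    static void writeOptional(DataOutput out, Writeable value) throws IOException {
        out.writeBoolean(value != null); // presence flag
        if (value != null) {
            value.writeTo(out);
        }
    }

    static <T> T readOptional(DataInput in, Reader<T> reader) throws IOException {
        return in.readBoolean() ? reader.read(in) : null;
    }
}
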
extends TransportMasterNodeAction listener) { - SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot()); - snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() { + snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() { @Override public void onResponse() { listener.onResponse(new DeleteSnapshotResponse(true)); } @Override - public void onFailure(Throwable t) { - listener.onFailure(t); + public void onFailure(Exception e) { + listener.onFailure(e); } }); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 65b0e4faa4a..924f5a90d42 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -42,7 +42,7 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { } GetSnapshotsResponse(List snapshots) { - this.snapshots = snapshots; + this.snapshots = Collections.unmodifiableList(snapshots); } /** @@ -60,7 +60,7 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { int size = in.readVInt(); List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { - builder.add(SnapshotInfo.readSnapshotInfo(in)); + builder.add(new SnapshotInfo(in)); } snapshots = Collections.unmodifiableList(builder); } @@ -74,13 +74,9 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { } } - static final class Fields { - static final String SNAPSHOTS = "snapshots"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); + builder.startArray("snapshots"); for (SnapshotInfo snapshotInfo : snapshots) { snapshotInfo.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 0198102a200..ad8cb1ae88e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -26,21 +26,22 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.Collections; +import java.util.HashMap; import 
java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -53,7 +54,8 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, + final ActionListener listener) { try { + final String repository = request.repository(); List snapshotInfoBuilder = new ArrayList<>(); if (isAllSnapshots(request.snapshots())) { - List snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable()); - for (Snapshot snapshot : snapshots) { - snapshotInfoBuilder.add(new SnapshotInfo(snapshot)); - } + snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository)); + snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, + snapshotsService.snapshotIds(repository), + request.ignoreUnavailable())); } else if (isCurrentSnapshots(request.snapshots())) { - List snapshots = snapshotsService.currentSnapshots(request.repository()); - for (Snapshot snapshot : snapshots) { - snapshotInfoBuilder.add(new SnapshotInfo(snapshot)); - } + snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository)); } else { - Set snapshotsToGet = new LinkedHashSet<>(); // to keep insertion order - List snapshots = null; + final Map allSnapshotIds = new HashMap<>(); + for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) { + SnapshotId snapshotId = snapshotInfo.snapshotId(); + allSnapshotIds.put(snapshotId.getName(), snapshotId); + } + for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) { + allSnapshotIds.put(snapshotId.getName(), snapshotId); + } + final Set toResolve = new LinkedHashSet<>(); // maintain order for (String snapshotOrPattern : request.snapshots()) { if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { - snapshotsToGet.add(snapshotOrPattern); - } else { - if (snapshots == null) { // lazily load snapshots - snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable()); + if (allSnapshotIds.containsKey(snapshotOrPattern)) { + toResolve.add(allSnapshotIds.get(snapshotOrPattern)); + } else if (request.ignoreUnavailable() == false) { + throw new SnapshotMissingException(repository, snapshotOrPattern); } - for (Snapshot snapshot : snapshots) { - if (Regex.simpleMatch(snapshotOrPattern, snapshot.name())) { - snapshotsToGet.add(snapshot.name()); + } else { + for (Map.Entry entry : allSnapshotIds.entrySet()) { + if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) { + toResolve.add(entry.getValue()); } } } } - for (String snapshot : snapshotsToGet) { - SnapshotId snapshotId = new SnapshotId(request.repository(), snapshot); - snapshotInfoBuilder.add(new SnapshotInfo(snapshotsService.snapshot(snapshotId))); + + if (toResolve.isEmpty() && request.ignoreUnavailable() == false) { + throw new SnapshotMissingException(repository, request.snapshots()[0]); } + + snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable())); } - listener.onResponse(new GetSnapshotsResponse(Collections.unmodifiableList(snapshotInfoBuilder))); - } catch (Throwable t) { - listener.onFailure(t); + listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder)); + } catch (Exception e) { + listener.onFailure(e); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java 
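[Editor's note] The rewritten resolution step above first builds a name-to-id map from both in-progress and completed snapshots, then walks the request entries, treating anything that is not a simple wildcard as an exact name and failing fast unless ignore_unavailable is set. A self-contained sketch of that matching logic; simpleMatch here is a crude stand-in for Elasticsearch's Regex.simpleMatch, and string ids stand in for SnapshotId:

import java.util.*;

class SnapshotResolveSketch {
    static List<String> resolve(String[] requested, Map<String, String> allIdsByName, boolean ignoreUnavailable) {
        Set<String> toResolve = new LinkedHashSet<>(); // keep request order
        for (String nameOrPattern : requested) {
            if (!nameOrPattern.contains("*")) {        // exact name
                String id = allIdsByName.get(nameOrPattern);
                if (id != null) {
                    toResolve.add(id);
                } else if (!ignoreUnavailable) {
                    throw new IllegalArgumentException("missing snapshot [" + nameOrPattern + "]");
                }
            } else {                                   // wildcard pattern
                for (Map.Entry<String, String> e : allIdsByName.entrySet()) {
                    if (simpleMatch(nameOrPattern, e.getKey())) {
                        toResolve.add(e.getValue());
                    }
                }
            }
        }
        return new ArrayList<>(toResolve);
    }

    // "*"-only matcher standing in for Regex.simpleMatch
    static boolean simpleMatch(String pattern, String value) {
        return value.matches(pattern.replace("*", ".*"));
    }
}
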
b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index bf4e4f148fc..1a41a776c73 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -57,7 +57,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest listener) { - RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest( - "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(), + protected void masterOperation(final RestoreSnapshotRequest request, final ClusterState state, final ActionListener listener) { + RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(request.repository(), request.snapshot(), request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(), request.settings(), request.masterNodeTimeout(), request.includeGlobalState(), request.partial(), request.includeAliases(), - request.indexSettings(), request.ignoreIndexSettings()); + request.indexSettings(), request.ignoreIndexSettings(), "restore_snapshot[" + request.snapshot() + "]"); restoreService.restoreSnapshot(restoreRequest, new ActionListener() { @Override public void onResponse(RestoreInfo restoreInfo) { if (restoreInfo == null && request.waitForCompletion()) { restoreService.addListener(new ActionListener() { - SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot()); - @Override public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) { - if (this.snapshotId.equals(restoreCompletionResponse.getSnapshotId())) { + final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); + if (snapshot.getRepository().equals(request.repository()) && + snapshot.getSnapshotId().getName().equals(request.snapshot())) { listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); restoreService.removeListener(this); } } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { listener.onFailure(e); } }); @@ -105,7 +104,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction, Map indexShards = new HashMap<>(); stats = new SnapshotStats(); for (SnapshotIndexShardStatus shard : shards) { - indexShards.put(shard.getShardId(), shard); + indexShards.put(shard.getShardId().getId(), shard); stats.add(shard.getStats()); } shardsStats = new SnapshotShardsStats(shards); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 54f87f3c5fc..1a5ef9ab933 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import org.elasticsearch.cluster.SnapshotsInProgress.State; -import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -35,6 +35,7 @@ import java.util.HashMap; import java.util.HashSet; import 
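[Editor's note] Because SnapshotId no longer carries the repository, the restore completion listener above now matches events by comparing repository and snapshot name explicitly rather than by SnapshotId equality. A trimmed sketch of that filter with stand-in types:

class RestoreListenerSketch {
    record Snapshot(String repository, String name) {}

    interface Listener { void onCompletion(Snapshot snapshot); }

    static Listener matching(String repository, String snapshotName, Runnable onMatch) {
        return completed -> {
            // Only react to the restore we started; other restores may finish concurrently.
            if (completed.repository().equals(repository) && completed.name().equals(snapshotName)) {
                onMatch.run();
            }
        };
    }
}
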
java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import static java.util.Collections.unmodifiableMap; @@ -44,7 +45,7 @@ import static java.util.Collections.unmodifiableMap; */ public class SnapshotStatus implements ToXContent, Streamable { - private SnapshotId snapshotId; + private Snapshot snapshot; private State state; @@ -56,11 +57,10 @@ public class SnapshotStatus implements ToXContent, Streamable { private SnapshotStats stats; - - SnapshotStatus(SnapshotId snapshotId, State state, List shards) { - this.snapshotId = snapshotId; - this.state = state; - this.shards = shards; + SnapshotStatus(final Snapshot snapshot, final State state, final List shards) { + this.snapshot = Objects.requireNonNull(snapshot); + this.state = Objects.requireNonNull(state); + this.shards = Objects.requireNonNull(shards); shardsStats = new SnapshotShardsStats(shards); updateShardStats(); } @@ -69,10 +69,10 @@ public class SnapshotStatus implements ToXContent, Streamable { } /** - * Returns snapshot id + * Returns snapshot */ - public SnapshotId getSnapshotId() { - return snapshotId; + public Snapshot getSnapshot() { + return snapshot; } /** @@ -124,7 +124,7 @@ public class SnapshotStatus implements ToXContent, Streamable { @Override public void readFrom(StreamInput in) throws IOException { - snapshotId = SnapshotId.readSnapshotId(in); + snapshot = new Snapshot(in); state = State.fromValue(in.readByte()); int size = in.readVInt(); List builder = new ArrayList<>(); @@ -137,7 +137,7 @@ public class SnapshotStatus implements ToXContent, Streamable { @Override public void writeTo(StreamOutput out) throws IOException { - snapshotId.writeTo(out); + snapshot.writeTo(out); out.writeByte(state.value()); out.writeVInt(shards.size()); for (SnapshotIndexShardStatus shard : shards) { @@ -170,7 +170,6 @@ public class SnapshotStatus implements ToXContent, Streamable { } } - /** * Returns number of files in the snapshot */ @@ -178,22 +177,22 @@ public class SnapshotStatus implements ToXContent, Streamable { return stats; } - static final class Fields { - static final String SNAPSHOT = "snapshot"; - static final String REPOSITORY = "repository"; - static final String STATE = "state"; - static final String INDICES = "indices"; - } + private static final String SNAPSHOT = "snapshot"; + private static final String REPOSITORY = "repository"; + private static final String UUID = "uuid"; + private static final String STATE = "state"; + private static final String INDICES = "indices"; @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.SNAPSHOT, snapshotId.getSnapshot()); - builder.field(Fields.REPOSITORY, snapshotId.getRepository()); - builder.field(Fields.STATE, state.name()); + builder.field(SNAPSHOT, snapshot.getSnapshotId().getName()); + builder.field(REPOSITORY, snapshot.getRepository()); + builder.field(UUID, snapshot.getSnapshotId().getUUID()); + builder.field(STATE, state.name()); shardsStats.toXContent(builder, params); stats.toXContent(builder, params); - builder.startObject(Fields.INDICES); + builder.startObject(INDICES); for (SnapshotIndexStatus indexStatus : getIndices().values()) { indexStatus.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 34e503224ce..b9800a2d9ed 100644 --- 
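[Editor's note] A recurring change in this commit, visible again in SnapshotStatus.readFrom above (new Snapshot(in) replacing SnapshotId.readSnapshotId(in), and likewise new ClusterName(in), new SnapshotInfo(in) elsewhere), is replacing static readX(StreamInput) factories with constructors that take the stream. A minimal sketch of the pattern with java.io types standing in for StreamInput/StreamOutput:

import java.io.*;

class ReadFromStreamSketch {
    static final class Snapshot {
        final String repository;
        final String name;

        Snapshot(String repository, String name) {
            this.repository = repository;
            this.name = name;
        }

        // Deserializing constructor: usable directly as a method reference
        // (Snapshot::new) wherever a reader function is expected.
        Snapshot(DataInput in) throws IOException {
            this(in.readUTF(), in.readUTF());
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(repository);
            out.writeUTF(name);
        }
    }
}
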
a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -73,13 +73,9 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten } } - static final class Fields { - static final String SNAPSHOTS = "snapshots"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); + builder.startArray("snapshots"); for (SnapshotStatus snapshot : snapshots) { snapshot.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index fb6310a45bf..71a709f0b5b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -43,30 +43,32 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicReferenceArray; import static java.util.Collections.unmodifiableMap; /** * Transport client that collects snapshot shard statuses from data nodes */ -public class TransportNodesSnapshotsStatus extends TransportNodesAction { +public class TransportNodesSnapshotsStatus extends TransportNodesAction { public static final String ACTION_NAME = SnapshotsStatusAction.NAME + "[nodes]"; private final SnapshotShardsService snapshotShardsService; @Inject - public TransportNodesSnapshotsStatus(Settings settings, ClusterName clusterName, ThreadPool threadPool, + public TransportNodesSnapshotsStatus(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, SnapshotShardsService snapshotShardsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - Request::new, NodeRequest::new, ThreadPool.Names.GENERIC); + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, + Request::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeSnapshotStatus.class); this.snapshotShardsService = snapshotShardsService; } @@ -86,30 +88,17 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction nodesList = new ArrayList<>(); - final List failures = new ArrayList<>(); - for (int i = 0; i < responses.length(); i++) { - Object resp = responses.get(i); 
- if (resp instanceof NodeSnapshotStatus) { // will also filter out null response for unallocated ones - nodesList.add((NodeSnapshotStatus) resp); - } else if (resp instanceof FailedNodeException) { - failures.add((FailedNodeException) resp); - } else { - logger.warn("unknown response type [{}], expected NodeSnapshotStatus or FailedNodeException", resp); - } - } - return new NodesSnapshotStatus(clusterName, nodesList.toArray(new NodeSnapshotStatus[nodesList.size()]), - failures.toArray(new FailedNodeException[failures.size()])); + protected NodesSnapshotStatus newResponse(Request request, List responses, List failures) { + return new NodesSnapshotStatus(clusterService.getClusterName(), responses, failures); } @Override protected NodeSnapshotStatus nodeOperation(NodeRequest request) { - Map> snapshotMapBuilder = new HashMap<>(); + Map> snapshotMapBuilder = new HashMap<>(); try { String nodeId = clusterService.localNode().getId(); - for (SnapshotId snapshotId : request.snapshotIds) { - Map shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId); + for (Snapshot snapshot : request.snapshots) { + Map shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot); if (shardsStatus == null) { continue; } @@ -125,7 +114,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction { - private SnapshotId[] snapshotIds; + private Snapshot[] snapshots; public Request() { } @@ -149,8 +138,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction { - private FailedNodeException[] failures; - - NodesSnapshotStatus() { - } - - public NodesSnapshotStatus(ClusterName clusterName, NodeSnapshotStatus[] nodes, FailedNodeException[] failures) { - super(clusterName, nodes); - this.failures = failures; + public NodesSnapshotStatus(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); } @Override - public FailedNodeException[] failures() { - return failures; + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readStreamableList(NodeSnapshotStatus::new); } @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - nodes = new NodeSnapshotStatus[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = new NodeSnapshotStatus(); - nodes[i].readFrom(in); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(nodes.length); - for (NodeSnapshotStatus response : nodes) { - response.writeTo(out); - } + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); } } public static class NodeRequest extends BaseNodeRequest { - private SnapshotId[] snapshotIds; + private List snapshots; public NodeRequest() { } NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) { super(nodeId); - snapshotIds = request.snapshotIds; + snapshots = Arrays.asList(request.snapshots); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - int n = in.readVInt(); - snapshotIds = new SnapshotId[n]; - for (int i = 0; i < n; i++) { - snapshotIds[i] = SnapshotId.readSnapshotId(in); - } + snapshots = in.readList(Snapshot::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (snapshotIds != null) { - out.writeVInt(snapshotIds.length); - for (int i = 0; i < snapshotIds.length; i++) { - snapshotIds[i].writeTo(out); - } - } else { - out.writeVInt(0); - } + 
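[Editor's note] The superclass now hands newResponse pre-partitioned, typed lists instead of an AtomicReferenceArray that every action had to filter with instanceof checks, as the deleted block above did. A sketch of the kind of partitioning the base class can now perform once, centrally, with stand-in types:

import java.util.*;

class TypedResponsesSketch {
    record NodeResponse(String nodeId) {}
    record NodeFailure(String nodeId, Exception cause) {}

    // Split raw per-node results into typed lists in one place instead of in
    // every subclass.
    static void partition(Object[] raw, List<NodeResponse> responses, List<NodeFailure> failures) {
        for (Object r : raw) {
            if (r instanceof NodeResponse ok) {
                responses.add(ok);
            } else if (r instanceof NodeFailure failed) {
                failures.add(failed);
            }
            // null entries (e.g. unallocated nodes) are silently dropped
        }
    }
}
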
out.writeList(snapshots); } } public static class NodeSnapshotStatus extends BaseNodeResponse { - private Map> status; + private Map> status; NodeSnapshotStatus() { } - public NodeSnapshotStatus(DiscoveryNode node, Map> status) { + public NodeSnapshotStatus(DiscoveryNode node, Map> status) { super(node); this.status = status; } - public Map> status() { + public Map> status() { return status; } @@ -261,9 +219,9 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction> snapshotMapBuilder = new HashMap<>(numberOfSnapshots); + Map> snapshotMapBuilder = new HashMap<>(numberOfSnapshots); for (int i = 0; i < numberOfSnapshots; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + Snapshot snapshot = new Snapshot(in); int numberOfShards = in.readVInt(); Map shardMapBuilder = new HashMap<>(numberOfShards); for (int j = 0; j < numberOfShards; j++) { @@ -271,7 +229,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction> entry : status.entrySet()) { + for (Map.Entry> entry : status.entrySet()) { entry.getKey().writeTo(out); out.writeVInt(entry.getValue().size()); for (Map.Entry shardEntry : entry.getValue().entrySet()) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index efa156eaa0c..76fe9510ef5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -29,26 +29,32 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; /** */ @@ -87,8 +93,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction listener) throws Exception { - List currentSnapshots = snapshotsService.currentSnapshots(request.repository(), request.snapshots()); - + List currentSnapshots = + snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())); if (currentSnapshots.isEmpty()) { listener.onResponse(buildResponse(request, currentSnapshots, null)); return; @@ -105,27 +111,27 @@ public class 
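[Editor's note] Serializing the nested per-snapshot shard-status map above follows the usual length-prefixed scheme: outer size, then for each snapshot its key plus an inner size-and-entries block. A generic sketch of that layout, using strings for brevity in place of Snapshot, ShardId, and SnapshotIndexShardStatus:

import java.io.*;
import java.util.*;

class NestedMapWireSketch {
    static void write(DataOutput out, Map<String, Map<String, String>> status) throws IOException {
        out.writeInt(status.size());                          // outer size
        for (Map.Entry<String, Map<String, String>> outer : status.entrySet()) {
            out.writeUTF(outer.getKey());                     // snapshot key
            out.writeInt(outer.getValue().size());            // inner size
            for (Map.Entry<String, String> inner : outer.getValue().entrySet()) {
                out.writeUTF(inner.getKey());                 // shard key
                out.writeUTF(inner.getValue());               // shard status
            }
        }
    }

    static Map<String, Map<String, String>> read(DataInput in) throws IOException {
        int outerSize = in.readInt();
        Map<String, Map<String, String>> status = new HashMap<>(outerSize);
        for (int i = 0; i < outerSize; i++) {
            String snapshot = in.readUTF();
            int innerSize = in.readInt();
            Map<String, String> shards = new HashMap<>(innerSize);
            for (int j = 0; j < innerSize; j++) {
                shards.put(in.readUTF(), in.readUTF());
            }
            status.put(snapshot, shards);
        }
        return status;
    }
}
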
TransportSnapshotsStatusAction extends TransportMasterNodeAction() { @Override public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) { try { List currentSnapshots = - snapshotsService.currentSnapshots(request.repository(), request.snapshots()); + snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())); listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses)); - } catch (Throwable e) { + } catch (Exception e) { listener.onFailure(e); } } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { listener.onFailure(e); } }); @@ -136,12 +142,12 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction currentSnapshots, + private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List currentSnapshotEntries, TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException { // First process snapshot that are currently processed List builder = new ArrayList<>(); - Set currentSnapshotIds = new HashSet<>(); - if (!currentSnapshots.isEmpty()) { + Set currentSnapshotNames = new HashSet<>(); + if (!currentSnapshotEntries.isEmpty()) { Map nodeSnapshotStatusMap; if (nodeSnapshotStatuses != null) { nodeSnapshotStatusMap = nodeSnapshotStatuses.getNodesMap(); @@ -149,8 +155,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction(); } - for (SnapshotsInProgress.Entry entry : currentSnapshots) { - currentSnapshotIds.add(entry.snapshotId()); + for (SnapshotsInProgress.Entry entry : currentSnapshotEntries) { + currentSnapshotNames.add(entry.snapshot().getSnapshotId().getName()); List shardStatusBuilder = new ArrayList<>(); for (ObjectObjectCursor shardEntry : entry.shards()) { SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.value; @@ -158,7 +164,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatues = nodeStatus.status().get(entry.snapshotId()); + Map shardStatues = nodeStatus.status().get(entry.snapshot()); if (shardStatues != null) { SnapshotIndexShardStatus shardStatus = shardStatues.get(shardEntry.key); if (shardStatus != null) { @@ -190,41 +196,49 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction 0) { - for (String snapshotName : request.snapshots()) { - SnapshotId snapshotId = new SnapshotId(request.repository(), snapshotName); - if (currentSnapshotIds.contains(snapshotId)) { - // This is a snapshot the is currently running - skipping - continue; + final String repositoryName = request.repository(); + if (Strings.hasText(repositoryName) && request.snapshots() != null && request.snapshots().length > 0) { + final Set requestedSnapshotNames = Sets.newHashSet(request.snapshots()); + final Map matchedSnapshotIds = snapshotsService.snapshotIds(repositoryName).stream() + .filter(s -> requestedSnapshotNames.contains(s.getName())) + .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); + for (final String snapshotName : request.snapshots()) { + if (currentSnapshotNames.contains(snapshotName)) { + // we've already found this snapshot in the current snapshot entries, so skip over + continue; + } + SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName); + if (snapshotId == null) { + // neither in the current snapshot entries nor found in the repository + throw new SnapshotMissingException(repositoryName, snapshotName); + } + SnapshotInfo snapshotInfo = 
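[Editor's note] Matching the requested names against the repository's snapshot ids is now done once up front with a stream, producing a name-keyed lookup map that the loop below consults. A sketch of that collection step with a stand-in SnapshotId record:

import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

class MatchedIdsSketch {
    record SnapshotId(String name, String uuid) {}

    static Map<String, SnapshotId> matchedIds(Collection<SnapshotId> inRepository, String[] requested) {
        Set<String> requestedNames = new HashSet<>(Arrays.asList(requested));
        return inRepository.stream()
                .filter(id -> requestedNames.contains(id.name()))
                .collect(Collectors.toMap(SnapshotId::name, Function.identity()));
    }
}
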
snapshotsService.snapshot(repositoryName, snapshotId); + List shardStatusBuilder = new ArrayList<>(); + if (snapshotInfo.state().completed()) { + Map shardStatues = + snapshotsService.snapshotShards(request.repository(), snapshotInfo); + for (Map.Entry shardStatus : shardStatues.entrySet()) { + shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue())); } - Snapshot snapshot = snapshotsService.snapshot(snapshotId); - List shardStatusBuilder = new ArrayList<>(); - if (snapshot.state().completed()) { - Map shardStatues = snapshotsService.snapshotShards(snapshotId); - for (Map.Entry shardStatus : shardStatues.entrySet()) { - shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue())); - } - final SnapshotsInProgress.State state; - switch (snapshot.state()) { - case FAILED: - state = SnapshotsInProgress.State.FAILED; - break; - case SUCCESS: - case PARTIAL: - // Translating both PARTIAL and SUCCESS to SUCCESS for now - // TODO: add the differentiation on the metadata level in the next major release - state = SnapshotsInProgress.State.SUCCESS; - break; - default: - throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state()); - } - builder.add(new SnapshotStatus(snapshotId, state, Collections.unmodifiableList(shardStatusBuilder))); + final SnapshotsInProgress.State state; + switch (snapshotInfo.state()) { + case FAILED: + state = SnapshotsInProgress.State.FAILED; + break; + case SUCCESS: + case PARTIAL: + // Translating both PARTIAL and SUCCESS to SUCCESS for now + // TODO: add the differentiation on the metadata level in the next major release + state = SnapshotsInProgress.State.SUCCESS; + break; + default: + throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state()); } + builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotInfo.snapshotId()), state, Collections.unmodifiableList(shardStatusBuilder))); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index e9aa9b723fa..2a2f4707f69 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -54,7 +54,7 @@ public class ClusterStateResponse extends ActionResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = ClusterName.readClusterName(in); + clusterName = new ClusterName(in); clusterState = ClusterState.Builder.readFrom(in, null); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 4bd826237c5..ea1c1507448 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -42,13 +41,11 @@ import org.elasticsearch.transport.TransportService; */ public class TransportClusterStateAction extends TransportMasterNodeReadAction { - private final ClusterName clusterName; @Inject public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ClusterName clusterName, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ClusterStateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterStateRequest::new); - this.clusterName = clusterName; + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ClusterStateAction.NAME, false, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterStateRequest::new); } @Override @@ -127,7 +124,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction nodeResponses) { ObjectObjectHashMap countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); @@ -61,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable { this.queryCache = new QueryCacheStats(); this.completion = new CompletionStats(); this.segments = new SegmentsStats(); - this.percolatorCache = new PercolatorQueryCacheStats(); for (ClusterStatsNodeResponse r : nodeResponses) { for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { @@ -84,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.add(shardCommonStats.queryCache); completion.add(shardCommonStats.completion); segments.add(shardCommonStats.segments); - percolatorCache.add(shardCommonStats.percolatorCache); } } @@ -127,42 +118,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable { return segments; } - public PercolatorQueryCacheStats getPercolatorCache() { - return percolatorCache; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - indexCount = in.readVInt(); - shards = ShardStats.readShardStats(in); - docs = DocsStats.readDocStats(in); - store = StoreStats.readStoreStats(in); - fieldData = FieldDataStats.readFieldDataStats(in); - queryCache = QueryCacheStats.readQueryCacheStats(in); - completion = CompletionStats.readCompletionStats(in); - segments = SegmentsStats.readSegmentsStats(in); - percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(indexCount); - shards.writeTo(out); - docs.writeTo(out); - store.writeTo(out); - fieldData.writeTo(out); - queryCache.writeTo(out); - completion.writeTo(out); - segments.writeTo(out); - percolatorCache.writeTo(out); - } - - public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException { - ClusterStatsIndices indicesStats = new ClusterStatsIndices(); - indicesStats.readFrom(in); - return indicesStats; - } - static final class Fields { static final String COUNT = "count"; } @@ -177,11 +132,10 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.toXContent(builder, params); completion.toXContent(builder, params); segments.toXContent(builder, params); - percolatorCache.toXContent(builder, params); return builder; } - public static class ShardStats 
implements ToXContent, Streamable { + public static class ShardStats implements ToXContent { int indices; int total; @@ -326,40 +280,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable { } } - public static ShardStats readShardStats(StreamInput in) throws IOException { - ShardStats c = new ShardStats(); - c.readFrom(in); - return c; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - indices = in.readVInt(); - total = in.readVInt(); - primaries = in.readVInt(); - minIndexShards = in.readVInt(); - maxIndexShards = in.readVInt(); - minIndexPrimaryShards = in.readVInt(); - maxIndexPrimaryShards = in.readVInt(); - minIndexReplication = in.readDouble(); - totalIndexReplication = in.readDouble(); - maxIndexReplication = in.readDouble(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(indices); - out.writeVInt(total); - out.writeVInt(primaries); - out.writeVInt(minIndexShards); - out.writeVInt(maxIndexShards); - out.writeVInt(minIndexPrimaryShards); - out.writeVInt(maxIndexPrimaryShards); - out.writeDouble(minIndexReplication); - out.writeDouble(totalIndexReplication); - out.writeDouble(maxIndexReplication); - } - static final class Fields { static final String SHARDS = "shards"; static final String TOTAL = "total"; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 05b7753ef3a..017b4481240 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -26,9 +26,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; @@ -48,7 +45,7 @@ import java.util.List; import java.util.Map; import java.util.Set; -public class ClusterStatsNodes implements ToXContent, Writeable { +public class ClusterStatsNodes implements ToXContent { private final Counts counts; private final Set versions; @@ -58,33 +55,12 @@ public class ClusterStatsNodes implements ToXContent, Writeable { private final FsInfo.Path fs; private final Set plugins; - ClusterStatsNodes(StreamInput in) throws IOException { - this.counts = new Counts(in); - - int size = in.readVInt(); - this.versions = new HashSet<>(size); - for (int i = 0; i < size; i++) { - this.versions.add(Version.readVersion(in)); - } - - this.os = new OsStats(in); - this.process = new ProcessStats(in); - this.jvm = new JvmStats(in); - this.fs = new FsInfo.Path(in); - - size = in.readVInt(); - this.plugins = new HashSet<>(size); - for (int i = 0; i < size; i++) { - this.plugins.add(PluginInfo.readFromStream(in)); - } - } - - ClusterStatsNodes(ClusterStatsNodeResponse[] nodeResponses) { + ClusterStatsNodes(List nodeResponses) { this.versions = new HashSet<>(); this.fs = new FsInfo.Path(); this.plugins = new HashSet<>(); - Set seenAddresses = new HashSet<>(nodeResponses.length); + Set seenAddresses = new 
HashSet<>(nodeResponses.size()); List nodeInfos = new ArrayList<>(); List nodeStats = new ArrayList<>(); for (ClusterStatsNodeResponse nodeResponse : nodeResponses) { @@ -140,21 +116,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { return plugins; } - @Override - public void writeTo(StreamOutput out) throws IOException { - counts.writeTo(out); - out.writeVInt(versions.size()); - for (Version v : versions) Version.writeVersion(v, out); - os.writeTo(out); - process.writeTo(out); - jvm.writeTo(out); - fs.writeTo(out); - out.writeVInt(plugins.size()); - for (PluginInfo p : plugins) { - p.writeTo(out); - } - } - static final class Fields { static final String COUNT = "count"; static final String VERSIONS = "versions"; @@ -200,18 +161,12 @@ public class ClusterStatsNodes implements ToXContent, Writeable { return builder; } - public static class Counts implements Writeable, ToXContent { + public static class Counts implements ToXContent { static final String COORDINATING_ONLY = "coordinating_only"; private final int total; private final Map roles; - @SuppressWarnings("unchecked") - private Counts(StreamInput in) throws IOException { - this.total = in.readVInt(); - this.roles = (Map)in.readGenericValue(); - } - private Counts(List nodeInfos) { this.roles = new HashMap<>(); for (DiscoveryNode.Role role : DiscoveryNode.Role.values()) { @@ -243,12 +198,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { return roles; } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(total); - out.writeGenericValue(roles); - } - static final class Fields { static final String TOTAL = "total"; } @@ -263,7 +212,7 @@ public class ClusterStatsNodes implements ToXContent, Writeable { } } - public static class OsStats implements ToXContent, Writeable { + public static class OsStats implements ToXContent { final int availableProcessors; final int allocatedProcessors; final ObjectIntHashMap names; @@ -287,30 +236,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { this.allocatedProcessors = allocatedProcessors; } - /** - * Read from a stream. - */ - private OsStats(StreamInput in) throws IOException { - this.availableProcessors = in.readVInt(); - this.allocatedProcessors = in.readVInt(); - int size = in.readVInt(); - this.names = new ObjectIntHashMap<>(); - for (int i = 0; i < size; i++) { - names.addTo(in.readString(), in.readVInt()); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(availableProcessors); - out.writeVInt(allocatedProcessors); - out.writeVInt(names.size()); - for (ObjectIntCursor name : names) { - out.writeString(name.key); - out.writeVInt(name.value); - } - } - public int getAvailableProcessors() { return availableProcessors; } @@ -343,7 +268,7 @@ public class ClusterStatsNodes implements ToXContent, Writeable { } } - public static class ProcessStats implements ToXContent, Writeable { + public static class ProcessStats implements ToXContent { final int count; final int cpuPercent; @@ -384,27 +309,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { this.maxOpenFileDescriptors = maxOpenFileDescriptors; } - /** - * Read from a stream. 
- */ - private ProcessStats(StreamInput in) throws IOException { - this.count = in.readVInt(); - this.cpuPercent = in.readVInt(); - this.totalOpenFileDescriptors = in.readVLong(); - this.minOpenFileDescriptors = in.readLong(); - this.maxOpenFileDescriptors = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(count); - out.writeVInt(cpuPercent); - out.writeVLong(totalOpenFileDescriptors); - out.writeLong(minOpenFileDescriptors); - out.writeLong(maxOpenFileDescriptors); - } - - /** * Cpu usage in percentages - 100 is 1 core. */ @@ -456,7 +360,7 @@ public class ClusterStatsNodes implements ToXContent, Writeable { } } - public static class JvmStats implements Writeable, ToXContent { + public static class JvmStats implements ToXContent { private final ObjectIntHashMap versions; private final long threads; @@ -497,34 +401,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { this.heapMax = heapMax; } - /** - * Read from a stream. - */ - private JvmStats(StreamInput in) throws IOException { - int size = in.readVInt(); - this.versions = new ObjectIntHashMap<>(size); - for (int i = 0; i < size; i++) { - this.versions.addTo(new JvmVersion(in), in.readVInt()); - } - this.threads = in.readVLong(); - this.maxUptime = in.readVLong(); - this.heapUsed = in.readVLong(); - this.heapMax = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(versions.size()); - for (ObjectIntCursor v : versions) { - v.key.writeTo(out); - out.writeVInt(v.value); - } - out.writeVLong(threads); - out.writeVLong(maxUptime); - out.writeVLong(heapUsed); - out.writeVLong(heapMax); - } - public ObjectIntHashMap getVersions() { return versions; } @@ -598,7 +474,7 @@ public class ClusterStatsNodes implements ToXContent, Writeable { } } - public static class JvmVersion implements Writeable { + public static class JvmVersion { String version; String vmName; String vmVersion; @@ -611,27 +487,6 @@ public class ClusterStatsNodes implements ToXContent, Writeable { vmVendor = jvmInfo.getVmVendor(); } - /** - * Read from a stream. 
- */ - JvmVersion(StreamInput in) throws IOException { - version = in.readString(); - vmName = in.readString(); - vmVersion = in.readString(); - vmVendor = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(version); - out.writeString(vmName); - out.writeString(vmVersion); - out.writeString(vmVendor); - } - - JvmVersion() { - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index c272e6d6fbe..efc72d104f8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.cluster.stats; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -29,9 +30,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; -import java.util.Iterator; +import java.util.List; import java.util.Locale; -import java.util.Map; /** * @@ -48,8 +48,9 @@ public class ClusterStatsResponse extends BaseNodesResponse nodes, List failures) { + super(clusterName, nodes, failures); this.timestamp = timestamp; this.clusterUUID = clusterUUID; nodesStats = new ClusterStatsNodes(nodes); @@ -79,77 +80,53 @@ public class ClusterStatsResponse extends BaseNodesResponse getNodesMap() { - throw new UnsupportedOperationException(); - } - - @Override - public ClusterStatsNodeResponse getAt(int position) { - throw new UnsupportedOperationException(); - } - - @Override - public Iterator iterator() { - throw new UnsupportedOperationException(); - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); timestamp = in.readVLong(); - status = null; - if (in.readBoolean()) { - // it may be that the master switched on us while doing the operation. In this case the status may be null. - status = ClusterHealthStatus.fromValue(in.readByte()); - } clusterUUID = in.readString(); - nodesStats = new ClusterStatsNodes(in); - indicesStats = ClusterStatsIndices.readIndicesStats(in); + // it may be that the master switched on us while doing the operation. In this case the status may be null. 
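The ClusterStatsResponse serialization rewrite in the surrounding hunk swaps hand-rolled null handling for readOptionalWriteable/writeOptionalWriteable, which put a presence flag on the wire ahead of the value. A minimal model of that encoding in plain java.io; the method names here are illustrative, not the real StreamInput/StreamOutput API:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalWireSketch {
    // Write a presence flag, then the payload only when the value is present.
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            writeOptionalString(out, null);    // e.g. status unset while the master switched
            writeOptionalString(out, "green");
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(readOptionalString(in)); // null
            System.out.println(readOptionalString(in)); // green
        }
    }
}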
+ status = in.readOptionalWriteable(ClusterHealthStatus::readFrom); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(timestamp); - if (status == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(status.value()); - } out.writeString(clusterUUID); - nodesStats.writeTo(out); - indicesStats.writeTo(out); + out.writeOptionalWriteable(status); } - static final class Fields { - static final String NODES = "nodes"; - static final String INDICES = "indices"; - static final String UUID = "uuid"; - static final String CLUSTER_NAME = "cluster_name"; - static final String STATUS = "status"; + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + List nodes = in.readList(ClusterStatsNodeResponse::readNodeResponse); + + // built from nodes rather than from the stream directly + nodesStats = new ClusterStatsNodes(nodes); + indicesStats = new ClusterStatsIndices(nodes); + + return nodes; + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + // nodeStats and indicesStats are rebuilt from nodes + out.writeStreamableList(nodes); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("timestamp", getTimestamp()); - builder.field(Fields.CLUSTER_NAME, getClusterName().value()); if (params.paramAsBoolean("output_uuid", false)) { - builder.field(Fields.UUID, clusterUUID); + builder.field("uuid", clusterUUID); } if (status != null) { - builder.field(Fields.STATUS, status.name().toLowerCase(Locale.ROOT)); + builder.field("status", status.name().toLowerCase(Locale.ROOT)); } - builder.startObject(Fields.INDICES); + builder.startObject("indices"); indicesStats.toXContent(builder, params); builder.endObject(); - builder.startObject(Fields.NODES); + builder.startObject("nodes"); nodesStats.toXContent(builder, params); builder.endObject(); return builder; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 4a0eb33c0b5..bc894952370 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.cluster.stats; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStats; @@ -27,7 +28,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -46,7 +46,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * @@ -55,35 +54,29 @@ public class TransportClusterStatsAction extends 
TransportNodesAction { private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store, - CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, - CommonStatsFlags.Flag.PercolatorCache); + CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments); private final NodeService nodeService; private final IndicesService indicesService; @Inject - public TransportClusterStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, + public TransportClusterStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, NodeService nodeService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ClusterStatsAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, - indexNameExpressionResolver, ClusterStatsRequest::new, ClusterStatsNodeRequest::new, ThreadPool.Names.MANAGEMENT); + super(settings, ClusterStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, ClusterStatsRequest::new, ClusterStatsNodeRequest::new, ThreadPool.Names.MANAGEMENT, + ClusterStatsNodeResponse.class); this.nodeService = nodeService; this.indicesService = indicesService; } @Override - protected ClusterStatsResponse newResponse(ClusterStatsRequest clusterStatsRequest, AtomicReferenceArray responses) { - final List nodeStats = new ArrayList<>(responses.length()); - for (int i = 0; i < responses.length(); i++) { - Object resp = responses.get(i); - if (resp instanceof ClusterStatsNodeResponse) { - nodeStats.add((ClusterStatsNodeResponse) resp); - } - } - return new ClusterStatsResponse(System.currentTimeMillis(), clusterName, - clusterService.state().metaData().clusterUUID(), nodeStats.toArray(new ClusterStatsNodeResponse[nodeStats.size()])); + protected ClusterStatsResponse newResponse(ClusterStatsRequest request, + List responses, List failures) { + return new ClusterStatsResponse(System.currentTimeMillis(), clusterService.getClusterName(), + clusterService.state().metaData().clusterUUID(), responses, failures); } @Override @@ -98,14 +91,14 @@ public class TransportClusterStatsAction extends TransportNodesAction shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { // only report on fully started shards - shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); + shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java deleted file mode 100644 index 81ff9021815..00000000000 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateRequest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.validate.template; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.script.Template; - -import java.io.IOException; - -public class RenderSearchTemplateRequest extends ActionRequest { - - private Template template; - - public void template(Template template) { - this.template = template; - } - - public Template template() { - return template; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException exception = null; - if (template == null) { - exception = new ActionRequestValidationException(); - exception.addValidationError("template must not be null"); - } - return exception; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - boolean hasTemplate = template!= null; - out.writeBoolean(hasTemplate); - if (hasTemplate) { - template.writeTo(out); - } - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - if (in.readBoolean()) { - template = new Template(in); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java deleted file mode 100644 index ad573c4a074..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.cluster.validate.template; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Collections; - -public class TransportRenderSearchTemplateAction extends HandledTransportAction { - - private final ScriptService scriptService; - private final ClusterService clusterService; - - @Inject - public TransportRenderSearchTemplateAction(ScriptService scriptService, Settings settings, ThreadPool threadPool, - TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService) { - super(settings, RenderSearchTemplateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, RenderSearchTemplateRequest::new); - this.scriptService = scriptService; - this.clusterService = clusterService; - } - - @Override - protected void doExecute(final RenderSearchTemplateRequest request, final ActionListener listener) { - threadPool.generic().execute(new AbstractRunnable() { - - @Override - public void onFailure(Throwable t) { - listener.onFailure(t); - } - - @Override - protected void doRun() throws Exception { - ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, - Collections.emptyMap(), clusterService.state()); - BytesReference processedTemplate = (BytesReference) executable.run(); - RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); - response.source(processedTemplate); - listener.onResponse(response); - } - }); - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java index 30cbd6d9ec7..42d34d6ebd7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesClusterStateUpdateRequest.java @@ -28,7 +28,7 @@ public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateR AliasAction[] actions; - IndicesAliasesClusterStateUpdateRequest() { + public IndicesAliasesClusterStateUpdateRequest() { } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 218b84e68ae..3ae7d7ebb7c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -119,7 +119,7 @@ 
public class TransportIndicesAliasesAction extends TransportMasterNodeAction blocks = new HashSet<>(); - CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) { + public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) { this.originalMessage = originalMessage; this.cause = cause; this.index = index; @@ -91,6 +93,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return this; } + public CreateIndexClusterStateUpdateRequest shrinkFrom(Index shrinkFrom) { + this.shrinkFrom = shrinkFrom; + return this; + } + public TransportMessage originalMessage() { return originalMessage; } @@ -127,6 +134,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return blocks; } + public Index shrinkFrom() { + return shrinkFrom; + } + /** True if all fields that span multiple types should be updated, false otherwise */ public boolean updateAllTypes() { return updateAllTypes; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 43d45672aec..b808484cef5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -364,7 +364,7 @@ public class CreateIndexRequest extends AcknowledgedRequest throw new ElasticsearchParseException("failed to parse source for create index", e); } } else { - settings(new String(source.toBytes(), StandardCharsets.UTF_8)); + settings(source.utf8ToString()); } return this; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 485f0a53f69..b9282002349 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -30,10 +30,10 @@ import java.io.IOException; */ public class CreateIndexResponse extends AcknowledgedResponse { - CreateIndexResponse() { + protected CreateIndexResponse() { } - CreateIndexResponse(boolean acknowledged) { + protected CreateIndexResponse(boolean acknowledged) { super(acknowledged); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 7b47a46a236..444da8df082 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -87,7 +87,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction { + + DeleteIndexClusterStateUpdateRequest() { + + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 7c957eaebfc..f1d7d38f6ac 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -23,26 
+23,22 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.common.unit.TimeValue.readTimeValue; /** * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. */ -public class DeleteIndexRequest extends MasterNodeRequest implements IndicesRequest.Replaceable { +public class DeleteIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; // Delete index should work by default on both open and closed indices. private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); - private TimeValue timeout = AcknowledgedRequest.DEFAULT_ACK_TIMEOUT; public DeleteIndexRequest() { } @@ -98,37 +94,11 @@ public class DeleteIndexRequest extends MasterNodeRequest im return indices; } - /** - * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults - * to 10s. - */ - public TimeValue timeout() { - return timeout; - } - - /** - * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults - * to 10s. - */ - public DeleteIndexRequest timeout(TimeValue timeout) { - this.timeout = timeout; - return this; - } - - /** - * Timeout to wait for the index deletion to be acknowledged by current cluster nodes. Defaults - * to 10s. 
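Since DeleteIndexRequest now extends AcknowledgedRequest, the timeout accessors deleted in this hunk are inherited rather than hand-rolled, with the same 10s default. A hypothetical call site, assuming the usual AcknowledgedRequest setters:

// Hypothetical usage; timeout(String) is inherited from AcknowledgedRequest.
DeleteIndexRequest request = new DeleteIndexRequest("logs-2016-06-01");
request.timeout("30s"); // acknowledgement timeout, still defaulting to 10s when unset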
- */ - public DeleteIndexRequest timeout(String timeout) { - return timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - timeout = readTimeValue(in); } @Override @@ -136,6 +106,5 @@ public class DeleteIndexRequest extends MasterNodeRequest im super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - timeout.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 489001d9b89..947936bddc7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -85,15 +86,21 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction() { @Override - public void onResponse(MetaDataDeleteIndexService.Response response) { - listener.onResponse(new DeleteIndexResponse(response.acknowledged())); + public void onResponse(ClusterStateUpdateResponse response) { + listener.onResponse(new DeleteIndexResponse(response.isAcknowledged())); } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { + logger.debug("failed to delete indices [{}]", t, concreteIndices); listener.onFailure(t); } }); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 8bb124d8fc4..a29918b438e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 7e750b97677..82fb6d70ca4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,14 +19,13 
@@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; @@ -55,18 +54,19 @@ public class TransportShardFlushAction extends TransportReplicationAction shardOperationOnPrimary(ShardFlushRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(ShardFlushRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.flush(shardRequest.getRequest()); logger.trace("{} flush request executed on primary", indexShard.shardId()); - return new Tuple<>(new ReplicationResponse(), shardRequest); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override - protected void shardOperationOnReplica(ShardFlushRequest request) { + protected ReplicaResult shardOperationOnReplica(ShardFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); indexShard.flush(request.getRequest()); logger.trace("{} flush request executed on replica", indexShard.shardId()); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index d9031807ae8..a69dd2ed437 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -77,7 +77,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction { +public class TransportShardRefreshAction + extends TransportReplicationAction { public static final String NAME = RefreshAction.NAME + "[s]"; @@ -47,8 +44,8 @@ public class TransportShardRefreshAction extends TransportReplicationAction shardOperationOnPrimary(BasicReplicationRequest shardRequest) { + protected PrimaryResult shardOperationOnPrimary(BasicReplicationRequest shardRequest) { IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id()); indexShard.refresh("api"); logger.trace("{} refresh request executed on primary", indexShard.shardId()); - return new Tuple<>(new ReplicationResponse(), shardRequest); + return new PrimaryResult(shardRequest, new ReplicationResponse()); } @Override - protected void shardOperationOnReplica(BasicReplicationRequest request) { + protected ReplicaResult shardOperationOnReplica(BasicReplicationRequest request) { final ShardId shardId = request.shardId(); IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); indexShard.refresh("api"); logger.trace("{} refresh request executed 
on replica", indexShard.shardId()); + return new ReplicaResult(); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java new file mode 100644 index 00000000000..8d9b48f2000 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcherSupplier; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; + +import java.util.Set; + +/** + * Base class for rollover request conditions + */ +public abstract class Condition<T> implements NamedWriteable { + + public static ObjectParser<Set<Condition>, ParseFieldMatcherSupplier> PARSER = + new ObjectParser<>("conditions", null); + static { + PARSER.declareString((conditions, s) -> + conditions.add(new MaxAgeCondition(TimeValue.parseTimeValue(s, MaxAgeCondition.NAME))), + new ParseField(MaxAgeCondition.NAME)); + PARSER.declareLong((conditions, value) -> + conditions.add(new MaxDocsCondition(value)), new ParseField(MaxDocsCondition.NAME)); + } + + protected T value; + protected final String name; + + protected Condition(String name) { + this.name = name; + } + + public abstract Result evaluate(final Stats stats); + + @Override + public final String toString() { + return "[" + name + ": " + value + "]"; + } + + /** + * Holder for index stats used to evaluate conditions + */ + public static class Stats { + public final long numDocs; + public final long indexCreated; + + public Stats(long numDocs, long indexCreated) { + this.numDocs = numDocs; + this.indexCreated = indexCreated; + } + } + + /** + * Holder for evaluated condition result + */ + public static class Result { + public final Condition condition; + public final boolean matched; + + protected Result(Condition condition, boolean matched) { + this.condition = condition; + this.matched = matched; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java new file mode 100644 index 00000000000..9208193c984 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxAgeCondition.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; + +import java.io.IOException; + +/** + * Condition for index maximum age. Evaluates to true + * when the index is at least {@link #value} old + */ +public class MaxAgeCondition extends Condition { + public static final String NAME = "max_age"; + + public MaxAgeCondition(TimeValue value) { + super(NAME); + this.value = value; + } + + public MaxAgeCondition(StreamInput in) throws IOException { + super(NAME); + this.value = TimeValue.timeValueMillis(in.readLong()); + } + + @Override + public Result evaluate(final Stats stats) { + long indexAge = System.currentTimeMillis() - stats.indexCreated; + return new Result(this, this.value.getMillis() <= indexAge); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(value.getMillis()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java similarity index 55% rename from core/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java index dae4a37765d..7c1f802389e 100644 --- a/core/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxDocsCondition.java @@ -17,58 +17,42 @@ * under the License. */ -package org.elasticsearch.common.transport; +package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; /** - * + * Condition for maximum index docs. 
Evaluates to true + * when the index has at least {@link #value} docs */ -public class DummyTransportAddress implements TransportAddress { +public class MaxDocsCondition extends Condition { + public static final String NAME = "max_docs"; - public static final DummyTransportAddress INSTANCE = new DummyTransportAddress(); + public MaxDocsCondition(Long value) { + super(NAME); + this.value = value; + } - private DummyTransportAddress() { + public MaxDocsCondition(StreamInput in) throws IOException { + super(NAME); + this.value = in.readLong(); } @Override - public short uniqueAddressTypeId() { - return 0; + public Result evaluate(final Stats stats) { + return new Result(this, this.value <= stats.numDocs); } @Override - public boolean sameHost(TransportAddress other) { - return other == INSTANCE; - } - - @Override - public boolean isLoopbackOrLinkLocalAddress() { - return false; - } - - @Override - public String getHost() { - return "dummy"; - } - - @Override - public String getAddress() { - return "0.0.0.0"; // see https://en.wikipedia.org/wiki/0.0.0.0 - } - - @Override - public int getPort() { - return 42; + public String getWriteableName() { + return NAME; } @Override public void writeTo(StreamOutput out) throws IOException { - } - - @Override - public String toString() { - return "_dummy_addr_"; + out.writeLong(value); } } diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java similarity index 55% rename from core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java index 8455a3bf562..dd9d50dac73 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverAction.java @@ -17,24 +17,29 @@ * under the License. */ -package org.elasticsearch.nodesinfo.plugin.dummy1; +package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; -public class TestPlugin extends Plugin { +/** + */ +public class RolloverAction extends Action { - static final public class Fields { - static public final String NAME = "test-plugin"; - static public final String DESCRIPTION = NAME + " description"; + public static final RolloverAction INSTANCE = new RolloverAction(); + public static final String NAME = "indices:admin/rollover"; + + private RolloverAction() { + super(NAME); } @Override - public String name() { - return Fields.NAME; + public RolloverResponse newResponse() { + return new RolloverResponse(); } @Override - public String description() { - return Fields.DESCRIPTION; + public RolloverRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RolloverRequestBuilder(client, this); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java new file mode 100644 index 00000000000..9e98418f184 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -0,0 +1,209 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParseFieldMatcherSupplier; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request class to swap index under an alias upon satisfying conditions + */ +public class RolloverRequest extends AcknowledgedRequest implements IndicesRequest { + + public static ObjectParser PARSER = + new ObjectParser<>("conditions", null); + static { + PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> + Condition.PARSER.parse(parser, request.conditions, parseFieldMatcherSupplier), + new ParseField("conditions"), ObjectParser.ValueType.OBJECT); + PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> + request.createIndexRequest.settings(parser.map()), + new ParseField("settings"), ObjectParser.ValueType.OBJECT); + PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> { + for (Map.Entry mappingsEntry : parser.map().entrySet()) { + request.createIndexRequest.mapping(mappingsEntry.getKey(), + (Map) mappingsEntry.getValue()); + } + }, new ParseField("mappings"), ObjectParser.ValueType.OBJECT); + PARSER.declareField((parser, request, parseFieldMatcherSupplier) -> + request.createIndexRequest.aliases(parser.map()), + new ParseField("aliases"), ObjectParser.ValueType.OBJECT); + } + + private String alias; + private String newIndexName; + private boolean dryRun; + private Set conditions = new HashSet<>(2); + private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + + RolloverRequest() {} + + public RolloverRequest(String alias, String newIndexName) { + this.alias = alias; + this.newIndexName = newIndexName; + } + + @Override + public ActionRequestValidationException validate() { + 
ActionRequestValidationException validationException = createIndexRequest == null ? null : createIndexRequest.validate(); + if (alias == null) { + validationException = addValidationError("index alias is missing", validationException); + } + if (createIndexRequest == null) { + validationException = addValidationError("create index request is missing", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + alias = in.readString(); + newIndexName = in.readOptionalString(); + dryRun = in.readBoolean(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + this.conditions.add(in.readNamedWriteable(Condition.class)); + } + createIndexRequest = new CreateIndexRequest(); + createIndexRequest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(alias); + out.writeOptionalString(newIndexName); + out.writeBoolean(dryRun); + out.writeVInt(conditions.size()); + for (Condition condition : conditions) { + out.writeNamedWriteable(condition); + } + createIndexRequest.writeTo(out); + } + + @Override + public String[] indices() { + return new String[] {alias}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + /** + * Sets the alias to rollover to another index + */ + public void setAlias(String alias) { + this.alias = alias; + } + + /** + * Sets the name of the index to roll over to, overriding the generated name + */ + public void setNewIndexName(String newIndexName) { + this.newIndexName = newIndexName; + } + + /** + * Sets whether the rollover should only be simulated, even if conditions are met + */ + public void dryRun(boolean dryRun) { + this.dryRun = dryRun; + } + + /** + * Adds a condition to check if the index is at least age old + */ + public void addMaxIndexAgeCondition(TimeValue age) { + this.conditions.add(new MaxAgeCondition(age)); + } + + /** + * Adds a condition to check if the index has at least numDocs + */ + public void addMaxIndexDocsCondition(long numDocs) { + this.conditions.add(new MaxDocsCondition(numDocs)); + } + + /** + * Sets the rollover index creation request to override index settings when + * the rolled over index has to be created + */ + public void setCreateIndexRequest(CreateIndexRequest createIndexRequest) { + this.createIndexRequest = Objects.requireNonNull(createIndexRequest, "create index request must not be null"); + } + + boolean isDryRun() { + return dryRun; + } + + Set<Condition> getConditions() { + return conditions; + } + + String getAlias() { + return alias; + } + + String getNewIndexName() { + return newIndexName; + } + + CreateIndexRequest getCreateIndexRequest() { + return createIndexRequest; + } + + public void source(BytesReference source) { + XContentType xContentType = XContentFactory.xContentType(source); + if (xContentType != null) { + try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) { + PARSER.parse(parser, this, () -> ParseFieldMatcher.EMPTY); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse source for rollover index", e); + } + } else { + throw new ElasticsearchParseException("failed to parse content type for rollover index source"); + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java new file mode 
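The PARSER declared at the top of RolloverRequest accepts a body with conditions, settings, mappings, and aliases objects. A hypothetical way to drive it through source(); the JSON field names come from the parser declarations above, and BytesArray (org.elasticsearch.common.bytes.BytesArray) is assumed as the BytesReference implementation:

// Hypothetical usage of RolloverRequest.source(): "max_age" parses as a TimeValue
// string and "max_docs" as a long, per Condition.PARSER.
RolloverRequest request = new RolloverRequest("logs-write", null); // null: derive the new index name
String body = "{"
        + "\"conditions\": {\"max_age\": \"7d\", \"max_docs\": 1000000},"
        + "\"settings\": {\"index.number_of_shards\": 2}"
        + "}";
request.source(new BytesArray(body));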
100644 index 00000000000..e9b4351fc5d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; + + +public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder { + public RolloverRequestBuilder(ElasticsearchClient client, RolloverAction action) { + super(client, action, new RolloverRequest()); + } + + public RolloverRequestBuilder setAlias(String alias) { + this.request.setAlias(alias); + return this; + } + + public RolloverRequestBuilder setNewIndexName(String newIndexName) { + this.request.setNewIndexName(newIndexName); + return this; + } + + public RolloverRequestBuilder addMaxIndexAgeCondition(TimeValue age) { + this.request.addMaxIndexAgeCondition(age); + return this; + } + + public RolloverRequestBuilder addMaxIndexDocsCondition(long docs) { + this.request.addMaxIndexDocsCondition(docs); + return this; + } + + public RolloverRequestBuilder dryRun(boolean dryRun) { + this.request.dryRun(dryRun); + return this; + } + + public RolloverRequestBuilder settings(Settings settings) { + this.request.getCreateIndexRequest().settings(settings); + return this; + } + + public RolloverRequestBuilder alias(Alias alias) { + this.request.getCreateIndexRequest().alias(alias); + return this; + } + + public RolloverRequestBuilder mapping(String type, String source) { + this.request.getCreateIndexRequest().mapping(type, source); + return this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java new file mode 100644 index 00000000000..0f254e825da --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
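A hypothetical client-side sketch using the builder above; client is assumed to be an ElasticsearchClient, and get() is assumed to come from the ActionRequestBuilder base class:

RolloverResponse response = new RolloverRequestBuilder(client, RolloverAction.INSTANCE)
        .setAlias("logs-write")                               // alias whose index may roll over
        .addMaxIndexAgeCondition(TimeValue.timeValueDays(7))  // match once the index is 7 days old
        .addMaxIndexDocsCondition(1_000_000L)                 // or once it holds 1M docs
        .dryRun(true)                                         // evaluate conditions without acting
        .get();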
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public final class RolloverResponse extends ActionResponse implements ToXContent { + + private static final String NEW_INDEX = "new_index"; + private static final String OLD_INDEX = "old_index"; + private static final String DRY_RUN = "dry_run"; + private static final String ROLLED_OVER = "rolled_over"; + private static final String CONDITIONS = "conditions"; + + private String oldIndex; + private String newIndex; + private Set<Map.Entry<String, Boolean>> conditionStatus; + private boolean dryRun; + private boolean rolledOver; + + RolloverResponse() { + } + + RolloverResponse(String oldIndex, String newIndex, Set<Condition.Result> conditionResults, + boolean dryRun, boolean rolledOver) { + this.oldIndex = oldIndex; + this.newIndex = newIndex; + this.dryRun = dryRun; + this.rolledOver = rolledOver; + this.conditionStatus = conditionResults.stream() + .map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched)) + .collect(Collectors.toSet()); + } + + /** + * Returns the name of the index that the request alias was pointing to + */ + public String getOldIndex() { + return oldIndex; + } + + /** + * Returns the name of the index that the request alias currently points to + */ + public String getNewIndex() { + return newIndex; + } + + /** + * Returns the statuses of all the request conditions + */ + public Set<Map.Entry<String, Boolean>> getConditionStatus() { + return conditionStatus; + } + + /** + * Returns whether the rollover execution was skipped even when conditions were met + */ + public boolean isDryRun() { + return dryRun; + } + + /** + * Returns whether the rollover was performed, i.e. it was not simulated and the conditions were met + */ + public boolean isRolledOver() { + return rolledOver; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + oldIndex = in.readString(); + newIndex = in.readString(); + int conditionSize = in.readVInt(); + Set<Map.Entry<String, Boolean>> conditions = new HashSet<>(conditionSize); + for (int i = 0; i < conditionSize; i++) { + String condition = in.readString(); + boolean satisfied = in.readBoolean(); + conditions.add(new AbstractMap.SimpleEntry<>(condition, satisfied)); + } + conditionStatus = conditions; + dryRun = in.readBoolean(); + rolledOver = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(oldIndex); + out.writeString(newIndex); + out.writeVInt(conditionStatus.size()); + for (Map.Entry<String, Boolean> entry : conditionStatus) { + out.writeString(entry.getKey()); + out.writeBoolean(entry.getValue()); + } + out.writeBoolean(dryRun); + out.writeBoolean(rolledOver); + } + + @Override + public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(OLD_INDEX, oldIndex); + builder.field(NEW_INDEX, newIndex); + builder.field(ROLLED_OVER, rolledOver); + builder.field(DRY_RUN, dryRun); + builder.startObject(CONDITIONS); + for (Map.Entry entry : conditionStatus) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java new file mode 100644 index 00000000000..af81e5cf8c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -0,0 +1,222 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.AliasAction; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Main class to swap the index pointed to by an alias, given some conditions + */ +public class TransportRolloverAction extends TransportMasterNodeAction { + + private 
static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-(\\d)+$"); + private final MetaDataCreateIndexService createIndexService; + private final MetaDataIndexAliasesService indexAliasesService; + private final Client client; + + @Inject + public TransportRolloverAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataCreateIndexService createIndexService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + MetaDataIndexAliasesService indexAliasesService, Client client) { + super(settings, RolloverAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + RolloverRequest::new); + this.createIndexService = createIndexService; + this.indexAliasesService = indexAliasesService; + this.client = client; + } + + @Override + protected String executor() { + // we go async right away + return ThreadPool.Names.SAME; + } + + @Override + protected RolloverResponse newResponse() { + return new RolloverResponse(); + } + + @Override + protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState state) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, + request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, + indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); + } + + @Override + protected void masterOperation(final RolloverRequest rolloverRequest, final ClusterState state, + final ActionListener listener) { + final MetaData metaData = state.metaData(); + validate(metaData, rolloverRequest); + final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(rolloverRequest.getAlias()); + final IndexMetaData indexMetaData = aliasOrIndex.getIndices().get(0); + final String sourceIndexName = indexMetaData.getIndex().getName(); + client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute( + new ActionListener() { + @Override + public void onResponse(IndicesStatsResponse statsResponse) { + final Set conditionResults = evaluateConditions(rolloverRequest.getConditions(), + statsResponse.getTotal().getDocs(), metaData.index(sourceIndexName)); + final String rolloverIndexName = (rolloverRequest.getNewIndexName() != null) + ? 
rolloverRequest.getNewIndexName() + : generateRolloverIndexName(sourceIndexName); + if (rolloverRequest.isDryRun()) { + listener.onResponse( + new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false)); + return; + } + if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) { + createIndexService.createIndex(prepareCreateIndexRequest(rolloverIndexName, rolloverRequest), + new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse response) { + // switch the alias to point to the newly created index + indexAliasesService.indicesAliases( + prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName, + rolloverRequest), + new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + listener.onResponse( + new RolloverResponse(sourceIndexName, rolloverIndexName, + conditionResults, false, true)); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + @Override + public void onFailure(Exception t) { + listener.onFailure(t); + } + }); + } else { + // conditions not met + listener.onResponse( + new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false) + ); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + ); + } + + static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesUpdateRequest(String oldIndex, String newIndex, + RolloverRequest request) { + final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest() + .ackTimeout(request.ackTimeout()) + .masterNodeTimeout(request.masterNodeTimeout()); + AliasAction[] actions = new AliasAction[2]; + actions[0] = new AliasAction(AliasAction.Type.ADD, newIndex, request.getAlias()); + actions[1] = new AliasAction(AliasAction.Type.REMOVE, oldIndex, request.getAlias()); + updateRequest.actions(actions); + return updateRequest; + } + + + static String generateRolloverIndexName(String sourceIndexName) { + if (INDEX_NAME_PATTERN.matcher(sourceIndexName).matches()) { + int numberIndex = sourceIndexName.lastIndexOf("-"); + assert numberIndex != -1 : "no separator '-' found"; + int counter = Integer.parseInt(sourceIndexName.substring(numberIndex + 1)); + return String.join("-", sourceIndexName.substring(0, numberIndex), String.valueOf(++counter)); + } else { + throw new IllegalArgumentException("index name [" + sourceIndexName + "] does not match pattern '^.*-(\\d)+$'"); + } + } + + static Set evaluateConditions(final Set conditions, + final DocsStats docsStats, final IndexMetaData metaData) { + final long numDocs = docsStats == null ? 
0 : docsStats.getCount(); + final Condition.Stats stats = new Condition.Stats(numDocs, metaData.getCreationDate()); + return conditions.stream() + .map(condition -> condition.evaluate(stats)) + .collect(Collectors.toSet()); + } + + static void validate(MetaData metaData, RolloverRequest request) { + final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(request.getAlias()); + if (aliasOrIndex == null) { + throw new IllegalArgumentException("source alias does not exist"); + } + if (aliasOrIndex.isAlias() == false) { + throw new IllegalArgumentException("source alias is a concrete index"); + } + if (aliasOrIndex.getIndices().size() != 1) { + throw new IllegalArgumentException("source alias maps to multiple indices"); + } + } + + static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final String targetIndexName, + final RolloverRequest rolloverRequest) { + + final CreateIndexRequest createIndexRequest = rolloverRequest.getCreateIndexRequest(); + createIndexRequest.cause("rollover_index"); + createIndexRequest.index(targetIndexName); + return new CreateIndexClusterStateUpdateRequest(createIndexRequest, + "rollover_index", targetIndexName, true) + .ackTimeout(createIndexRequest.timeout()) + .masterNodeTimeout(createIndexRequest.masterNodeTimeout()) + .settings(createIndexRequest.settings()) + .aliases(createIndexRequest.aliases()) + .mappings(createIndexRequest.mappings()); + } + +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index 3e39cfd561f..1052740248d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -31,8 +31,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; -import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; - public class ShardSegments implements Streamable, Iterable { private ShardRouting shardRouting; @@ -88,7 +86,7 @@ public class ShardSegments implements Streamable, Iterable { @Override public void readFrom(StreamInput in) throws IOException { - shardRouting = readShardRoutingEntry(in); + shardRouting = new ShardRouting(in); int size = in.readVInt(); if (size == 0) { segments = Collections.emptyList(); @@ -108,4 +106,4 @@ public class ShardSegments implements Streamable, Iterable { segment.writeTo(out); } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index b20f636977a..5655400465f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -91,7 +91,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction responses, List failures) { fetchResponses.add(new Response(shardId, responses, failures)); if (expectedOps.countDown()) { finish(); @@ -220,10 +221,10 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc public class Response { private final ShardId shardId; - private final NodeGatewayStartedShards[] responses; - private final FailedNodeException[] failures; + 
private final List responses; + private final List failures; - public Response(ShardId shardId, NodeGatewayStartedShards[] responses, FailedNodeException[] failures) { + public Response(ShardId shardId, List responses, List failures) { this.shardId = shardId; this.responses = responses; this.failures = failures; diff --git a/core/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java similarity index 56% rename from core/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java index 21db0b05501..4c09241ad75 100644 --- a/core/src/main/java/org/elasticsearch/script/AbstractFloatSearchScript.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java @@ -17,29 +17,29 @@ * under the License. */ -package org.elasticsearch.script; +package org.elasticsearch.action.admin.indices.shrink; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; /** - * A simpler base class instead of {@link AbstractSearchScript} for computations - * that return a float number. */ -public abstract class AbstractFloatSearchScript extends AbstractSearchScript { +public class ShrinkAction extends Action { - @Override - public Object run() { - return runAsFloat(); + public static final ShrinkAction INSTANCE = new ShrinkAction(); + public static final String NAME = "indices:admin/shrink"; + + private ShrinkAction() { + super(NAME); } @Override - public abstract float runAsFloat(); - - @Override - public double runAsDouble() { - return runAsFloat(); + public ShrinkResponse newResponse() { + return new ShrinkResponse(); } @Override - public long runAsLong() { - return (long) runAsFloat(); + public ShrinkRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ShrinkRequestBuilder(client, this); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java new file mode 100644 index 00000000000..791d35220e2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.action.admin.indices.shrink;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParseFieldMatcherSupplier;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Request class to shrink an index into a single shard
+ */
+public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements IndicesRequest {
+
+ public static ObjectParser<ShrinkRequest, ParseFieldMatcherSupplier> PARSER =
+ new ObjectParser<>("shrink_request", null);
+ static {
+ PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
+ request.getShrinkIndexRequest().settings(parser.map()),
+ new ParseField("settings"), ObjectParser.ValueType.OBJECT);
+ PARSER.declareField((parser, request, parseFieldMatcherSupplier) ->
+ request.getShrinkIndexRequest().aliases(parser.map()),
+ new ParseField("aliases"), ObjectParser.ValueType.OBJECT);
+ }
+
+ private CreateIndexRequest shrinkIndexRequest;
+ private String sourceIndex;
+
+ ShrinkRequest() {}
+
+ public ShrinkRequest(String targetIndex, String sourceIndex) {
+ this.shrinkIndexRequest = new CreateIndexRequest(targetIndex);
+ this.sourceIndex = sourceIndex;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = shrinkIndexRequest == null ?
null : shrinkIndexRequest.validate(); + if (sourceIndex == null) { + validationException = addValidationError("source index is missing", validationException); + } + if (shrinkIndexRequest == null) { + validationException = addValidationError("shrink index request is missing", validationException); + } + return validationException; + } + + public void setSourceIndex(String index) { + this.sourceIndex = index; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shrinkIndexRequest = new CreateIndexRequest(); + shrinkIndexRequest.readFrom(in); + sourceIndex = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shrinkIndexRequest.writeTo(out); + out.writeString(sourceIndex); + } + + @Override + public String[] indices() { + return new String[] {sourceIndex}; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.lenientExpandOpen(); + } + + public void setShrinkIndex(CreateIndexRequest shrinkIndexRequest) { + this.shrinkIndexRequest = Objects.requireNonNull(shrinkIndexRequest, "shrink index request must not be null"); + } + + /** + * Returns the {@link CreateIndexRequest} for the shrink index + */ + public CreateIndexRequest getShrinkIndexRequest() { + return shrinkIndexRequest; + } + + /** + * Returns the source index name + */ + public String getSourceIndex() { + return sourceIndex; + } + + public void source(BytesReference source) { + XContentType xContentType = XContentFactory.xContentType(source); + if (xContentType != null) { + try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) { + PARSER.parse(parser, this, () -> ParseFieldMatcher.EMPTY); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse source for shrink index", e); + } + } else { + throw new ElasticsearchParseException("failed to parse content type for shrink index source"); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java new file mode 100644 index 00000000000..ab392a7f824 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.indices.shrink; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.settings.Settings; + +public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder { + public ShrinkRequestBuilder(ElasticsearchClient client, ShrinkAction action) { + super(client, action, new ShrinkRequest()); + } + + + public ShrinkRequestBuilder setTargetIndex(CreateIndexRequest request) { + this.request.setShrinkIndex(request); + return this; + } + + public ShrinkRequestBuilder setSourceIndex(String index) { + this.request.setSourceIndex(index); + return this; + } + + public ShrinkRequestBuilder setSettings(Settings settings) { + this.request.getShrinkIndexRequest().settings(settings); + return this; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java similarity index 73% rename from core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java index 359a582e169..4835471ae4c 100644 --- a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java @@ -17,14 +17,15 @@ * under the License. */ -package org.elasticsearch.common.compress.deflate; +package org.elasticsearch.action.admin.indices.shrink; -import org.elasticsearch.common.compress.AbstractCompressedXContentTestCase; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public class DeflateXContentTests extends AbstractCompressedXContentTestCase { - - public DeflateXContentTests() { - super(new DeflateCompressor()); +public final class ShrinkResponse extends CreateIndexResponse { + ShrinkResponse() { } + ShrinkResponse(boolean acknowledged) { + super(acknowledged); + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java new file mode 100644 index 00000000000..57dc3e82e18 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.shrink; + +import org.apache.lucene.index.IndexWriter; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndexAlreadyExistsException; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Set; +import java.util.function.IntFunction; + +/** + * Main class to initiate shrinking an index into a new index with a single shard + */ +public class TransportShrinkAction extends TransportMasterNodeAction { + + private final MetaDataCreateIndexService createIndexService; + private final Client client; + + @Inject + public TransportShrinkAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataCreateIndexService createIndexService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { + super(settings, ShrinkAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + ShrinkRequest::new); + this.createIndexService = createIndexService; + this.client = client; + } + + @Override + protected String executor() { + // we go async right away + return ThreadPool.Names.SAME; + } + + @Override + protected ShrinkResponse newResponse() { + return new ShrinkResponse(); + } + + @Override + protected ClusterBlockException checkBlock(ShrinkRequest request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getShrinkIndexRequest().index()); + } + + @Override + protected void masterOperation(final ShrinkRequest shrinkRequest, final ClusterState state, + final ActionListener listener) { + final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex()); + client.admin().indices().prepareStats(sourceIndex).clear().setDocs(true).execute(new ActionListener() { + @Override + public void onResponse(IndicesStatsResponse indicesStatsResponse) { + CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(shrinkRequest, state, + (i) -> { + IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); + return shard == null ? 
null : shard.getPrimary().getDocs();
+ }, indexNameExpressionResolver);
+ createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
+ @Override
+ public void onResponse(ClusterStateUpdateResponse response) {
+ listener.onResponse(new ShrinkResponse(response.isAcknowledged()));
+ }
+
+ @Override
+ public void onFailure(Exception t) {
+ if (t instanceof IndexAlreadyExistsException) {
+ logger.trace("[{}] failed to create shrink index", t, updateRequest.index());
+ } else {
+ logger.debug("[{}] failed to create shrink index", t, updateRequest.index());
+ }
+ listener.onFailure(t);
+ }
+ });
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ listener.onFailure(e);
+ }
+ });
+
+ }
+
+ // static for unit testing this method
+ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkRequest, final ClusterState state,
+ final IntFunction<DocsStats> perShardDocStats, IndexNameExpressionResolver indexNameExpressionResolver) {
+ final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
+ final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest();
+ final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index());
+ final IndexMetaData metaData = state.metaData().index(sourceIndex);
+ final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings())
+ .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
+ int numShards = 1;
+ if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
+ numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
+ }
+ for (int i = 0; i < numShards; i++) {
+ Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(i, metaData, numShards);
+ long count = 0;
+ for (ShardId id : shardIds) {
+ DocsStats docsStats = perShardDocStats.apply(id.id());
+ if (docsStats != null) {
+ count += docsStats.getCount();
+ }
+ if (count > IndexWriter.MAX_DOCS) {
+ throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS
+ + "] docs - too many documents in shards " + shardIds);
+ }
+ }
+
+ }
+ targetIndex.cause("shrink_index");
+ Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
+ settingsBuilder.put("index.number_of_shards", numShards);
+ targetIndex.settings(settingsBuilder);
+
+ return new CreateIndexClusterStateUpdateRequest(targetIndex,
+ "shrink_index", targetIndexName, true)
+ // mappings are updated on the node when the shards are merged in; this prevents race conditions because every mapping
+ // must be applied after the point where we took the snapshot. If somebody switched the index back to read/write and
+ // added documents in the meantime, we would silently miss their mappings and the result would be corrupted and hard
+ // to debug.
+ .ackTimeout(targetIndex.timeout())
+ .masterNodeTimeout(targetIndex.masterNodeTimeout())
+ .settings(targetIndex.settings())
+ .aliases(targetIndex.aliases())
+ .customs(targetIndex.customs())
+ .shrinkFrom(metaData.getIndex());
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
index b130e6b378f..14fc6c05e5c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
@@ -32,10 +32,8 @@ import org.elasticsearch.index.engine.SegmentsStats;
 import
org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; -import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -101,9 +99,6 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = new SegmentsStats(); break; - case PercolatorCache: - percolatorCache = new PercolatorQueryCacheStats(); - break; case Translog: translog = new TranslogStats(); break; @@ -123,8 +118,7 @@ public class CommonStats implements Streamable, ToXContent { } - public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache, - IndexShard indexShard, CommonStatsFlags flags) { + public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) { CommonStatsFlags.Flag[] setFlags = flags.getFlags(); @@ -169,9 +163,6 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = indexShard.segmentStats(flags.includeSegmentFileSizes()); break; - case PercolatorCache: - percolatorCache = percolatorQueryCache.getStats(indexShard.shardId()); - break; case Translog: translog = indexShard.translogStats(); break; @@ -223,9 +214,6 @@ public class CommonStats implements Streamable, ToXContent { @Nullable public FieldDataStats fieldData; - @Nullable - public PercolatorQueryCacheStats percolatorCache; - @Nullable public CompletionStats completion; @@ -331,14 +319,6 @@ public class CommonStats implements Streamable, ToXContent { } else { fieldData.add(stats.getFieldData()); } - if (percolatorCache == null) { - if (stats.getPercolatorCache() != null) { - percolatorCache = new PercolatorQueryCacheStats(); - percolatorCache.add(stats.getPercolatorCache()); - } - } else { - percolatorCache.add(stats.getPercolatorCache()); - } if (completion == null) { if (stats.getCompletion() != null) { completion = new CompletionStats(); @@ -436,11 +416,6 @@ public class CommonStats implements Streamable, ToXContent { return this.fieldData; } - @Nullable - public PercolatorQueryCacheStats getPercolatorCache() { - return percolatorCache; - } - @Nullable public CompletionStats getCompletion() { return completion; @@ -528,9 +503,6 @@ public class CommonStats implements Streamable, ToXContent { if (in.readBoolean()) { fieldData = FieldDataStats.readFieldDataStats(in); } - if (in.readBoolean()) { - percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); - } if (in.readBoolean()) { completion = CompletionStats.readCompletionStats(in); } @@ -610,12 +582,6 @@ public class CommonStats implements Streamable, ToXContent { out.writeBoolean(true); fieldData.writeTo(out); } - if (percolatorCache == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - percolatorCache.writeTo(out); - } if (completion == null) { out.writeBoolean(false); } else { @@ -669,9 +635,6 @@ public class CommonStats implements Streamable, ToXContent { if (fieldData != null) { fieldData.toXContent(builder, params); } - if (percolatorCache != null) { - percolatorCache.toXContent(builder, params); - } if (completion != null) { completion.toXContent(builder, params); } diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index e2f250dd577..a9af50b2492 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -31,8 +31,8 @@ import java.util.EnumSet; */ public class CommonStatsFlags implements Streamable, Cloneable { - public final static CommonStatsFlags ALL = new CommonStatsFlags().all(); - public final static CommonStatsFlags NONE = new CommonStatsFlags().clear(); + public static final CommonStatsFlags ALL = new CommonStatsFlags().all(); + public static final CommonStatsFlags NONE = new CommonStatsFlags().clear(); private EnumSet flags = EnumSet.allOf(Flag.class); private String[] types = null; @@ -240,7 +240,6 @@ public class CommonStatsFlags implements Streamable, Cloneable { FieldData("fielddata"), Docs("docs"), Warmer("warmer"), - PercolatorCache("percolator_cache"), Completion("completion"), Segments("segments"), Translog("translog"), diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java index 4a2d137593e..2308c9bae6d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -184,15 +184,6 @@ public class IndicesStatsRequest extends BroadcastRequest { return flags.isSet(Flag.FieldData); } - public IndicesStatsRequest percolate(boolean percolate) { - flags.set(Flag.PercolatorCache, percolate); - return this; - } - - public boolean percolate() { - return flags.isSet(Flag.PercolatorCache); - } - public IndicesStatsRequest segments(boolean segments) { flags.set(Flag.Segments, segments); return this; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index cad919cbd18..8e7afe3e7e3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder return this; } - public IndicesStatsRequestBuilder setPercolate(boolean percolate) { - request.percolate(percolate); - return this; - } - public IndicesStatsRequestBuilder setSegments(boolean segments) { request.segments(segments); return this; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 50b3157bfab..6640defc1c0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -31,8 +31,6 @@ import org.elasticsearch.index.shard.ShardPath; import java.io.IOException; -import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; - /** */ public class ShardStats implements Streamable, ToXContent { @@ -91,7 +89,7 @@ public class ShardStats implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) 
throws IOException { - shardRouting = readShardRoutingEntry(in); + shardRouting = new ShardRouting(in); commonStats = CommonStats.readCommonStats(in); commitStats = CommitStats.readOptionalCommitStatsFrom(in); statePath = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 8c12dfa9fda..7e8ccd30a8a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.set(CommonStatsFlags.Flag.FieldData); flags.fieldDataFields(request.fieldDataFields()); } - if (request.percolate()) { - flags.set(CommonStatsFlags.Flag.PercolatorCache); - } if (request.segments()) { flags.set(CommonStatsFlags.Flag.Segments); flags.includeSegmentFileSizes(request.includeSegmentFileSizes()); @@ -163,6 +160,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.set(CommonStatsFlags.Flag.Recovery); } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()); + return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats()); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 0763f232711..33addcb8440 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -72,9 +72,9 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio } @Override - public void onFailure(Throwable t) { - logger.debug("failed to delete templates [{}]", t, request.name()); - listener.onFailure(t); + public void onFailure(Exception e) { + logger.debug("failed to delete templates [{}]", e, request.name()); + listener.onFailure(e); } }); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index b773efc2b55..0d14c4d24df 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -93,9 +93,9 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

query; + private QueryBuilder query; private String[] types = Strings.EMPTY_ARRAY; private boolean explain; private boolean rewrite; @@ -57,7 +57,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { this.nowInMillis = request.nowInMillis; } - public QueryBuilder query() { + public QueryBuilder query() { return query; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 7c8d52b6a60..d1405e92e1c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -43,7 +42,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -73,8 +71,6 @@ public class TransportValidateQueryAction extends TransportBroadcastAction { - private QueryBuilder query = new MatchAllQueryBuilder(); + private QueryBuilder query = new MatchAllQueryBuilder(); private boolean explain; private boolean rewrite; @@ -73,11 +73,11 @@ public class ValidateQueryRequest extends BroadcastRequest /** * The query to validate. */ - public QueryBuilder query() { + public QueryBuilder query() { return query; } - public ValidateQueryRequest query(QueryBuilder query) { + public ValidateQueryRequest query(QueryBuilder query) { this.query = query; return this; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java index a0ccca0fb5c..bc8f8c347ab 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -89,6 +89,13 @@ public abstract class BackoffPolicy implements Iterable { return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries); } + /** + * Wraps the backoff policy in one that calls a method every time a new backoff is taken from the policy. 
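+ * <p>
+ * A purely illustrative usage sketch (the counter below is hypothetical, not part of this change):
+ * <pre>
+ * AtomicInteger backoffs = new AtomicInteger();
+ * BackoffPolicy counting = BackoffPolicy.wrap(BackoffPolicy.exponentialBackoff(), backoffs::incrementAndGet);
+ * for (TimeValue delay : counting) {
+ *     // onBackoff has already run by the time each delay is handed out
+ * }
+ * </pre>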
+ */ + public static BackoffPolicy wrap(BackoffPolicy delegate, Runnable onBackoff) { + return new WrappedBackoffPolicy(delegate, onBackoff); + } + private static TimeValue checkDelay(TimeValue delay) { if (delay.millis() > Integer.MAX_VALUE) { throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms"); @@ -200,4 +207,43 @@ public abstract class BackoffPolicy implements Iterable { return delay; } } + + private static final class WrappedBackoffPolicy extends BackoffPolicy { + private final BackoffPolicy delegate; + private final Runnable onBackoff; + + public WrappedBackoffPolicy(BackoffPolicy delegate, Runnable onBackoff) { + this.delegate = delegate; + this.onBackoff = onBackoff; + } + + @Override + public Iterator iterator() { + return new WrappedBackoffIterator(delegate.iterator(), onBackoff); + } + } + + private static final class WrappedBackoffIterator implements Iterator { + private final Iterator delegate; + private final Runnable onBackoff; + + public WrappedBackoffIterator(Iterator delegate, Runnable onBackoff) { + this.delegate = delegate; + this.onBackoff = onBackoff; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public TimeValue next() { + if (false == delegate.hasNext()) { + throw new NoSuchElementException(); + } + onBackoff.run(); + return delegate.next(); + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 512d2c505a9..ad45ace84c9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -91,12 +91,12 @@ public class BulkItemResponse implements Streamable, StatusToXContent { private final Throwable cause; private final RestStatus status; - public Failure(String index, String type, String id, Throwable t) { + public Failure(String index, String type, String id, Throwable cause) { this.index = index; this.type = type; this.id = id; - this.cause = t; - this.status = ExceptionsHelper.status(t); + this.cause = cause; + this.status = ExceptionsHelper.status(cause); } /** @@ -106,7 +106,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { index = in.readString(); type = in.readString(); id = in.readOptionalString(); - cause = in.readThrowable(); + cause = in.readException(); status = ExceptionsHelper.status(cause); } @@ -115,7 +115,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent { out.writeString(getIndex()); out.writeString(getType()); out.writeOptionalString(getId()); - out.writeThrowable(getCause()); + out.writeException(getCause()); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index f008bf9a4e8..e0572344656 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -43,6 +44,7 @@ import org.elasticsearch.index.VersionType; 
import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -53,16 +55,21 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; * Note that we only support refresh on the bulk request not per item. * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ -public class BulkRequest extends ActionRequest implements CompositeIndicesRequest { +public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest { private static final int REQUEST_OVERHEAD = 50; + /** + * Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and + * {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare + * the one with the least casts. + */ final List> requests = new ArrayList<>(); List payloads = null; protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; - private boolean refresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private long sizeInBytes = 0; @@ -125,6 +132,7 @@ public class BulkRequest extends ActionRequest implements Composite } BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { + Objects.requireNonNull(request, "'request' must not be null"); requests.add(request); addPayload(payload); // lack of source is validated in validate() method @@ -144,6 +152,7 @@ public class BulkRequest extends ActionRequest implements Composite } BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) { + Objects.requireNonNull(request, "'request' must not be null"); requests.add(request); addPayload(payload); if (request.doc() != null) { @@ -166,6 +175,7 @@ public class BulkRequest extends ActionRequest implements Composite } public BulkRequest add(DeleteRequest request, @Nullable Object payload) { + Objects.requireNonNull(request, "'request' must not be null"); requests.add(request); addPayload(payload); sizeInBytes += REQUEST_OVERHEAD; @@ -433,18 +443,15 @@ public class BulkRequest extends ActionRequest implements Composite return this.consistencyLevel; } - /** - * Should a refresh be executed post this bulk operation causing the operations to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public BulkRequest refresh(boolean refresh) { - this.refresh = refresh; + @Override + public BulkRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean refresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } /** @@ -479,7 +486,7 @@ public class BulkRequest extends ActionRequest implements Composite * @return Whether this bulk request contains index request with an ingest pipeline enabled. 
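* <p>
* For illustration only (index, type, id and pipeline name below are made up):
* <pre>
* BulkRequest bulk = new BulkRequest();
* bulk.add(new IndexRequest("logs", "event", "1").setPipeline("my-pipeline").source("field", "value"));
* assert bulk.hasIndexRequestsWithPipelines();
* </pre>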
*/ public boolean hasIndexRequestsWithPipelines() { - for (ActionRequest actionRequest : requests) { + for (ActionRequest actionRequest : requests) { if (actionRequest instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) actionRequest; if (Strings.hasText(indexRequest.getPipeline())) { @@ -499,10 +506,9 @@ public class BulkRequest extends ActionRequest implements Composite } for (ActionRequest request : requests) { // We first check if refresh has been set - if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) || - (request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) || - (request instanceof IndexRequest && ((IndexRequest)request).refresh())) { - validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException); + if (((WriteRequest) request).getRefreshPolicy() != RefreshPolicy.NONE) { + validationException = addValidationError( + "RefreshPolicy is not supported on an item request. Set it on the BulkRequest instead.", validationException); } ActionRequestValidationException ex = request.validate(); if (ex != null) { @@ -537,8 +543,8 @@ public class BulkRequest extends ActionRequest implements Composite requests.add(request); } } - refresh = in.readBoolean(); - timeout = TimeValue.readTimeValue(in); + refreshPolicy = RefreshPolicy.readFrom(in); + timeout = new TimeValue(in); } @Override @@ -556,7 +562,7 @@ public class BulkRequest extends ActionRequest implements Composite } request.writeTo(out); } - out.writeBoolean(refresh); + refreshPolicy.writeTo(out); timeout.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 3744055d26c..4f2b7aa702e 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; @@ -35,7 +36,8 @@ import org.elasticsearch.common.unit.TimeValue; * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes * it in a single batch. */ -public class BulkRequestBuilder extends ActionRequestBuilder { +public class BulkRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { super(client, action, new BulkRequest()); @@ -116,16 +118,6 @@ public class BulkRequestBuilder extends ActionRequestBuildertrue. Defaults - * to false. - */ - public BulkRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. 
*/ diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index dc98a16c578..a829e4b0292 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -80,10 +80,10 @@ abstract class BulkRequestHandler { if (!afterCalled) { listener.afterBulk(executionId, bulkRequest, e); } - } catch (Throwable t) { - logger.warn("Failed to execute bulk request {}.", t, executionId); + } catch (Exception e) { + logger.warn("Failed to execute bulk request {}.", e, executionId); if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, t); + listener.afterBulk(executionId, bulkRequest, e); } } } @@ -131,7 +131,7 @@ abstract class BulkRequestHandler { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { try { listener.afterBulk(executionId, bulkRequest, e); } finally { @@ -144,9 +144,9 @@ abstract class BulkRequestHandler { Thread.currentThread().interrupt(); logger.info("Bulk request {} has been cancelled.", e, executionId); listener.afterBulk(executionId, bulkRequest, e); - } catch (Throwable t) { - logger.warn("Failed to execute bulk request {}.", t, executionId); - listener.afterBulk(executionId, bulkRequest, t); + } catch (Exception e) { + logger.warn("Failed to execute bulk request {}.", e, executionId); + listener.afterBulk(executionId, bulkRequest, e); } finally { if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore semaphore.release(); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index cd93735b642..e214f87ddb6 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -35,7 +35,7 @@ import java.util.Iterator; */ public class BulkResponse extends ActionResponse implements Iterable { - public final static long NO_INGEST_TOOK = -1L; + public static final long NO_INGEST_TOOK = -1L; private BulkItemResponse[] responses; private long tookInMillis; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 874789e8d61..2c16bcb5e9c 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -31,23 +31,17 @@ import java.util.List; /** * */ -public class BulkShardRequest extends ReplicationRequest { +public class BulkShardRequest extends ReplicatedWriteRequest { private BulkItemRequest[] items; - private boolean refresh; - public BulkShardRequest() { } - BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) { + BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) { super(shardId); this.items = items; - this.refresh = refresh; - } - - boolean refresh() { - return 
this.refresh; + setRefreshPolicy(refreshPolicy); } BulkItemRequest[] items() { @@ -77,7 +71,6 @@ public class BulkShardRequest extends ReplicationRequest { out.writeBoolean(false); } } - out.writeBoolean(refresh); } @Override @@ -89,7 +82,6 @@ public class BulkShardRequest extends ReplicationRequest { items[i] = BulkItemRequest.readBulkItem(in); } } - refresh = in.readBoolean(); } @Override @@ -97,8 +89,15 @@ public class BulkShardRequest extends ReplicationRequest { // This is included in error messages so we'll try to make it somewhat user friendly. StringBuilder b = new StringBuilder("BulkShardRequest to ["); b.append(index).append("] containing [").append(items.length).append("] requests"); - if (refresh) { + switch (getRefreshPolicy()) { + case IMMEDIATE: b.append(" and a refresh"); + break; + case WAIT_UNTIL: + b.append(" blocking until refresh"); + break; + case NONE: + break; } return b.toString(); } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java index 76c80a9b064..22260181bb1 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardResponse.java @@ -19,7 +19,9 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ReplicationResponse; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; @@ -29,7 +31,7 @@ import java.io.IOException; /** * */ -public class BulkShardResponse extends ReplicationResponse { +public class BulkShardResponse extends ReplicationResponse implements WriteResponse { private ShardId shardId; private BulkItemResponse[] responses; @@ -50,6 +52,20 @@ public class BulkShardResponse extends ReplicationResponse { return responses; } + @Override + public void setForcedRefresh(boolean forcedRefresh) { + /* + * Each DocWriteResponse already has a location for whether or not it forced a refresh so we just set that information on the + * response. 
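+ * Note that the shard-level response itself keeps no forcedRefresh flag; the loop below
+ * propagates the value to each item response instead.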
+ */ + for (BulkItemResponse response : responses) { + DocWriteResponse r = response.getResponse(); + if (r != null) { + r.setForcedRefresh(forcedRefresh); + } + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java index acaa784ac87..95778785ab9 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -130,7 +130,7 @@ public class Retry { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { try { listener.onFailure(e); } finally { @@ -163,8 +163,8 @@ public class Retry { } for (BulkItemResponse bulkItemResponse : bulkItemResponses) { if (bulkItemResponse.isFailed()) { - Throwable cause = bulkItemResponse.getFailure().getCause(); - Throwable rootCause = ExceptionsHelper.unwrapCause(cause); + final Throwable cause = bulkItemResponse.getFailure().getCause(); + final Throwable rootCause = ExceptionsHelper.unwrapCause(cause); if (!rootCause.getClass().equals(retryOnThrowable)) { return false; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 667e691f6c8..f23c91be201 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -150,14 +150,14 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, ActionRequest request, String index, Throwable e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, ActionRequest request, String index, Exception e) { if (request instanceof IndexRequest) { IndexRequest indexRequest = (IndexRequest) request; if (index.equals(indexRequest.index())) { @@ -304,7 +305,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { list = new ArrayList<>(); @@ -314,7 +315,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { list = new ArrayList<>(); @@ -324,7 +325,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { list = new ArrayList<>(); @@ -344,7 +345,8 @@ public class TransportBulkAction extends HandledTransportAction> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); - BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()])); + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(), + requests.toArray(new BulkItemRequest[requests.size()])); bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel()); bulkShardRequest.timeout(bulkRequest.timeout()); if (task != null) { @@ -366,7 +368,7 @@ public class TransportBulkAction extends HandledTransportAction { +public class TransportShardBulkAction extends TransportWriteAction { - private final static String OP_TYPE_UPDATE = "update"; - private final static String OP_TYPE_DELETE = "delete"; + private static final String OP_TYPE_UPDATE = "update"; + private static final String OP_TYPE_DELETE = "delete"; public static final 
String ACTION_NAME = BulkAction.NAME + "[s]"; @@ -83,9 +85,8 @@ public class TransportShardBulkAction extends TransportReplicationAction shardOperationOnPrimary(BulkShardRequest request) { + protected WriteResult onPrimaryShard(BulkShardRequest request, IndexShard indexShard) throws Exception { ShardId shardId = request.shardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(shardId.getId()); final IndexMetaData metaData = indexService.getIndexSettings().getIndexMetaData(); long[] preVersions = new long[request.items().length]; @@ -121,13 +121,13 @@ public class TransportShardBulkAction extends TransportReplicationAction(new BulkShardResponse(request.shardId(), responses), request); + BulkShardResponse response = new BulkShardResponse(request.shardId(), responses); + return new WriteResult<>(response, location); } private Translog.Location handleItem(IndexMetaData metaData, BulkShardRequest request, IndexShard indexShard, long[] preVersions, VersionType[] preVersionTypes, Translog.Location location, int requestIndex, BulkItemRequest item) { @@ -154,11 +154,11 @@ public class TransportShardBulkAction extends TransportReplicationAction result = shardIndexOperation(request, indexRequest, metaData, indexShard, true); - location = locationToSync(location, result.location); + location = locationToSync(location, result.getLocation()); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse)); - } catch (Throwable e) { + } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { // restore updated versions... @@ -181,11 +181,11 @@ public class TransportShardBulkAction extends TransportReplicationAction> void logFailure(Throwable e, String operation, ShardId shardId, ReplicationRequest request) { - if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { - logger.trace("{} failed to execute bulk item ({}) {}", e, shardId, operation, request); + private > void logFailure(Throwable t, String operation, ShardId shardId, ReplicationRequest request) { + if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { + logger.trace("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); } else { - logger.debug("{} failed to execute bulk item ({}) {}", e, shardId, operation, request); + logger.debug("{} failed to execute bulk item ({}) {}", t, shardId, operation, request); } } @@ -197,10 +197,10 @@ public class TransportShardBulkAction extends TransportReplicationAction writeResult = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); - DeleteResponse deleteResponse = writeResult.response(); - location = locationToSync(location, writeResult.location); + DeleteResponse deleteResponse = writeResult.getResponse(); + location = locationToSync(location, writeResult.getLocation()); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse)); - } catch (Throwable e) { + } catch (Exception e) { // rethrow the failure if we are going to retry on primary and let parent failure to handle it if (retryPrimaryException(e)) { // restore updated versions... 
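The hunks above migrate the bulk shard handler from TransportReplicationAction onto the new TransportWriteAction base class: primary work now returns a WriteResult pairing the response with its translog location, and callers read it through getResponse()/getLocation() instead of the old public fields. A minimal sketch of the subclass shape this implies follows; ExampleRequest, ExampleResponse, and the two execute helpers are hypothetical, and the generic parameters are assumptions (the diff elides them), while the overridden hooks and the WriteResult usage come from the hunks themselves.

    // Sketch only: the TransportWriteAction contract implied by the surrounding hunks.
    public class TransportExampleAction extends TransportWriteAction<ExampleRequest, ExampleResponse> {
        @Override
        protected WriteResult<ExampleResponse> onPrimaryShard(ExampleRequest request, IndexShard indexShard) throws Exception {
            Translog.Location location = executeOnPrimary(request, indexShard); // hypothetical helper doing the primary-side write
            return new WriteResult<>(new ExampleResponse(), location);          // response paired with its translog location
        }

        @Override
        protected Translog.Location onReplicaShard(ExampleRequest request, IndexShard indexShard) {
            return executeOnReplica(request, indexShard); // hypothetical helper; the replica only reports its translog location
        }
    }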
@@ -232,21 +232,22 @@ public class TransportShardBulkAction extends TransportReplicationAction result = updateResult.writeResult; IndexRequest indexRequest = updateResult.request(); BytesReference indexSourceAsBytes = indexRequest.source(); // add the response - IndexResponse indexResponse = result.response(); + IndexResponse indexResponse = result.getResponse(); UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated()); if (updateRequest.fields() != null && updateRequest.fields().length > 0) { Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); @@ -256,8 +257,9 @@ public class TransportShardBulkAction extends TransportReplicationAction writeResult = updateResult.writeResult; - DeleteResponse response = writeResult.response(); + DeleteResponse response = writeResult.getResponse(); DeleteRequest deleteRequest = updateResult.request(); updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); @@ -273,43 +275,43 @@ public class TransportShardBulkAction extends TransportReplicationAction= updateRequest.retryOnConflict()) { setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t))); + new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); } } else { // rethrow the failure if we are going to retry on primary and let parent failure to handle it - if (retryPrimaryException(t)) { + if (retryPrimaryException(e)) { // restore updated versions... 
for (int j = 0; j < requestIndex; j++) { applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]); } - throw (ElasticsearchException) t; + throw (ElasticsearchException) e; } // if its a conflict failure, and we already executed the request on a primary (and we execute it // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) // then just use the response we got from the successful execution - if (item.getPrimaryResponse() != null && isConflictException(t)) { + if (item.getPrimaryResponse() != null && isConflictException(e)) { setResponse(item, item.getPrimaryResponse()); } else if (updateResult.result == null) { - setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t))); + setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), e))); } else { switch (updateResult.result.operation()) { case UPSERT: case INDEX: IndexRequest indexRequest = updateResult.request(); - logFailure(t, "index", request.shardId(), indexRequest); + logFailure(e, "index", request.shardId(), indexRequest); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, - new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t))); + new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e))); break; case DELETE: DeleteRequest deleteRequest = updateResult.request(); - logFailure(t, "delete", request.shardId(), deleteRequest); + logFailure(e, "delete", request.shardId(), deleteRequest); setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, - new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t))); + new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e))); break; } } @@ -326,11 +328,14 @@ public class TransportShardBulkAction extends TransportReplicationAction shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, IndexMetaData metaData, + IndexShard indexShard, boolean processed) throws Exception { MappingMetaData mappingMd = metaData.mappingOrDefault(indexRequest.type()); if (!processed) { @@ -401,26 +406,26 @@ public class TransportShardBulkAction extends TransportReplicationAction result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard); return new UpdateResult(translate, deleteRequest, result); - } catch (Throwable t) { - t = ExceptionsHelper.unwrapCause(t); + } catch (Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); boolean retry = false; - if (t instanceof VersionConflictEngineException) { + if (cause instanceof VersionConflictEngineException) { retry = true; } - return new UpdateResult(translate, deleteRequest, retry, t, null); + return new UpdateResult(translate, deleteRequest, retry, cause, null); } case NONE: UpdateResponse updateResponse = translate.action(); @@ -431,12 +436,8 @@ public class TransportShardBulkAction extends TransportReplicationAction implements DocumentRequest { +public class DeleteRequest extends ReplicatedWriteRequest implements DocumentRequest { private String type; private String id; @@ -51,7 +51,6 @@ public class DeleteRequest extends ReplicationRequest implements private String routing; @Nullable private String parent; - private boolean refresh; private long version = Versions.MATCH_ANY; private VersionType 
versionType = VersionType.INTERNAL; @@ -165,20 +164,6 @@ public class DeleteRequest extends ReplicationRequest implements return this.routing; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public DeleteRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. @@ -208,7 +193,6 @@ public class DeleteRequest extends ReplicationRequest implements id = in.readString(); routing = in.readOptionalString(); parent = in.readOptionalString(); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); } @@ -220,7 +204,6 @@ public class DeleteRequest extends ReplicationRequest implements out.writeString(id); out.writeOptionalString(routing()); out.writeOptionalString(parent()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); } diff --git a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index 0ce907bac1d..b9b0f95f8de 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; @@ -27,7 +28,8 @@ import org.elasticsearch.index.VersionType; /** * A delete document action request builder. */ -public class DeleteRequestBuilder extends ReplicationRequestBuilder { +public class DeleteRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public DeleteRequestBuilder(ElasticsearchClient client, DeleteAction action) { super(client, action, new DeleteRequest()); @@ -71,16 +73,6 @@ public class DeleteRequestBuilder extends ReplicationRequestBuildertrue. Defaults - * to false. - */ - public DeleteRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the version, which will cause the delete operation to only be performed if a matching * version exists and no changes happened on the doc since then. 
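The DeleteRequest and DeleteRequestBuilder hunks above remove the boolean refresh flag: the request now extends ReplicatedWriteRequest, and the builder picks up setRefreshPolicy from the new WriteRequestBuilder interface. A short usage sketch of the replacement call, assuming a client variable and the RefreshPolicy constants this series introduces on WriteRequest:

    // Old style, removed by this diff: client.prepareDelete("idx", "type", "1").setRefresh(true).get();
    // New style: state the refresh behaviour explicitly via a policy.
    client.prepareDelete("idx", "type", "1")
          .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) // NONE and IMMEDIATE are the other choices
          .get();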
diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 62d46766c47..4bf2bb47992 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -27,19 +27,19 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -49,7 +49,7 @@ import org.elasticsearch.transport.TransportService; /** * Performs the delete operation. */ -public class TransportDeleteAction extends TransportReplicationAction { +public class TransportDeleteAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final TransportCreateIndexAction createIndexAction; @@ -60,9 +60,8 @@ public class TransportDeleteAction extends TransportReplicationAction shardOperationOnPrimary(DeleteRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - final WriteResult result = executeDeleteRequestOnPrimary(request, indexShard); - processAfterWrite(request.refresh(), indexShard, result.location); - return new Tuple<>(result.response, request); + protected WriteResult onPrimaryShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnPrimary(request, indexShard); + } + + @Override + protected Location onReplicaShard(DeleteRequest request, IndexShard indexShard) { + return executeDeleteRequestOnReplica(request, indexShard).getTranslogLocation(); } public static WriteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) { @@ -134,9 +135,8 @@ public class TransportDeleteAction extends TransportReplicationAction( - new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()), - delete.getTranslogLocation()); + DeleteResponse response = new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.version(), delete.found()); + return new WriteResult<>(response, delete.getTranslogLocation()); } public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) { @@ -144,13 +144,4 @@ public class TransportDeleteAction extends TransportReplicationAction { 
private String id; private String routing; private String preference; - private QueryBuilder query; + private QueryBuilder query; private String[] fields; private FetchSourceContext fetchSourceContext; @@ -100,11 +100,11 @@ public class ExplainRequest extends SingleShardRequest { return this; } - public QueryBuilder query() { + public QueryBuilder query() { return query; } - public ExplainRequest query(QueryBuilder query) { + public ExplainRequest query(QueryBuilder query) { this.query = query; return this; } diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 284e31406e2..dc49f2f7ad6 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardIterator; @@ -65,7 +64,6 @@ public class TransportExplainAction extends TransportSingleShardAction implements Writeable, ToXContent { return this.type; } + public String getDisplayType() { + switch (type) { + case 0: + return "integer"; + case 1: + return "float"; + case 2: + return "date"; + case 3: + return "string"; + case 4: + return "ip"; + default: + throw new IllegalArgumentException("Unknown type."); + } + } + /** * @return the total number of documents. 
* @@ -220,23 +237,24 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.MAX_DOC, maxDoc); - builder.field(Fields.DOC_COUNT, docCount); - builder.field(Fields.DENSITY, getDensity()); - builder.field(Fields.SUM_DOC_FREQ, sumDocFreq); - builder.field(Fields.SUM_TOTAL_TERM_FREQ, sumTotalTermFreq); - builder.field(Fields.SEARCHABLE, isSearchable); - builder.field(Fields.AGGREGATABLE, isAggregatable); + builder.field(TYPE_FIELD, getDisplayType()); + builder.field(MAX_DOC_FIELD, maxDoc); + builder.field(DOC_COUNT_FIELD, docCount); + builder.field(DENSITY_FIELD, getDensity()); + builder.field(SUM_DOC_FREQ_FIELD, sumDocFreq); + builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq); + builder.field(SEARCHABLE_FIELD, isSearchable); + builder.field(AGGREGATABLE_FIELD, isAggregatable); toInnerXContent(builder); builder.endObject(); return builder; } protected void toInnerXContent(XContentBuilder builder) throws IOException { - builder.field(Fields.MIN_VALUE, getMinValue()); - builder.field(Fields.MIN_VALUE_AS_STRING, getMinValueAsString()); - builder.field(Fields.MAX_VALUE, getMaxValue()); - builder.field(Fields.MAX_VALUE_AS_STRING, getMaxValueAsString()); + builder.field(MIN_VALUE_FIELD, getMinValue()); + builder.field(MIN_VALUE_AS_STRING_FIELD, getMinValueAsString()); + builder.field(MAX_VALUE_FIELD, getMaxValue()); + builder.field(MAX_VALUE_AS_STRING_FIELD, getMaxValueAsString()); } @Override @@ -484,8 +502,8 @@ public abstract class FieldStats implements Writeable, ToXContent { @Override protected void toInnerXContent(XContentBuilder builder) throws IOException { - builder.field(Fields.MIN_VALUE, getMinValueAsString()); - builder.field(Fields.MAX_VALUE, getMaxValueAsString()); + builder.field(MIN_VALUE_FIELD, getMinValueAsString()); + builder.field(MAX_VALUE_FIELD, getMaxValueAsString()); } } @@ -598,34 +616,16 @@ public abstract class FieldStats implements Writeable, ToXContent { } } - public static String typeName(byte type) { - switch (type) { - case 0: - return "whole-number"; - case 1: - return "floating-point"; - case 2: - return "date"; - case 3: - return "text"; - case 4: - return "ip"; - default: - throw new IllegalArgumentException("Unknown type."); - } - } - - private final static class Fields { - final static String MAX_DOC = new String("max_doc"); - final static String DOC_COUNT = new String("doc_count"); - final static String DENSITY = new String("density"); - final static String SUM_DOC_FREQ = new String("sum_doc_freq"); - final static String SUM_TOTAL_TERM_FREQ = new String("sum_total_term_freq"); - final static String SEARCHABLE = new String("searchable"); - final static String AGGREGATABLE = new String("aggregatable"); - final static String MIN_VALUE = new String("min_value"); - final static String MIN_VALUE_AS_STRING = new String("min_value_as_string"); - final static String MAX_VALUE = new String("max_value"); - final static String MAX_VALUE_AS_STRING = new String("max_value_as_string"); - } + static final String TYPE_FIELD = new String("type"); + static final String MAX_DOC_FIELD = new String("max_doc"); + static final String DOC_COUNT_FIELD = new String("doc_count"); + static final String DENSITY_FIELD = new String("density"); + static final String SUM_DOC_FREQ_FIELD = new String("sum_doc_freq"); + static final String SUM_TOTAL_TERM_FREQ_FIELD = new String("sum_total_term_freq"); + static final String 
SEARCHABLE_FIELD = new String("searchable"); + static final String AGGREGATABLE_FIELD = new String("aggregatable"); + static final String MIN_VALUE_FIELD = new String("min_value"); + static final String MIN_VALUE_AS_STRING_FIELD = new String("min_value_as_string"); + static final String MAX_VALUE_FIELD = new String("max_value"); + static final String MAX_VALUE_AS_STRING_FIELD = new String("max_value_as_string"); } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java index a1e1110f622..310af30d25e 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -39,11 +39,12 @@ import java.util.List; */ public class FieldStatsRequest extends BroadcastRequest { - public final static String DEFAULT_LEVEL = "cluster"; + public static final String DEFAULT_LEVEL = "cluster"; private String[] fields = Strings.EMPTY_ARRAY; private String level = DEFAULT_LEVEL; private IndexConstraint[] indexConstraints = new IndexConstraint[0]; + private boolean useCache = true; public String[] getFields() { return fields; @@ -56,6 +57,14 @@ public class FieldStatsRequest extends BroadcastRequest { this.fields = fields; } + public void setUseCache(boolean useCache) { + this.useCache = useCache; + } + + public boolean shouldUseCache() { + return useCache; + } + public IndexConstraint[] getIndexConstraints() { return indexConstraints; } @@ -184,6 +193,7 @@ public class FieldStatsRequest extends BroadcastRequest { indexConstraints[i] = new IndexConstraint(in); } level = in.readString(); + useCache = in.readBoolean(); } @Override @@ -201,6 +211,7 @@ public class FieldStatsRequest extends BroadcastRequest { } } out.writeString(level); + out.writeBoolean(useCache); } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java index c0c4d78de9b..1a3a8070e46 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java @@ -45,4 +45,9 @@ public class FieldStatsRequestBuilder extends request().level(level); return this; } + + public FieldStatsRequestBuilder setUseCache(boolean useCache) { + request().setUseCache(useCache); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java index 6705bd0e0b5..85a0d469541 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java @@ -34,6 +34,7 @@ import java.util.Set; public class FieldStatsShardRequest extends BroadcastShardRequest { private String[] fields; + private boolean useCache; public FieldStatsShardRequest() { } @@ -46,22 +47,29 @@ public class FieldStatsShardRequest extends BroadcastShardRequest { fields.add(indexConstraint.getField()); } this.fields = fields.toArray(new String[fields.size()]); + useCache = request.shouldUseCache(); } public String[] getFields() { return fields; } + public boolean shouldUseCache() { + return useCache; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); fields 
= in.readStringArray(); + useCache = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(fields); + out.writeBoolean(useCache); } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java index a3043d3ae35..7cc298729f0 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java @@ -46,7 +46,6 @@ public class FieldStatsShardResponse extends BroadcastShardResponse { return fieldStats; } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java similarity index 88% rename from core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java rename to core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java index fbff98bbf4c..e65f6951432 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsAction.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -45,27 +44,23 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.io.IOException; - -import java.util.Map; -import java.util.HashMap; -import java.util.List; import java.util.ArrayList; -import java.util.Iterator; -import java.util.Set; -import java.util.HashSet; import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicReferenceArray; -public class TransportFieldStatsTransportAction extends +public class TransportFieldStatsAction extends TransportBroadcastAction { private final IndicesService indicesService; @Inject - public TransportFieldStatsTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + public TransportFieldStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) { @@ -117,11 +112,13 @@ public class TransportFieldStatsTransportAction extends if (existing != null) { if (existing.getType() != entry.getValue().getType()) { if (conflicts.containsKey(entry.getKey()) == false) { + FieldStats[] fields = new FieldStats[] {entry.getValue(), existing}; + Arrays.sort(fields, (o1, o2) -> Byte.compare(o1.getType(), o2.getType())); conflicts.put(entry.getKey(), "Field [" + entry.getKey() + 
"] of type [" + - FieldStats.typeName(entry.getValue().getType()) + + fields[0].getDisplayType() + "] conflicts with existing field of type [" + - FieldStats.typeName(existing.getType()) + + fields[1].getDisplayType() + "] in other index."); } } else { @@ -190,29 +187,20 @@ public class TransportFieldStatsTransportAction extends ShardId shardId = request.shardId(); Map> fieldStats = new HashMap<>(); IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex()); - MapperService mapperService = indexServices.mapperService(); IndexShard shard = indexServices.getShard(shardId.id()); try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) { + // Resolve patterns and deduplicate + Set fieldNames = new HashSet<>(); for (String field : request.getFields()) { - Collection matchFields; - if (Regex.isSimpleMatchPattern(field)) { - matchFields = mapperService.simpleMatchToIndexNames(field); - } else { - matchFields = Collections.singleton(field); - } - for (String matchField : matchFields) { - MappedFieldType fieldType = mapperService.fullName(matchField); - if (fieldType == null) { - // ignore. - continue; - } - FieldStats stats = fieldType.stats(searcher.reader()); - if (stats != null) { - fieldStats.put(matchField, stats); - } + fieldNames.addAll(shard.mapperService().simpleMatchToIndexNames(field)); + } + for (String field : fieldNames) { + FieldStats stats = indicesService.getFieldStats(shard, searcher, field, request.shouldUseCache()); + if (stats != null) { + fieldStats.put(field, stats); } } - } catch (IOException e) { + } catch (Exception e) { throw ExceptionsHelper.convertToElastic(e); } return new FieldStatsShardResponse(shardId, fieldStats); diff --git a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java index c3042470daa..5741984d35f 100644 --- a/core/src/main/java/org/elasticsearch/action/get/GetResponse.java +++ b/core/src/main/java/org/elasticsearch/action/get/GetResponse.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -168,4 +169,9 @@ public class GetResponse extends ActionResponse implements Iterable, T super.writeTo(out); getResult.writeTo(out); } + + @Override + public String toString() { + return Strings.toString(this, true); + } } diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 891f1cf6597..e1fe435fd10 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -40,17 +40,17 @@ public class MultiGetResponse extends ActionResponse implements Iterable implements DocumentRequest { +public class IndexRequest extends ReplicatedWriteRequest implements DocumentRequest { /** * Operation type controls if the type of the index operation. 
@@ -145,7 +145,6 @@ public class IndexRequest extends ReplicationRequest implements Do private OpType opType = OpType.INDEX; - private boolean refresh = false; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -542,20 +541,6 @@ public class IndexRequest extends ReplicationRequest implements Do return this.opType; } - /** - * Should a refresh be executed post this index operation causing the operation to - * be searchable. Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public IndexRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public boolean refresh() { - return this.refresh; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. @@ -648,11 +633,9 @@ public class IndexRequest extends ReplicationRequest implements Do routing = in.readOptionalString(); parent = in.readOptionalString(); timestamp = in.readOptionalString(); - ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null; + ttl = in.readOptionalWriteable(TimeValue::new); source = in.readBytesReference(); - opType = OpType.fromId(in.readByte()); - refresh = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); @@ -666,15 +649,9 @@ public class IndexRequest extends ReplicationRequest implements Do out.writeOptionalString(routing); out.writeOptionalString(parent); out.writeOptionalString(timestamp); - if (ttl == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - ttl.writeTo(out); - } + out.writeOptionalWriteable(ttl); out.writeBytesReference(source); out.writeByte(opType.id()); - out.writeBoolean(refresh); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 4116755e4eb..20587bf0ea9 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.index; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.replication.ReplicationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; @@ -33,7 +34,8 @@ import java.util.Map; /** * An index document action request builder. */ -public class IndexRequestBuilder extends ReplicationRequestBuilder { +public class IndexRequestBuilder extends ReplicationRequestBuilder + implements WriteRequestBuilder { public IndexRequestBuilder(ElasticsearchClient client, IndexAction action) { super(client, action, new IndexRequest()); @@ -220,16 +222,6 @@ public class IndexRequestBuilder extends ReplicationRequestBuildertrue. Defaults - * to false. - */ - public IndexRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the version, which will cause the index operation to only be performed if a matching * version exists and no changes happened on the doc since then. 
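IndexRequest gets the same refresh-flag removal, and its ttl field switches from a hand-rolled boolean-plus-value encoding to the optional-writeable stream helpers. A compact sketch of that wire pattern; the method names are illustrative, while the two stream helpers and the TimeValue stream constructor are exactly what the hunk uses:

    private TimeValue ttl; // may legitimately be null

    void writeTtl(StreamOutput out) throws IOException {
        out.writeOptionalWriteable(ttl); // one presence boolean, then the value when non-null
    }

    void readTtl(StreamInput in) throws IOException {
        ttl = in.readOptionalWriteable(TimeValue::new); // null when the presence boolean was false
    }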
diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 3915e23c2ed..fb42ef236d8 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.ReplicationOperation; -import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.action.support.replication.TransportWriteAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -36,16 +36,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.Translog.Location; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; @@ -62,7 +60,7 @@ import org.elasticsearch.transport.TransportService; *
  • allowIdGeneration: If the id is set not, should it be generated. Defaults to true. * */ -public class TransportIndexAction extends TransportReplicationAction { +public class TransportIndexAction extends TransportWriteAction { private final AutoCreateIndex autoCreateIndex; private final boolean allowIdGeneration; @@ -78,7 +76,7 @@ public class TransportIndexAction extends TransportReplicationAction shardOperationOnPrimary(IndexRequest request) throws Exception { - - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - - final WriteResult result = executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); - - final IndexResponse response = result.response; - final Translog.Location location = result.location; - processAfterWrite(request.refresh(), indexShard, location); - return new Tuple<>(response, request); + protected WriteResult onPrimaryShard(IndexRequest request, IndexShard indexShard) throws Exception { + return executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction); } @Override - protected void shardOperationOnReplica(IndexRequest request) { - final ShardId shardId = request.shardId(); - IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - IndexShard indexShard = indexService.getShard(shardId.id()); - final Engine.Index operation = executeIndexRequestOnReplica(request, indexShard); - processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation()); + protected Location onReplicaShard(IndexRequest request, IndexShard indexShard) { + return executeIndexRequestOnReplica(request, indexShard).getTranslogLocation(); } /** @@ -169,7 +155,7 @@ public class TransportIndexAction extends TransportReplicationAction executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception { + public static WriteResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, + MappingUpdatedAction mappingUpdatedAction) throws Exception { Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); final ShardId shardId = indexShard.shardId(); @@ -214,8 +197,8 @@ public class TransportIndexAction extends TransportReplicationAction(new IndexResponse(shardId, request.type(), request.id(), request.version(), created), operation.getTranslogLocation()); + IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created); + return new WriteResult<>(response, operation.getTranslogLocation()); } - } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java index 1eb9337c814..87d45f6ccd1 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestActionFilter.java @@ -104,13 +104,13 @@ public final class IngestActionFilter extends AbstractComponent implements Actio void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener listener) { long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); - executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> { - 
logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", throwable, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()); - bulkRequestModifier.markCurrentItemAsFailed(throwable); - }, (throwable) -> { - if (throwable != null) { - logger.error("failed to execute pipeline for a bulk request", throwable); - listener.onFailure(throwable); + executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { + logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", exception, indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()); + bulkRequestModifier.markCurrentItemAsFailed(exception); + }, (exception) -> { + if (exception != null) { + logger.error("failed to execute pipeline for a bulk request", exception); + listener.onFailure(exception); } else { long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); @@ -132,7 +132,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio return Integer.MAX_VALUE; } - final static class BulkRequestModifier implements Iterator> { + static final class BulkRequestModifier implements Iterator> { final BulkRequest bulkRequest; final Set failedSlots; @@ -162,7 +162,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio return bulkRequest; } else { BulkRequest modifiedBulkRequest = new BulkRequest(); - modifiedBulkRequest.refresh(bulkRequest.refresh()); + modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy()); modifiedBulkRequest.consistencyLevel(bulkRequest.consistencyLevel()); modifiedBulkRequest.timeout(bulkRequest.timeout()); @@ -188,7 +188,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { actionListener.onFailure(e); } }; @@ -197,7 +197,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } } - void markCurrentItemAsFailed(Throwable e) { + void markCurrentItemAsFailed(Exception e) { IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(currentSlot); // We hit a error during preprocessing a request, so we: // 1) Remember the request item slot from the bulk, so that we're done processing all requests we know what failed @@ -210,7 +210,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } - final static class IngestBulkResponseListener implements ActionListener { + static final class IngestBulkResponseListener implements ActionListener { private final long ingestTookInMillis; private final int[] originalSlots; @@ -233,7 +233,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { actionListener.onFailure(e); } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index e1a34413e2c..82cd8d8eb7b 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -35,7 +35,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.PipelineStore; -import org.elasticsearch.ingest.core.IngestInfo; +import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.node.service.NodeService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -80,7 +80,7 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction ingestInfos = new HashMap<>(); - for (NodeInfo nodeInfo : nodeInfos) { + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } pipelineStore.put(clusterService, ingestInfos, request, listener); @@ -90,7 +90,7 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction { @@ -132,13 +132,16 @@ public class SimulatePipelineRequest extends ActionRequest ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } static Parsed parse(Map config, boolean verbose, PipelineStore pipelineStore) throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); - Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorRegistry()); + Pipeline pipeline = PIPELINE_FACTORY.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); List ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index ba7ef195aad..c2703f0224d 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -24,8 +24,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.ingest.core.AbstractProcessorFactory; -import org.elasticsearch.ingest.core.IngestDocument; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; import java.io.IOException; @@ -52,7 +52,7 @@ public class SimulateProcessorResult implements Writeable, ToXContent { public SimulateProcessorResult(StreamInput in) throws IOException { this.processorTag = in.readString(); if (in.readBoolean()) { - this.failure = in.readThrowable(); + this.failure = in.readException(); this.ingestDocument = null; } else { this.ingestDocument = new WriteableIngestDocument(in); @@ -68,7 +68,7 @@ public class SimulateProcessorResult implements Writeable, ToXContent { ingestDocument.writeTo(out); } else { out.writeBoolean(true); - out.writeThrowable(failure); + out.writeException(failure); } } @@ -91,12 +91,12 @@ public class SimulateProcessorResult implements Writeable, ToXContent { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); if (processorTag != null) { - builder.field(AbstractProcessorFactory.TAG_KEY, processorTag); + builder.field(ConfigurationUtils.TAG_KEY, processorTag); } if (failure == null) { ingestDocument.toXContent(builder, params); } else { - ElasticsearchException.renderThrowable(builder, params, failure); + ElasticsearchException.renderException(builder, params, failure); } 
builder.endObject(); return builder; diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java b/core/src/main/java/org/elasticsearch/action/ingest/TrackingResultProcessor.java similarity index 71% rename from core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java rename to core/src/main/java/org/elasticsearch/action/ingest/TrackingResultProcessor.java index af820318d83..4f2383ef1b6 100644 --- a/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/TrackingResultProcessor.java @@ -17,14 +17,14 @@ * under the License. */ -package org.elasticsearch.ingest.processor; +package org.elasticsearch.action.ingest; -import org.elasticsearch.action.ingest.SimulateProcessorResult; -import org.elasticsearch.ingest.core.CompoundProcessor; -import org.elasticsearch.ingest.core.IngestDocument; -import org.elasticsearch.ingest.core.Processor; +import org.elasticsearch.ingest.CompoundProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -34,15 +34,12 @@ public final class TrackingResultProcessor implements Processor { private final Processor actualProcessor; private final List processorResultList; + private final boolean ignoreFailure; - public TrackingResultProcessor(Processor actualProcessor, List processorResultList) { + public TrackingResultProcessor(boolean ignoreFailure, Processor actualProcessor, List processorResultList) { + this.ignoreFailure = ignoreFailure; this.processorResultList = processorResultList; - if (actualProcessor instanceof CompoundProcessor) { - CompoundProcessor trackedCompoundProcessor = decorate((CompoundProcessor) actualProcessor, processorResultList); - this.actualProcessor = trackedCompoundProcessor; - } else { - this.actualProcessor = actualProcessor; - } + this.actualProcessor = actualProcessor; } @Override @@ -51,7 +48,11 @@ public final class TrackingResultProcessor implements Processor { actualProcessor.execute(ingestDocument); processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument))); } catch (Exception e) { - processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e)); + if (ignoreFailure) { + processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument))); + } else { + processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e)); + } throw e; } } @@ -72,7 +73,7 @@ public final class TrackingResultProcessor implements Processor { if (processor instanceof CompoundProcessor) { processors.add(decorate((CompoundProcessor) processor, processorResultList)); } else { - processors.add(new TrackingResultProcessor(processor, processorResultList)); + processors.add(new TrackingResultProcessor(compoundProcessor.isIgnoreFailure(), processor, processorResultList)); } } List onFailureProcessors = new ArrayList<>(compoundProcessor.getProcessors().size()); @@ -80,10 +81,10 @@ public final class TrackingResultProcessor implements Processor { if (processor instanceof CompoundProcessor) { onFailureProcessors.add(decorate((CompoundProcessor) processor, processorResultList)); } else { - onFailureProcessors.add(new TrackingResultProcessor(processor, processorResultList)); + onFailureProcessors.add(new 
TrackingResultProcessor(compoundProcessor.isIgnoreFailure(), processor, processorResultList)); } } - return new CompoundProcessor(processors, onFailureProcessors); + return new CompoundProcessor(compoundProcessor.isIgnoreFailure(), processors, onFailureProcessors); } } diff --git a/core/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java b/core/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java index a0416fe8338..137914701db 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/WriteableIngestDocument.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.ingest.core.IngestDocument; +import org.elasticsearch.ingest.IngestDocument; import java.io.IOException; import java.util.Map; diff --git a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java index b9d094c7e9e..2403c3ee49c 100644 --- a/core/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/core/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -84,7 +84,7 @@ public class MainResponse extends ActionResponse implements ToXContent { super.readFrom(in); nodeName = in.readString(); version = Version.readVersion(in); - clusterName = ClusterName.readClusterName(in); + clusterName = new ClusterName(in); build = Build.readBuild(in); available = in.readBoolean(); } diff --git a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java index 6bf60835490..c37268a52de 100644 --- a/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java +++ b/core/src/main/java/org/elasticsearch/action/main/TransportMainAction.java @@ -37,15 +37,13 @@ import org.elasticsearch.transport.TransportService; public class TransportMainAction extends HandledTransportAction { private final ClusterService clusterService; - private final Version version; @Inject public TransportMainAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, Version version) { + ClusterService clusterService) { super(settings, MainAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MainRequest::new); this.clusterService = clusterService; - this.version = version; } @Override @@ -54,6 +52,7 @@ public class TransportMainAction extends HandledTransportAction shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); final int shardCount = shardsIts.size(); + failIfOverShardCountLimit(clusterService, shardCount); + expectedSuccessfulOps = shardCount; + // we need to add 1 for non active partition, since we count it in the total! 
+ expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); + + firstResults = new AtomicArray<>(shardsIts.size()); + } + + private void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) { final long shardCountLimit = clusterService.getClusterSettings().get(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING); if (shardCount > shardCountLimit) { throw new IllegalArgumentException("Trying to query " + shardCount + " shards, which is over the limit of " @@ -116,11 +125,6 @@ abstract class AbstractSearchAsyncAction + "have a smaller number of larger shards. Update [" + TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey() + "] to a greater value if you really want to query that many shards at the same time."); } - expectedSuccessfulOps = shardCount; - // we need to add 1 for non active partition, since we count it in the total! - expectedTotalOps = shardsIts.totalSizeWith1ForEmpty(); - - firstResults = new AtomicArray<>(shardsIts.size()); } public void start() { @@ -163,7 +167,7 @@ abstract class AbstractSearchAsyncAction } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { onFirstPhaseResult(shardIndex, shard, node.getId(), shardIt, t); } }); @@ -184,9 +188,9 @@ abstract class AbstractSearchAsyncAction if (xTotalOps == expectedTotalOps) { try { innerMoveToSecondPhase(); - } catch (Throwable e) { + } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e); + logger.debug("{}: Failed to execute [{}] while moving to second phase", e, shardIt.shardId(), request); } raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures())); } @@ -197,37 +201,34 @@ abstract class AbstractSearchAsyncAction } void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, - final ShardIterator shardIt, Throwable t) { + final ShardIterator shardIt, Exception e) { // we always add the shard failure for a specific shard instance // we do make sure to clean it on a successful response from a shard SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId()); - addShardFailure(shardIndex, shardTarget, t); + addShardFailure(shardIndex, shardTarget, e); if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { - if (t != null && !TransportActions.isShardNotAvailableException(t)) { - if (shard != null) { - logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t); - } else { - logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t); - } + if (e != null && !TransportActions.isShardNotAvailableException(e)) { + logger.debug("{}: Failed to execute [{}]", e, shard != null ? 
shard.shortSummary() : shardIt.shardId(), request); } else if (logger.isTraceEnabled()) { - logger.trace("{}: Failed to execute [{}]", t, shard, request); + logger.trace("{}: Failed to execute [{}]", e, shard, request); } } final ShardSearchFailure[] shardSearchFailures = buildShardFailures(); if (successfulOps.get() == 0) { if (logger.isDebugEnabled()) { - logger.debug("All shards failed for phase: [{}]", t, firstPhaseName()); + logger.debug("All shards failed for phase: [{}]", e, firstPhaseName()); } // no successful ops, raise an exception - raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures)); + raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", e, shardSearchFailures)); } else { try { innerMoveToSecondPhase(); - } catch (Throwable e) { - raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures)); + } catch (Exception inner) { + inner.addSuppressed(e); + raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", inner, shardSearchFailures)); } } } else { @@ -235,34 +236,28 @@ abstract class AbstractSearchAsyncAction final boolean lastShard = nextShard == null; // trace log this exception if (logger.isTraceEnabled()) { - logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t); + logger.trace("{}: Failed to execute [{}] lastShard [{}]", e, shard != null ? shard.shortSummary() : shardIt.shardId(), + request, lastShard); } if (!lastShard) { try { performFirstPhase(shardIndex, shardIt, nextShard); - } catch (Throwable t1) { - onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1); + } catch (Exception inner) { + inner.addSuppressed(e); + onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, inner); } } else { // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception - if (t != null && !TransportActions.isShardNotAvailableException(t)) { - logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t); + if (e != null && !TransportActions.isShardNotAvailableException(e)) { + logger.debug("{}: Failed to execute [{}] lastShard [{}]", e, + shard != null ? 
shard.shortSummary() : shardIt.shardId(), request, lastShard); } } } } } - private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, - boolean lastShard) { - if (shard != null) { - return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]"; - } else { - return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]"; - } - } - protected final ShardSearchFailure[] buildShardFailures() { AtomicArray shardFailures = this.shardFailures; if (shardFailures == null) { @@ -276,9 +271,9 @@ abstract class AbstractSearchAsyncAction return failures; } - protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) { + protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Exception e) { // we don't aggregate shard failures on non active shards (but do keep the header counts right) - if (TransportActions.isShardNotAvailableException(t)) { + if (TransportActions.isShardNotAvailableException(e)) { return; } @@ -292,26 +287,27 @@ abstract class AbstractSearchAsyncAction } ShardSearchFailure failure = shardFailures.get(shardIndex); if (failure == null) { - shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget)); + shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget)); } else { // the failure is already present, try and not override it with an exception that is less meaningless // for example, getting illegal shard state - if (TransportActions.isReadOverrideException(t)) { - shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget)); + if (TransportActions.isReadOverrideException(e)) { + shardFailures.set(shardIndex, new ShardSearchFailure(e, shardTarget)); } } } - private void raiseEarlyFailure(Throwable t) { + private void raiseEarlyFailure(Exception e) { for (AtomicArray.Entry entry : firstResults.asList()) { try { DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId()); sendReleaseSearchContext(entry.value.id(), node); - } catch (Throwable t1) { - logger.trace("failed to release context", t1); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.trace("failed to release context", inner); } } - listener.onFailure(t); + listener.onFailure(e); } /** @@ -331,8 +327,8 @@ abstract class AbstractSearchAsyncAction try { DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId()); sendReleaseSearchContext(entry.value.queryResult().id(), node); - } catch (Throwable t1) { - logger.trace("failed to release context", t1); + } catch (Exception e) { + logger.trace("failed to release context", e); } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index a3236e9653f..08a1ec5b3de 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -38,6 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; */ public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { + private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); @@ -60,6 +61,25 @@ public class MultiSearchRequest extends ActionRequest implem return 
this; } + /** + * Returns how many of the search requests specified in this multi search request may run concurrently. + */ + public int maxConcurrentSearchRequests() { + return maxConcurrentSearchRequests; + } + + /** + * Sets how many of the search requests specified in this multi search request may run concurrently. + */ + public MultiSearchRequest maxConcurrentSearchRequests(int maxConcurrentSearchRequests) { + if (maxConcurrentSearchRequests < 1) { + throw new IllegalArgumentException("maxConcurrentSearchRequests must be positive"); + } + + this.maxConcurrentSearchRequests = maxConcurrentSearchRequests; + return this; + } + public List<SearchRequest> requests() { return this.requests; } @@ -100,6 +120,7 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implements CompositeIndicesRequest @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); + maxConcurrentSearchRequests = in.readVInt(); int size = in.readVInt(); for (int i = 0; i < size; i++) { SearchRequest request = new SearchRequest(); @@ -111,6 +132,7 @@ public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implements CompositeIndicesRequest @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeVInt(maxConcurrentSearchRequests); out.writeVInt(requests.size()); for (SearchRequest request : requests) { request.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index a0d1e4fb5c5..6cebb73fb4f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -71,4 +71,12 @@ public class MultiSearchRequestBuilder extends ActionRequestBuilder<MultiSearchRequest, MultiSearchResponse, MultiSearchRequestBuilder> { + /** + * Sets how many of the search requests specified in this multi search request may run concurrently. + */ + public MultiSearchRequestBuilder setMaxConcurrentSearchRequests(int maxConcurrentSearchRequests) { + request().maxConcurrentSearchRequests(maxConcurrentSearchRequests); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java --- a/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java +++ b/core/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java - private final Map<String, String> attributes; - - public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map<String, String> attributes) { + public ParsedScrollId(String source, String type, ScrollIdForNode[] context) { this.source = source; this.type = type; this.context = context; - this.attributes = attributes; } public String getSource() { @@ -56,8 +51,4 @@ class ParsedScrollId { public ScrollIdForNode[] getContext() { return context; } - - public Map<String, String> getAttributes() { - return this.attributes; - } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index f7cb72b22e9..e19540e26d5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -89,7 +89,7 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java public class SearchRequest extends ActionRequest<SearchRequest> implements IndicesRequest.Replaceable private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; - private Template template; - public SearchRequest() { } @@ -222,21 +219,6 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements IndicesRequest.Replaceable return source; } - - /** - * The stored template - */ - public void template(Template template) { - this.template = template; - } - - /** - * The stored template - */ - public Template template() { - return template; - } - /** * The type of search to execute. 
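 * <p>
 * For example, a caller could select the two-phase default explicitly; a hypothetical usage sketch, not part of this patch:
 * <pre>
 * SearchRequest request = new SearchRequest("twitter");
 * request.searchType(SearchType.QUERY_THEN_FETCH);
 * </pre>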
*/ @@ -326,7 +308,6 @@ public class SearchRequest extends ActionRequest implements Indic indicesOptions = IndicesOptions.readIndicesOptions(in); requestCache = in.readOptionalBoolean(); - template = in.readOptionalWriteable(Template::new); } @Override @@ -357,6 +338,5 @@ public class SearchRequest extends ActionRequest implements Indic out.writeStringArray(types); indicesOptions.writeIndicesOptions(out); out.writeOptionalBoolean(requestCache); - out.writeOptionalWriteable(template); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index fc28addc111..5c08acb99ea 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -26,10 +26,10 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.Script; -import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.aggregations.AggregatorBuilder; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; +import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.rescore.RescoreBuilder; @@ -166,7 +166,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder queryBuilder) { + public SearchRequestBuilder setQuery(QueryBuilder queryBuilder) { sourceBuilder().query(queryBuilder); return this; } @@ -175,7 +175,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder postFilter) { + public SearchRequestBuilder setPostFilter(QueryBuilder postFilter) { sourceBuilder().postFilter(postFilter); return this; } @@ -252,8 +252,8 @@ public class SearchRequestBuilder extends ActionRequestBuilderfalse. 
@@ -361,19 +379,31 @@ public class SearchRequestBuilder extends ActionRequestBuilder aggregation) { + public SearchRequestBuilder addAggregation(AggregationBuilder aggregation) { sourceBuilder().aggregation(aggregation); return this; } @@ -381,7 +411,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder> getProfileResults() { + @Nullable public Map getProfileResults() { return internalResponse.profile(); } @@ -232,14 +231,6 @@ public class SearchResponse extends ActionResponse implements StatusToXContent { @Override public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } + return Strings.toString(this, true); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index 4e0ee3ff5e5..94ce1887c34 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -103,7 +103,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { executePhase(i, node, target.getScrollId()); } else { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { @@ -116,7 +116,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { DiscoveryNode node = nodes.get(target.getNode()); if (node == null) { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { @@ -138,21 +138,21 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { onPhaseFailure(t, searchId, shardIndex); } }); } - private void onPhaseFailure(Throwable t, long searchId, int shardIndex) { + private void onPhaseFailure(Exception e, long searchId, int shardIndex) { if (logger.isDebugEnabled()) { - logger.debug("[{}] Failed to execute query phase", t, searchId); + logger.debug("[{}] Failed to execute query phase", e, searchId); } - addShardFailure(shardIndex, new ShardSearchFailure(t)); + addShardFailure(shardIndex, new ShardSearchFailure(e)); successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { if (successfulOps.get() == 0) { - listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures())); + listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", e, buildShardFailures())); } else { finishHim(); } @@ -162,7 +162,7 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private void finishHim() { try { innerFinishHim(); - } catch (Throwable e) { + } catch (Exception e) { listener.onFailure(new 
ReduceSearchPhaseException("fetch", "", e, buildShardFailures())); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 8e822302d2f..ac8715eeb9f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -107,13 +107,13 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { executeQueryPhase(i, counter, node, target.getScrollId()); } else { if (logger.isDebugEnabled()) { - logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]"); + logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { try { executeFetchPhase(); - } catch (Throwable e) { + } catch (Exception e) { listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY)); return; } @@ -131,32 +131,33 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { if (counter.decrementAndGet() == 0) { try { executeFetchPhase(); - } catch (Throwable e) { + } catch (Exception e) { onFailure(e); } } } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { onQueryPhaseFailure(shardIndex, counter, searchId, t); } }); } - void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) { + void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) { if (logger.isDebugEnabled()) { - logger.debug("[{}] Failed to execute query phase", t, searchId); + logger.debug("[{}] Failed to execute query phase", failure, searchId); } - addShardFailure(shardIndex, new ShardSearchFailure(t)); + addShardFailure(shardIndex, new ShardSearchFailure(failure)); successfulOps.decrementAndGet(); if (counter.decrementAndGet() == 0) { if (successfulOps.get() == 0) { - listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures())); + listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures())); } else { try { executeFetchPhase(); - } catch (Throwable e) { + } catch (Exception e) { + e.addSuppressed(failure); listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY)); } } @@ -193,7 +194,7 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { if (logger.isDebugEnabled()) { logger.debug("Failed to execute fetch phase", t); } @@ -209,8 +210,8 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private void finishHim() { try { innerFinishHim(); - } catch (Throwable e) { - listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures())); + } catch (Exception e) { + listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures())); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 2a01eb4e1c6..8070081dcd8 
100644 --- a/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/core/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -48,19 +48,19 @@ public class ShardSearchFailure implements ShardOperationFailedException { } - public ShardSearchFailure(Throwable t) { - this(t, null); + public ShardSearchFailure(Exception e) { + this(e, null); } - public ShardSearchFailure(Throwable t, @Nullable SearchShardTarget shardTarget) { - Throwable actual = ExceptionsHelper.unwrapCause(t); + public ShardSearchFailure(Exception e, @Nullable SearchShardTarget shardTarget) { + final Throwable actual = ExceptionsHelper.unwrapCause(e); if (actual != null && actual instanceof SearchException) { this.shardTarget = ((SearchException) actual).shard(); } else if (shardTarget != null) { this.shardTarget = shardTarget; } status = ExceptionsHelper.status(actual); - this.reason = ExceptionsHelper.detailedMessage(t); + this.reason = ExceptionsHelper.detailedMessage(e); this.cause = actual; } @@ -135,7 +135,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { } reason = in.readString(); status = RestStatus.readFrom(in); - cause = in.readThrowable(); + cause = in.readException(); } @Override @@ -148,7 +148,7 @@ public class ShardSearchFailure implements ShardOperationFailedException { } out.writeString(reason); RestStatus.writeTo(out, status); - out.writeThrowable(cause); + out.writeException(cause); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index c2922bf6deb..092b69fc936 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -103,7 +103,7 @@ public class TransportClearScrollAction extends HandledTransportAction { + private final int availableProcessors; private final ClusterService clusterService; - private final TransportSearchAction searchAction; + private final TransportAction searchAction; @Inject public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, TransportSearchAction searchAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + ClusterService clusterService, TransportSearchAction searchAction, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new); this.clusterService = clusterService; this.searchAction = searchAction; + this.availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); + } + + // For testing only: + TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService, + ClusterService clusterService, TransportAction searchAction, + IndexNameExpressionResolver indexNameExpressionResolver, int availableProcessors) { + super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiSearchRequest::new); + this.clusterService = clusterService; + this.searchAction = searchAction; + this.availableProcessors = availableProcessors; } @Override - protected void doExecute(final MultiSearchRequest request, final ActionListener listener) { + 
protected void doExecute(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) { ClusterState clusterState = clusterService.state(); clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<>(request.requests().size()); - final AtomicInteger counter = new AtomicInteger(responses.length()); - for (int i = 0; i < responses.length(); i++) { - final int index = i; - searchAction.execute(request.requests().get(i), new ActionListener<SearchResponse>() { - @Override - public void onResponse(SearchResponse searchResponse) { - responses.set(index, new MultiSearchResponse.Item(searchResponse, null)); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + int maxConcurrentSearches = request.maxConcurrentSearchRequests(); + if (maxConcurrentSearches == 0) { + maxConcurrentSearches = defaultMaxConcurrentSearches(availableProcessors, clusterState); + } - @Override - public void onFailure(Throwable e) { - responses.set(index, new MultiSearchResponse.Item(null, e)); - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + Queue<SearchRequestSlot> searchRequestSlots = new ConcurrentLinkedQueue<>(); + for (int i = 0; i < request.requests().size(); i++) { + SearchRequest searchRequest = request.requests().get(i); + searchRequestSlots.add(new SearchRequestSlot(searchRequest, i)); + } - private void finishHim() { + int numRequests = request.requests().size(); + final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<>(numRequests); + final AtomicInteger responseCounter = new AtomicInteger(numRequests); + int numConcurrentSearches = Math.min(numRequests, maxConcurrentSearches); + for (int i = 0; i < numConcurrentSearches; i++) { + executeSearch(searchRequestSlots, responses, responseCounter, listener); + } + } + + /* + * This is not perfect and makes the big assumption that all nodes have the same search thread pool size / the same + * number of processors, and that the shards of the indices the search requests target are more or less evenly + * distributed across all nodes in the cluster. But it is a good enough default for most cases; if not, the default + * should be overridden in the request itself. + */ + static int defaultMaxConcurrentSearches(int availableProcessors, ClusterState state) { + int numDataNodes = state.getNodes().getDataNodes().size(); + // availableProcessors is bounded at 32, so the default search thread pool size will never be larger than 49, + // but we don't know about other search requests that are being executed, so let's cap at 10 per node + int defaultSearchThreadPoolSize = Math.min(ThreadPool.searchThreadPoolSize(availableProcessors), 10); + return Math.max(1, numDataNodes * defaultSearchThreadPoolSize); + } + + void executeSearch(Queue<SearchRequestSlot> requests, AtomicArray<MultiSearchResponse.Item> responses, + AtomicInteger responseCounter, ActionListener<MultiSearchResponse> listener) { + SearchRequestSlot request = requests.poll(); + if (request == null) { + // Ok... 
so there're no more requests then this is ok, we're then waiting for running requests to complete + return; + } + searchAction.execute(request.request, new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + responses.set(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null)); + handleResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(request.responseSlot, new MultiSearchResponse.Item(null, e)); + handleResponse(); + } + + private void handleResponse() { + if (responseCounter.decrementAndGet() == 0) { listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()]))); + } else { + executeSearch(requests, responses, responseCounter, listener); } - }); + } + }); + } + + static final class SearchRequestSlot { + + final SearchRequest request; + final int responseSlot; + + SearchRequestSlot(SearchRequest request, int responseSlot) { + this.request = request; + this.responseSlot = responseSlot; } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 6e6583bae14..8a33bff8f0e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -42,9 +42,6 @@ import java.util.Set; import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; -/** - * - */ public class TransportSearchAction extends HandledTransportAction { /** The maximum number of shards for a single search request. */ @@ -96,6 +93,10 @@ public class TransportSearchAction extends HandledTransportAction listener) { AbstractSearchAsyncAction searchAsyncAction; switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: @@ -117,6 +118,7 @@ public class TransportSearchAction extends HandledTransportAction searchPhaseResults, - @Nullable Map attributes) throws IOException { + static String buildScrollId(SearchType searchType, AtomicArray searchPhaseResults) throws IOException { if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) { - return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes); + return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults); } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) { - return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults, attributes); + return buildScrollId(ParsedScrollId.QUERY_AND_FETCH_TYPE, searchPhaseResults); } else { throw new IllegalStateException("search_type [" + searchType + "] not supported"); } } - static String buildScrollId(String type, AtomicArray searchPhaseResults, - @Nullable Map attributes) throws IOException { - StringBuilder sb = new StringBuilder().append(type).append(';'); - sb.append(searchPhaseResults.asList().size()).append(';'); - for (AtomicArray.Entry entry : searchPhaseResults.asList()) { - SearchPhaseResult searchPhaseResult = entry.value; - sb.append(searchPhaseResult.id()).append(':').append(searchPhaseResult.shardTarget().nodeId()).append(';'); - } - if (attributes == null) { - sb.append("0;"); - } else { - sb.append(attributes.size()).append(";"); - for (Map.Entry entry : attributes.entrySet()) { - 
sb.append(entry.getKey()).append(':').append(entry.getValue()).append(';'); + static String buildScrollId(String type, AtomicArray searchPhaseResults) throws IOException { + try (RAMOutputStream out = new RAMOutputStream()) { + out.writeString(type); + out.writeVInt(searchPhaseResults.asList().size()); + for (AtomicArray.Entry entry : searchPhaseResults.asList()) { + SearchPhaseResult searchPhaseResult = entry.value; + out.writeLong(searchPhaseResult.id()); + out.writeString(searchPhaseResult.shardTarget().nodeId()); } + byte[] bytes = new byte[(int) out.getFilePointer()]; + out.writeTo(bytes, 0); + return Base64.getUrlEncoder().encodeToString(bytes); } - BytesRef bytesRef = new BytesRef(sb); - return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE); } static ParsedScrollId parseScrollId(String scrollId) { - CharsRefBuilder spare = new CharsRefBuilder(); try { - byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE); - spare.copyUTF8Bytes(decode, 0, decode.length); + byte[] bytes = Base64.getUrlDecoder().decode(scrollId); + ByteArrayDataInput in = new ByteArrayDataInput(bytes); + String type = in.readString(); + ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()]; + for (int i = 0; i < context.length; ++i) { + long id = in.readLong(); + String target = in.readString(); + context[i] = new ScrollIdForNode(target, id); + } + if (in.getPosition() != bytes.length) { + throw new IllegalArgumentException("Not all bytes were read"); + } + return new ParsedScrollId(scrollId, type, context); } catch (Exception e) { - throw new IllegalArgumentException("Failed to decode scrollId", e); + throw new IllegalArgumentException("Cannot parse scroll id", e); } - String[] elements = Strings.splitStringToArray(spare.get(), ';'); - if (elements.length < 2) { - throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); - } - - int index = 0; - String type = elements[index++]; - int contextSize = Integer.parseInt(elements[index++]); - if (elements.length < contextSize + 2) { - throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); - } - - ScrollIdForNode[] context = new ScrollIdForNode[contextSize]; - for (int i = 0; i < contextSize; i++) { - String element = elements[index++]; - int sep = element.indexOf(':'); - if (sep == -1) { - throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); - } - context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep))); - } - Map attributes; - int attributesSize = Integer.parseInt(elements[index++]); - if (attributesSize == 0) { - attributes = emptyMap(); - } else { - attributes = new HashMap<>(attributesSize); - for (int i = 0; i < attributesSize; i++) { - String element = elements[index++]; - int sep = element.indexOf(':'); - attributes.put(element.substring(0, sep), element.substring(sep + 1)); - } - } - return new ParsedScrollId(scrollId, type, context, attributes); } private TransportSearchHelper() { diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index 3bcadda1725..485baaa022b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -74,7 +74,7 @@ public class TransportSearchScrollAction extends HandledTransportAction extends AdapterActionFuture implements 
ListenableActionFuture { - private final static ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); + private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class); final ThreadPool threadPool; volatile Object listeners; @@ -53,7 +53,7 @@ public abstract class AbstractListenableActionFuture extends AdapterAction } public void internalAddListener(ActionListener listener) { - listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener); + listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener, false); boolean executeImmediate = false; synchronized (this) { if (executedListeners) { @@ -102,8 +102,8 @@ public abstract class AbstractListenableActionFuture extends AdapterAction // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread. // here we know we will never block listener.onResponse(actionGet(0)); - } catch (Throwable e) { + } catch (Exception e) { listener.onFailure(e); } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java index d753eda4c69..f536d9e0ceb 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/support/ActionFilter.java @@ -55,7 +55,7 @@ public interface ActionFilter { * filter chain. This base class should serve any action filter implementations that doesn't require * to apply async filtering logic. */ - public static abstract class Simple extends AbstractComponent implements ActionFilter { + public abstract static class Simple extends AbstractComponent implements ActionFilter { protected Simple(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java b/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java index ec2db7a22b2..eab486f4929 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java +++ b/core/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java @@ -98,7 +98,7 @@ public abstract class AdapterActionFuture extends BaseFuture implements } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { setException(e); } diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index 339abcb22bc..d4ddae78225 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -47,7 +47,6 @@ public final class AutoCreateIndex { private final IndexNameExpressionResolver resolver; private final AutoCreate autoCreate; - @Inject public AutoCreateIndex(Settings settings, IndexNameExpressionResolver resolver) { this.resolver = resolver; dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings); diff --git a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 676949e367d..0fe3be1ad63 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -54,11 +54,11 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile this.status = e.status(); } - public DefaultShardOperationFailedException(String index, int shardId, Throwable t) { + public DefaultShardOperationFailedException(String index, int shardId, Throwable reason) { this.index = index; this.shardId = shardId; - this.reason = t; - status = ExceptionsHelper.status(t); + this.reason = reason; + this.status = ExceptionsHelper.status(reason); } @Override @@ -98,7 +98,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile index = in.readString(); } shardId = in.readVInt(); - reason = in.readThrowable(); + reason = in.readException(); status = RestStatus.readFrom(in); } @@ -111,7 +111,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile out.writeString(index); } out.writeVInt(shardId); - out.writeThrowable(reason); + out.writeException(reason); RestStatus.writeTo(out, status); } diff --git a/core/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java b/core/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java index 9e7c2205270..c02de8410cf 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/support/DelegatingActionListener.java @@ -41,7 +41,7 @@ public abstract class DelegatingActionListener, Response extends ActionResponse> extends TransportAction { - protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { + protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier request) { + this(settings, actionName, true, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + } + + protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager()); - transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker, + new TransportHandler()); } class TransportHandler implements TransportRequestHandler { @@ -56,13 +65,13 @@ public abstract class HandledTransportAction extends AbstractListenableActionFuture { public PlainListenableActionFuture(ThreadPool threadPool) { @@ -34,4 +31,5 @@ public class PlainListenableActionFuture extends AbstractListenableActionFutu protected T convert(T response) { return response; } + } diff --git a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index 1eec4c8d9e9..4ff7cdaa7bd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -64,7 +64,7 @@ public final class ThreadedActionListener implements ActionListener(logger, threadPool, ThreadPool.Names.LISTENER, listener); + return new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener, false); } } @@ -72,40 +72,53 @@ public final class ThreadedActionListener implements ActionListener listener; + private final boolean forceExecution; - public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener listener) { + public ThreadedActionListener(ESLogger logger, ThreadPool threadPool, String executor, ActionListener listener, + boolean forceExecution) { this.logger = logger; this.threadPool = threadPool; this.executor = executor; this.listener = listener; + this.forceExecution = forceExecution; } @Override public void onResponse(final Response response) { threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public boolean isForceExecution() { + return forceExecution; + } + @Override protected void doRun() throws Exception { listener.onResponse(response); } @Override - public void onFailure(Throwable t) { - listener.onFailure(t); + public void onFailure(Exception e) { + listener.onFailure(e); } }); } @Override - public void onFailure(final Throwable e) { + public void onFailure(final Exception e) { threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public boolean isForceExecution() { + return forceExecution; + } + @Override protected void doRun() throws Exception { listener.onFailure(e); } @Override - public void onFailure(Throwable t) { - logger.warn("failed to execute failure callback on [{}], failure [{}]", t, listener, e); + public void onFailure(Exception e) { + logger.warn("failed to execute failure callback on [{}], failure [{}]", e, listener, e); } }); } diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java index 79dbf85db65..03408dab77f 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -92,7 +92,7 @@ public abstract class TransportAction, Re } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { taskManager.unregister(task); listener.onFailure(e); } @@ -101,6 +101,10 @@ public abstract class TransportAction, Re return task; } + /** + * Execute the transport action on the local node, returning the {@link Task} used to track its execution and accepting a + * {@link TaskListener} which listens for the completion of the action. 
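+     * <p>
+     * A hypothetical caller, assuming the {@code TaskListener} contract of {@code onResponse(Task, Response)} and
+     * {@code onFailure(Task, Throwable)} (sketch only, not part of this change; {@code handle} stands in for real logic):
+     * <pre>
+     * Task task = action.execute(request, new TaskListener&lt;Response&gt;() {
+     *     public void onResponse(Task task, Response response) { handle(response); }
+     *     public void onFailure(Task task, Throwable e) { handle(e); }
+     * });
+     * </pre>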
+ */ public final Task execute(Request request, TaskListener listener) { Task task = taskManager.register("transport", actionName, request); execute(task, request, new ActionListener() { @@ -113,7 +117,7 @@ public abstract class TransportAction, Re } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { if (task != null) { taskManager.unregister(task); } @@ -133,12 +137,16 @@ public abstract class TransportAction, Re return; } + if (task != null && request.getShouldPersistResult()) { + listener = new PersistentActionListener<>(taskManager, task, listener); + } + if (filters.length == 0) { try { doExecute(task, request, listener); - } catch(Throwable t) { - logger.trace("Error during transport action execution.", t); - listener.onFailure(t); + } catch(Exception e) { + logger.trace("Error during transport action execution.", e); + listener.onFailure(e); } } else { RequestFilterChain requestFilterChain = new RequestFilterChain<>(this, logger); @@ -171,14 +179,14 @@ public abstract class TransportAction, Re if (i < this.action.filters.length) { this.action.filters[i].apply(task, actionName, request, listener, this); } else if (i == this.action.filters.length) { - this.action.doExecute(task, request, new FilteredActionListener(actionName, listener, + this.action.doExecute(task, request, new FilteredActionListener<>(actionName, listener, new ResponseFilterChain<>(this.action.filters, logger))); } else { listener.onFailure(new IllegalStateException("proceed was called too many times")); } - } catch(Throwable t) { - logger.trace("Error during transport action execution.", t); - listener.onFailure(t); + } catch(Exception e) { + logger.trace("Error during transport action execution.", e); + listener.onFailure(e); } } @@ -217,9 +225,9 @@ public abstract class TransportAction, Re } else { listener.onFailure(new IllegalStateException("proceed was called too many times")); } - } catch (Throwable t) { - logger.trace("Error during transport action execution.", t); - listener.onFailure(t); + } catch (Exception e) { + logger.trace("Error during transport action execution.", e); + listener.onFailure(e); } } } @@ -242,8 +250,42 @@ public abstract class TransportAction, Re } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { listener.onFailure(e); } } + + /** + * Wrapper for an action listener that persists the result at the end of the execution + */ + private static class PersistentActionListener implements ActionListener { + private final ActionListener delegate; + private final Task task; + private final TaskManager taskManager; + + private PersistentActionListener(TaskManager taskManager, Task task, ActionListener delegate) { + this.taskManager = taskManager; + this.task = task; + this.delegate = delegate; + } + + @Override + public void onResponse(Response response) { + try { + taskManager.persistResult(task, response, delegate); + } catch (Exception e) { + delegate.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + taskManager.persistResult(task, e, delegate); + } catch (Exception inner) { + inner.addSuppressed(e); + delegate.onFailure(inner); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/TransportActions.java b/core/src/main/java/org/elasticsearch/action/support/TransportActions.java index 950596cb0b6..d27fbaf04f6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/TransportActions.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/TransportActions.java @@ -27,30 +27,23 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.ShardNotFoundException; -/** - */ public class TransportActions { - public static boolean isShardNotAvailableException(Throwable t) { - Throwable actual = ExceptionsHelper.unwrapCause(t); - if (actual instanceof ShardNotFoundException || + public static boolean isShardNotAvailableException(final Throwable e) { + final Throwable actual = ExceptionsHelper.unwrapCause(e); + return (actual instanceof ShardNotFoundException || actual instanceof IndexNotFoundException || actual instanceof IllegalIndexShardStateException || actual instanceof NoShardAvailableActionException || actual instanceof UnavailableShardsException || - actual instanceof AlreadyClosedException) { - return true; - } - return false; + actual instanceof AlreadyClosedException); } /** * If a failure is already present, should this failure override it or not for read operations. */ - public static boolean isReadOverrideException(Throwable t) { - if (isShardNotAvailableException(t)) { - return false; - } - return true; + public static boolean isReadOverrideException(Exception e) { + return !isShardNotAvailableException(e); } + } diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java new file mode 100644 index 00000000000..6379a4fb259 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequest.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Interface implemented by requests that modify the documents in an index like {@link IndexRequest}, {@link UpdateRequest}, and + * {@link BulkRequest}. Rather than implement this directly most implementers should extend {@link ReplicatedWriteRequest}. 
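+ * <p>
+ * An illustrative call site (hypothetical, not part of this change), setting the policy on an index request:
+ * <pre>
+ * IndexRequest request = new IndexRequest("index", "type", "1").source("field", "value");
+ * request.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL);
+ * </pre>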
+ */ +public interface WriteRequest<R extends WriteRequest<R>> extends Streamable { + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + R setRefreshPolicy(RefreshPolicy refreshPolicy); + + /** + * Parse the refresh policy from a string, only modifying it if the string is non-null. Convenient to use with request parsing. + */ + @SuppressWarnings("unchecked") + default R setRefreshPolicy(String refreshPolicy) { + if (refreshPolicy != null) { + setRefreshPolicy(RefreshPolicy.parse(refreshPolicy)); + } + return (R) this; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed and ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + RefreshPolicy getRefreshPolicy(); + + ActionRequestValidationException validate(); + + enum RefreshPolicy implements Writeable { + /** + * Don't refresh after this request. The default. + */ + NONE, + /** + * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful + * to present a consistent view for indices with very low traffic. And it is wonderful for tests! + */ + IMMEDIATE, + /** + * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is + * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs. + */ + WAIT_UNTIL; + + /** + * Parse the string representation of a refresh policy, usually from a request parameter. + */ + public static RefreshPolicy parse(String string) { + switch (string) { + case "false": + return NONE; + /* + * Empty string is IMMEDIATE because that makes "POST /test/test/1?refresh" perform a refresh, which reads well and is what folks + * are used to. + */ + case "": + case "true": + return IMMEDIATE; + case "wait_for": + return WAIT_UNTIL; + } + throw new IllegalArgumentException("Unknown value for refresh: [" + string + "]."); + } + + public static RefreshPolicy readFrom(StreamInput in) throws IOException { + return RefreshPolicy.values()[in.readByte()]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) ordinal()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java new file mode 100644 index 00000000000..eaaf90fd53b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteRequestBuilder.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; + +public interface WriteRequestBuilder> { + WriteRequest request(); + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}, the default). + */ + @SuppressWarnings("unchecked") + default B setRefreshPolicy(RefreshPolicy refreshPolicy) { + request().setRefreshPolicy(refreshPolicy); + return (B) this; + } + + /** + * Parse the refresh policy from a string, only modifying it if the string is non null. Convenient to use with request parsing. + */ + @SuppressWarnings("unchecked") + default B setRefreshPolicy(String refreshPolicy) { + request().setRefreshPolicy(refreshPolicy); + return (B) this; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java new file mode 100644 index 00000000000..e0e4d4f4c98 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/WriteResponse.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.index.IndexSettings; + +/** + * Interface implemented by responses for actions that modify the documents in an index like {@link IndexResponse}, {@link UpdateResponse}, + * and {@link BulkResponse}. Rather than implement this directly most implementers should extend {@link DocWriteResponse}. + */ +public interface WriteResponse { + /** + * Mark the response as having forced a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to + * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only + * set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}). 
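+ * <p>
+ * A hypothetical consumer (sketch only; it assumes a matching {@code forcedRefresh()} accessor on the concrete response):
+ * <pre>
+ * IndexResponse response = client.index(request).actionGet();
+ * if (response.forcedRefresh()) {
+ *     // the write was already made visible to search by a refresh
+ * }
+ * </pre>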
+ */ + void setForcedRefresh(boolean forcedRefresh); +} diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java index ad79285051a..398a8d6c905 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastShardResponse.java @@ -45,8 +45,8 @@ public abstract class BroadcastShardResponse extends TransportResponse { return this.shardId.getIndexName(); } - public int getShardId() { - return this.shardId.id(); + public ShardId getShardId() { + return this.shardId; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 182d922fc39..826d76de83a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -37,7 +37,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestHandler; @@ -144,7 +144,7 @@ public abstract class TransportBroadcastAction() { + transportService.sendRequest(node, transportShardAction, shardRequest, new TransportResponseHandler() { @Override public ShardResponse newInstance() { return newShardResponse(); @@ -199,7 +199,7 @@ public abstract class TransportBroadcastAction request, + String executor) { + this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request, + executor, true); + } + public TransportBroadcastByNodeAction( Settings settings, String actionName, @@ -93,15 +107,18 @@ public abstract class TransportBroadcastByNodeAction request, - String executor) { - super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + String executor, + boolean canTripCircuitBreaker) { + super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, indexNameExpressionResolver, + request); this.clusterService = clusterService; this.transportService = transportService; transportNodeBroadcastAction = actionName + "[n]"; - transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, new BroadcastByNodeTransportRequestHandler()); + transportService.registerRequestHandler(transportNodeBroadcastAction, NodeRequest::new, executor, false, canTripCircuitBreaker, + new BroadcastByNodeTransportRequestHandler()); } private Response newResponse( @@ -282,7 +299,7 @@ public abstract class TransportBroadcastByNodeAction() { + transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new TransportResponseHandler() { @Override public NodeResponse newInstance() { return new NodeResponse(); @@ -323,7 +340,7 @@ public abstract class TransportBroadcastByNodeAction(size); for (int i = 0; i < size; i++) { - 
shards.add(ShardRouting.readShardRoutingEntry(in)); + shards.add(new ShardRouting(in)); } nodeId = in.readString(); } @@ -574,7 +592,7 @@ public abstract class TransportBroadcastByNodeAction request) { - super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + this(settings, actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, request); + } + + protected TransportMasterNodeAction(Settings settings, String actionName, boolean canTripCircuitBreaker, + TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier request) { + super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, indexNameExpressionResolver, + request); this.transportService = transportService; this.clusterService = clusterService; this.executor = executor(); @@ -144,7 +152,7 @@ public abstract class TransportMasterNodeAction request) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); + this(settings, actionName, true, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); + } + + protected TransportMasterNodeReadAction(Settings settings, String actionName, boolean checkSizeLimit, TransportService transportService, + ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { + super(settings, actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver,request); this.forceLocal = FORCE_LOCAL_SETTING.get(settings); } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java index 5176ae52516..663537f25da 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java @@ -21,7 +21,8 @@ package org.elasticsearch.action.support.nodes; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.Strings; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -33,10 +34,24 @@ import java.io.IOException; */ public abstract class BaseNodesRequest> extends ActionRequest { - public static String[] ALL_NODES = Strings.EMPTY_ARRAY; - + /** + * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} + * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds + * will be ignored. + * + * See {@link DiscoveryNodes#resolveNodes} for a full description of the options. + * + * TODO: once we stop using the transport client as a gateway to the cluster, we can get rid of this and resolve it to concrete nodes + * in the rest layer + **/ private String[] nodesIds; + /** + * once {@link #nodesIds} are resolved this will contain the concrete nodes that are part of this request. 
If set, {@link #nodesIds} + * will be ignored and this will be used. + * */ + private DiscoveryNode[] concreteNodes; + private TimeValue timeout; protected BaseNodesRequest() { @@ -47,6 +62,11 @@ public abstract class BaseNodesRequest this.nodesIds = nodesIds; } + protected BaseNodesRequest(DiscoveryNode... concreteNodes) { + this.nodesIds = null; + this.concreteNodes = concreteNodes; + } + public final String[] nodesIds() { return nodesIds; } @@ -72,6 +92,13 @@ public abstract class BaseNodesRequest this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"); return (Request) this; } + public DiscoveryNode[] concreteNodes() { + return concreteNodes; + } + + public void setConcreteNodes(DiscoveryNode[] concreteNodes) { + this.concreteNodes = concreteNodes; + } @Override public ActionRequestValidationException validate() { @@ -82,20 +109,15 @@ public abstract class BaseNodesRequest public void readFrom(StreamInput in) throws IOException { super.readFrom(in); nodesIds = in.readStringArray(); - if (in.readBoolean()) { - timeout = TimeValue.readTimeValue(in); - } + concreteNodes = in.readOptionalArray(DiscoveryNode::new, DiscoveryNode[]::new); + timeout = in.readOptionalWriteable(TimeValue::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(nodesIds); - if (timeout == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - timeout.writeTo(out); - } + out.writeOptionalArray(concreteNodes); + out.writeOptionalWriteable(timeout); } } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java index 01401bc7c6e..462f0b07bd6 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java @@ -22,61 +22,77 @@ package org.elasticsearch.action.support.nodes; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.HashMap; -import java.util.Iterator; +import java.util.List; import java.util.Map; +import java.util.Objects; /** * */ -public abstract class BaseNodesResponse extends ActionResponse implements Iterable { +public abstract class BaseNodesResponse extends ActionResponse { private ClusterName clusterName; - protected TNodeResponse[] nodes; + private List failures; + private List nodes; private Map nodesMap; protected BaseNodesResponse() { } - protected BaseNodesResponse(ClusterName clusterName, TNodeResponse[] nodes) { - this.clusterName = clusterName; - this.nodes = nodes; + protected BaseNodesResponse(ClusterName clusterName, List nodes, List failures) { + this.clusterName = Objects.requireNonNull(clusterName); + this.failures = Objects.requireNonNull(failures); + this.nodes = Objects.requireNonNull(nodes); } /** - * The failed nodes, if set to be captured. + * Get the {@link ClusterName} associated with all of the nodes. + * + * @return Never {@code null}. 
*/ - @Nullable - public FailedNodeException[] failures() { - return null; - } - public ClusterName getClusterName() { - return this.clusterName; + return clusterName; } - public String getClusterNameAsString() { - return this.clusterName.value(); + /** + * Get the failed node exceptions. + * + * @return Never {@code null}. Can be empty. + */ + public List failures() { + return failures; } - public TNodeResponse[] getNodes() { + /** + * Determine if there are any node failures in {@link #failures}. + * + * @return {@code true} if {@link #failures} contains at least 1 {@link FailedNodeException}. + */ + public boolean hasFailures() { + return failures.isEmpty() == false; + } + + /** + * Get the successful node responses. + * + * @return Never {@code null}. Can be empty. + * @see #hasFailures() + */ + public List getNodes() { return nodes; } - public TNodeResponse getAt(int position) { - return nodes[position]; - } - - @Override - public Iterator iterator() { - return getNodesMap().values().iterator(); - } - + /** + * Lazily build and get a map of Node ID to node response. + * + * @return Never {@code null}. Can be empty. + * @see #getNodes() + */ public Map getNodesMap() { if (nodesMap == null) { nodesMap = new HashMap<>(); @@ -90,12 +106,29 @@ public abstract class BaseNodesResponse @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = ClusterName.readClusterName(in); + clusterName = new ClusterName(in); + nodes = readNodesFrom(in); + failures = in.readList(FailedNodeException::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); clusterName.writeTo(out); + writeNodesTo(out, nodes); + out.writeList(failures); } + + /** + * Read the {@link #nodes} from the stream. + * + * @return Never {@code null}. + */ + protected abstract List readNodesFrom(StreamInput in) throws IOException; + + /** + * Write the {@link #nodes} to the stream. 
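+     * <p>
+     * For illustration only (hypothetical subclass, not part of this change), node responses that are {@code Streamable}
+     * can typically be written as
+     * <pre>
+     * protected void writeNodesTo(StreamOutput out, List&lt;NodeResponse&gt; nodes) throws IOException {
+     *     out.writeStreamableList(nodes);
+     * }
+     * </pre>
+     * with {@link #readNodesFrom} mirroring it, e.g. via {@code in.readStreamableList(NodeResponse::new)}, assuming those
+     * stream helpers are available.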
+ */ + protected abstract void writeNodesTo(StreamOutput out, List nodes) throws IOException; + } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index c996d530dce..276484286b4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -24,17 +24,14 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -43,33 +40,41 @@ import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.Supplier; -/** - * - */ -public abstract class TransportNodesAction, NodesResponse extends BaseNodesResponse, NodeRequest extends BaseNodeRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { +public abstract class TransportNodesAction, + NodesResponse extends BaseNodesResponse, + NodeRequest extends BaseNodeRequest, + NodeResponse extends BaseNodeResponse> + extends HandledTransportAction { - protected final ClusterName clusterName; protected final ClusterService clusterService; protected final TransportService transportService; + protected final Class nodeResponseClass; final String transportNodeAction; - protected TransportNodesAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool, + protected TransportNodesAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, Supplier nodeRequest, - String nodeExecutor) { + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier request, Supplier nodeRequest, + String nodeExecutor, + Class nodeResponseClass) { super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); - this.clusterName = clusterName; - this.clusterService = clusterService; - this.transportService = transportService; + this.clusterService = Objects.requireNonNull(clusterService); + this.transportService = 
Objects.requireNonNull(transportService); + this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass); this.transportNodeAction = actionName + "[n]"; - transportService.registerRequestHandler(transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler()); + transportService.registerRequestHandler( + transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler()); } @Override @@ -87,7 +92,46 @@ public abstract class TransportNodesAction responses = new ArrayList<>(); + final List failures = new ArrayList<>(); + + for (int i = 0; i < nodesResponses.length(); ++i) { + Object response = nodesResponses.get(i); + + if (nodeResponseClass.isInstance(response)) { + responses.add(nodeResponseClass.cast(response)); + } else if (response instanceof FailedNodeException) { + failures.add((FailedNodeException)response); + } else { + logger.warn("ignoring unexpected response [{}] of type [{}], expected [{}] or [{}]", + response, response != null ? response.getClass().getName() : null, + nodeResponseClass.getSimpleName(), FailedNodeException.class.getSimpleName()); + } + } + + return newResponse(request, responses, failures); + } + + /** + * Create a new {@link NodesResponse} (multi-node response). + * + * @param request The associated request. + * @param responses All successful node-level responses. + * @param failures All node-level failures. + * @return Never {@code null}. + * @throws NullPointerException if any parameter is {@code null}. + */ + protected abstract NodesResponse newResponse(NodesRequest request, List responses, List failures); protected abstract NodeRequest newNodeRequest(String nodeId, NodesRequest request); @@ -101,20 +145,19 @@ public abstract class TransportNodesAction listener; private final AtomicReferenceArray responses; private final AtomicInteger counter = new AtomicInteger(); @@ -124,26 +167,18 @@ public abstract class TransportNodesAction nodes = clusterState.nodes().getNodes(); - this.nodes = new DiscoveryNode[nodesIds.length]; - for (int i = 0; i < nodesIds.length; i++) { - this.nodes[i] = nodes.get(nodesIds[i]); + if (request.concreteNodes() == null) { + resolveRequest(request, clusterService.state()); + assert request.concreteNodes() != null; } - this.responses = new AtomicReferenceArray<>(this.nodesIds.length); + this.responses = new AtomicReferenceArray<>(request.concreteNodes().length); } void start() { - if (nodesIds.length == 0) { + final DiscoveryNode[] nodes = request.concreteNodes(); + if (nodes.length == 0) { // nothing to notify - threadPool.generic().execute(new Runnable() { - @Override - public void run() { - listener.onResponse(newResponse(request, responses)); - } - }); + threadPool.generic().execute(() -> listener.onResponse(newResponse(request, responses))); return; } TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); @@ -151,10 +186,10 @@ public abstract class TransportNodesAction() { + transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), + new TransportResponseHandler() { @Override public NodeResponse newInstance() { return newNodeResponse(); @@ -187,8 +223,8 @@ public abstract class TransportNodesAction> extends ReplicationRequest implements WriteRequest { + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; + + /** + * Constructor for deserialization. 
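+     * <p>
+     * Fields are populated afterwards by {@link #readFrom}, which restores the {@link RefreshPolicy} written by {@link #writeTo}.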
+ */ + public ReplicatedWriteRequest() { + } + + public ReplicatedWriteRequest(ShardId shardId) { + super(shardId); + } + + @Override + @SuppressWarnings("unchecked") + public R setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return (R) this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + refreshPolicy.writeTo(out); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 1f7313c1943..dc7846a74de 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.TransportActions; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -47,66 +45,86 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Supplier; -public class ReplicationOperation, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { - final private ESLogger logger; - final private Request request; - final private Supplier clusterStateSupplier; - final private String opType; - final private AtomicInteger totalShards = new AtomicInteger(); - final private AtomicInteger pendingShards = new AtomicInteger(); - final private AtomicInteger successfulShards = new AtomicInteger(); - final private boolean executeOnReplicas; - final private boolean checkWriteConsistency; - final private Primary primary; - final private Replicas replicasProxy; - final private AtomicBoolean finished = new AtomicBoolean(); - final protected ActionListener finalResponseListener; +public class ReplicationOperation< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends ReplicationOperation.PrimaryResult + > { + private final ESLogger logger; + private final Request request; + private final Supplier clusterStateSupplier; + private final String opType; + private final AtomicInteger totalShards = new AtomicInteger(); + /** + * The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented when + * they complete: + *
+     * <ul>
+     * <li>The operation on the primary</li>
+     * <li>The operation on each replica</li>
+     * <li>Coordination of the operation as a whole. This prevents the operation from terminating early if we haven't started any replica
+     * operations and the primary finishes.</li>
+     * </ul>
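+     * <p>
+     * The operation only completes once this counter has dropped back to zero; see {@code decPendingAndFinishIfNeeded()} below.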
    + */ + private final AtomicInteger pendingShards = new AtomicInteger(); + private final AtomicInteger successfulShards = new AtomicInteger(); + private final boolean executeOnReplicas; + private final boolean checkWriteConsistency; + private final Primary primary; + private final Replicas replicasProxy; + private final AtomicBoolean finished = new AtomicBoolean(); + protected final ActionListener resultListener; - private volatile Response finalResponse = null; + private volatile PrimaryResultT primaryResult = null; private final List shardReplicaFailures = Collections.synchronizedList(new ArrayList<>()); - ReplicationOperation(Request request, Primary primary, - ActionListener listener, - boolean executeOnReplicas, boolean checkWriteConsistency, - Replicas replicas, - Supplier clusterStateSupplier, ESLogger logger, String opType) { + public ReplicationOperation(Request request, Primary primary, + ActionListener listener, + boolean executeOnReplicas, boolean checkWriteConsistency, + Replicas replicas, + Supplier clusterStateSupplier, ESLogger logger, String opType) { this.checkWriteConsistency = checkWriteConsistency; this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; - this.finalResponseListener = listener; + this.resultListener = listener; this.logger = logger; this.request = request; this.clusterStateSupplier = clusterStateSupplier; this.opType = opType; } - void execute() throws Exception { + public void execute() throws Exception { final String writeConsistencyFailure = checkWriteConsistency ? checkWriteConsistency() : null; - final ShardId shardId = primary.routingEntry().shardId(); + final ShardRouting primaryRouting = primary.routingEntry(); + final ShardId primaryId = primaryRouting.shardId(); if (writeConsistencyFailure != null) { - finishAsFailed(new UnavailableShardsException(shardId, + finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}], request: [{}]", writeConsistencyFailure, request.timeout(), request)); return; } totalShards.incrementAndGet(); - pendingShards.incrementAndGet(); // increase by 1 until we finish all primary coordination - Tuple primaryResponse = primary.perform(request); - successfulShards.incrementAndGet(); // mark primary as successful - finalResponse = primaryResponse.v1(); - ReplicaRequest replicaRequest = primaryResponse.v2(); + pendingShards.incrementAndGet(); + primaryResult = primary.perform(request); + final ReplicaRequest replicaRequest = primaryResult.replicaRequest(); assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term"; if (logger.isTraceEnabled()) { - logger.trace("[{}] op [{}] completed on primary for request [{}]", shardId, opType, request); + logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request); } + + performOnReplicas(primaryId, replicaRequest); + + successfulShards.incrementAndGet(); + decPendingAndFinishIfNeeded(); + } + + private void performOnReplicas(ShardId primaryId, ReplicaRequest replicaRequest) { // we have to get a new state after successfully indexing into the primary in order to honour recovery semantics. // we have to make sure that every operation indexed into the primary after recovery start will also be replicated // to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then. 
// If the index gets deleted after primary operation, we skip replication - List shards = getShards(shardId, clusterStateSupplier.get()); + final List shards = getShards(primaryId, clusterStateSupplier.get()); final String localNodeId = primary.routingEntry().currentNodeId(); for (final ShardRouting shard : shards) { if (executeOnReplicas == false || shard.unassigned()) { @@ -124,9 +142,6 @@ public class ReplicationOperation, R performOnReplica(shard.buildTargetRelocatingShard(), replicaRequest); } } - - // decrement pending and finish (if there are no replicas, or those are done) - decPendingAndFinishIfNeeded(); // incremented in the beginning of this method } private void performOnReplica(final ShardRouting shard, final ReplicaRequest replicaRequest) { @@ -144,7 +159,7 @@ public class ReplicationOperation, R } @Override - public void onFailure(Throwable replicaException) { + public void onFailure(Exception replicaException) { logger.trace("[{}] failure while performing [{}] on replica {}, request [{}]", replicaException, shard.shardId(), opType, shard, replicaRequest); if (ignoreReplicaException(replicaException)) { @@ -165,7 +180,7 @@ public class ReplicationOperation, R }); } - private void onPrimaryDemoted(Throwable demotionFailure) { + private void onPrimaryDemoted(Exception demotionFailure) { String primaryFail = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard", primary.routingEntry()); @@ -241,19 +256,19 @@ public class ReplicationOperation, R failuresArray = new ReplicationResponse.ShardInfo.Failure[shardReplicaFailures.size()]; shardReplicaFailures.toArray(failuresArray); } - finalResponse.setShardInfo(new ReplicationResponse.ShardInfo( + primaryResult.setShardInfo(new ReplicationResponse.ShardInfo( totalShards.get(), successfulShards.get(), failuresArray ) ); - finalResponseListener.onResponse(finalResponse); + resultListener.onResponse(primaryResult); } } - private void finishAsFailed(Throwable throwable) { + private void finishAsFailed(Exception exception) { if (finished.compareAndSet(false, true)) { - finalResponseListener.onFailure(throwable); + resultListener.onFailure(exception); } } @@ -261,7 +276,7 @@ public class ReplicationOperation, R /** * Should an exception be ignored when the operation is performed on the replica. 
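     * <p>
     * Shard-not-available failures and version conflicts are the ignorable categories; see the check in the method body and
     * {@code isConflictException} below.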
*/ - public static boolean ignoreReplicaException(Throwable e) { + public static boolean ignoreReplicaException(Exception e) { if (TransportActions.isShardNotAvailableException(e)) { return true; } @@ -273,62 +288,66 @@ public class ReplicationOperation, R return false; } - public static boolean isConflictException(Throwable e) { - Throwable cause = ExceptionsHelper.unwrapCause(e); + public static boolean isConflictException(Throwable t) { + final Throwable cause = ExceptionsHelper.unwrapCause(t); // on version conflict or document missing, it means // that a new change has crept into the replica, and it's fine - if (cause instanceof VersionConflictEngineException) { - return true; - } - return false; + return cause instanceof VersionConflictEngineException; } - interface Primary, ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> { - - /** routing entry for this primary */ - ShardRouting routingEntry(); - - /** fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master */ - void failShard(String message, Throwable throwable); + public interface Primary< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + PrimaryResultT extends PrimaryResult + > { /** - * Performs the given request on this primary - * - * @return A tuple containing not null values, as first value the result of the primary operation and as second value - * the request to be executed on the replica shards. + * routing entry for this primary */ - Tuple perform(Request request) throws Exception; + ShardRouting routingEntry(); + + /** + * fail the primary, typically due to the fact that the operation has learned the primary has been demoted by the master + */ + void failShard(String message, Exception exception); + + /** + * Performs the given request on this primary. This returns as soon as the request for the replicas is ready and calls a + * listener once the primary request is completed; note that the primary request may complete either before or after this + * method returns. + * + * @param request the request to perform + * @return the request to send to the replicas + */ + PrimaryResultT perform(Request request) throws Exception; }
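+
+    /*
+     * For reference: PrimaryShardReference in TransportReplicationAction (further down in this change) implements this
+     * interface by delegating to shardOperationOnPrimary and stamping the primary term on the replica request:
+     *
+     *     public PrimaryResult perform(Request request) throws Exception {
+     *         PrimaryResult result = shardOperationOnPrimary(request);
+     *         result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm());
+     *         return result;
+     *     }
+     */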
- interface Replicas> { + public interface Replicas> { /** * performs the given request on the specified replica * - * @param replica {@link ShardRouting} of the shard this request should be executed on + * @param replica {@link ShardRouting} of the shard this request should be executed on * @param replicaRequest operation to perform - * @param listener a callback to call once the operation has been complicated, either successfully or with an error. + * @param listener a callback to call once the operation has been completed, either successfully or with an error. */ void performOn(ShardRouting replica, ReplicaRequest replicaRequest, ActionListener listener); /** * Fail the specified shard, removing it from the current set of active shards - * @param replica shard to fail - * @param primary the primary shard that requested the failure - * @param message a (short) description of the reason - * @param throwable the original exception which caused the ReplicationOperation to request the shard to be failed - * @param onSuccess a callback to call when the shard has been successfully removed from the active set. + * @param replica shard to fail + * @param primary the primary shard that requested the failure + * @param message a (short) description of the reason + * @param exception the original exception which caused the ReplicationOperation to request the shard to be failed + * @param onSuccess a callback to call when the shard has been successfully removed from the active set. * @param onPrimaryDemoted a callback to call when the shard can not be failed because the current primary has been demoted - by the master. + * by the master. * @param onIgnoredFailure a callback to call when failing a shard has failed, but that failure can be safely ignored and the - replication operation can finish processing - Note: this callback should be used in extreme situations, typically node shutdown. */ - void failShard(ShardRouting replica, ShardRouting primary, String message, Throwable throwable, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure); + void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, Runnable onSuccess, + Consumer onPrimaryDemoted, Consumer onIgnoredFailure); } public static class RetryOnPrimaryException extends ElasticsearchException { @@ -345,4 +364,11 @@ public class ReplicationOperation, R super(in); } } + + public interface PrimaryResult> { + + R replicaRequest(); + + void setShardInfo(ReplicationResponse.ShardInfo shardInfo); + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 3e88575b717..adb44dd4964 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.admin.indices.refresh.TransportShardRefreshAction; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,7 +40,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.ValidateActions.addValidationError; /** - * + * Requests that are run against a particular shard, first on the primary and then on the replicas, like {@link IndexRequest} or + * {@link TransportShardRefreshAction}.
*/ public abstract class ReplicationRequest> extends ActionRequest implements IndicesRequest { @@ -65,7 +68,6 @@ public abstract class ReplicationRequest, - ReplicaRequest extends ReplicationRequest, - Response extends ReplicationResponse> extends TransportAction { +public abstract class TransportReplicationAction< + Request extends ReplicationRequest, + ReplicaRequest extends ReplicationRequest, + Response extends ReplicationResponse + > extends TransportAction { - final protected TransportService transportService; - final protected ClusterService clusterService; - final protected IndicesService indicesService; - final private ShardStateAction shardStateAction; - final private WriteConsistencyLevel defaultWriteConsistencyLevel; - final private TransportRequestOptions transportOptions; + protected final TransportService transportService; + protected final ClusterService clusterService; + protected final IndicesService indicesService; + private final ShardStateAction shardStateAction; + private final WriteConsistencyLevel defaultWriteConsistencyLevel; + private final TransportRequestOptions transportOptions; + private final String executor; - final private String transportReplicaAction; - final private String transportPrimaryAction; - final private ReplicasProxy replicasProxy; + // package private for testing + final String transportReplicaAction; + final String transportPrimaryAction; + private final ReplicasProxy replicasProxy; protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, IndicesService indicesService, @@ -107,13 +110,14 @@ public abstract class TransportReplicationAction shardOperationOnPrimary(Request shardRequest) throws Exception; + protected abstract PrimaryResult shardOperationOnPrimary(Request shardRequest) throws Exception; /** - * Replica operation on nodes with replica copies + * Synchronous replica operation on nodes with replica copies. This is done under the lock from + * {@link #acquireReplicaOperationLock(ShardId, long, ActionListener)}.
*/ - protected abstract void shardOperationOnReplica(ReplicaRequest shardRequest); + protected abstract ReplicaResult shardOperationOnReplica(ReplicaRequest shardRequest); /** * True if write consistency should be checked for an implementation @@ -192,31 +196,11 @@ public abstract class TransportReplicationAction { - - public final T response; - public final Translog.Location location; - - public WriteResult(T response, Translog.Location location) { - this.response = response; - this.location = location; - } - - @SuppressWarnings("unchecked") - public T response() { - // this sets total, pending and failed to 0 and this is ok, because we will embed this into the replica - // request and not use it - response.setShardInfo(new ReplicationResponse.ShardInfo()); - return (T) response; - } - - } - class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { @@ -225,17 +209,18 @@ public abstract class TransportReplicationAction { + + private final Request request; + private final TransportChannel channel; + private final ReplicationTask replicationTask; + + AsyncPrimaryAction(Request request, TransportChannel channel, ReplicationTask replicationTask) { + this.request = request; + this.channel = channel; + this.replicationTask = replicationTask; + } + + @Override + protected void doRun() throws Exception { + acquirePrimaryShardReference(request.shardId(), this); + } + + @Override + public void onResponse(PrimaryShardReference primaryShardReference) { try { if (primaryShardReference.isRelocated()) { + primaryShardReference.close(); // release shard operation lock as soon as possible setPhase(replicationTask, "primary_delegation"); // delegate primary phase to relocation target // it is safe to execute primary phase on relocation target as there are no more in-flight operations where primary @@ -287,31 +293,42 @@ public abstract class TransportReplicationAction listener = createResponseListener(channel, replicationTask, primaryShardReference); - createReplicatedOperation(request, listener, primaryShardReference, executeOnReplicas).execute(); - success = true; - } - } finally { - if (success == false) { - primaryShardReference.close(); + final ActionListener listener = createResponseListener(primaryShardReference); + createReplicatedOperation(request, new ActionListener() { + @Override + public void onResponse(PrimaryResult result) { + result.respond(listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, primaryShardReference, executeOnReplicas).execute(); } + } catch (Exception e) { + Releasables.closeWhileHandlingException(primaryShardReference); // release shard operation lock before responding to caller + onFailure(e); } } - protected ReplicationOperation - createReplicatedOperation(Request request, ActionListener listener, - PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { - return new ReplicationOperation<>(request, primaryShardReference, listener, - executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName - ); + @Override + public void onFailure(Exception e) { + setPhase(replicationTask, "finished"); + try { + channel.sendResponse(e); + } catch (IOException inner) { + inner.addSuppressed(e); + logger.warn("failed to send response", inner); + } } - private ActionListener createResponseListener(final TransportChannel channel, final ReplicationTask 
replicationTask, - final PrimaryShardReference primaryShardReference) { + private ActionListener createResponseListener(final PrimaryShardReference primaryShardReference) { return new ActionListener() { @Override public void onResponse(Response response) { - finish(); + primaryShardReference.close(); // release shard operation lock before responding to caller + setPhase(replicationTask, "finished"); try { channel.sendResponse(response); } catch (IOException e) { @@ -319,15 +336,10 @@ public abstract class TransportReplicationAction createReplicatedOperation( + Request request, ActionListener listener, + PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + return new ReplicationOperation<>(request, primaryShardReference, listener, + executeOnReplicas, checkWriteConsistency(), replicasProxy, clusterService::state, logger, actionName + ); + } + } + + protected class PrimaryResult implements ReplicationOperation.PrimaryResult { + final ReplicaRequest replicaRequest; + final Response finalResponse; + + public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponse) { + this.replicaRequest = replicaRequest; + this.finalResponse = finalResponse; + } + + @Override + public ReplicaRequest replicaRequest() { + return replicaRequest; + } + + @Override + public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { + finalResponse.setShardInfo(shardInfo); + } + + public void respond(ActionListener listener) { + listener.onResponse(finalResponse); + } + } + + protected class ReplicaResult { + /** + * Public constructor so subclasses can call it. + */ + public ReplicaResult() {} + + public void respond(ActionListener listener) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } } class ReplicaOperationTransportHandler implements TransportRequestHandler { @@ -362,7 +417,7 @@ public abstract class TransportReplicationAction { private final ReplicaRequest request; private final TransportChannel channel; /** @@ -380,9 +435,21 @@ public abstract class TransportReplicationAction { + @Override + public void onResponse(Empty response) { if (logger.isTraceEnabled()) { logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), - request); + request); + } + setPhase(task, "finished"); + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); } } - setPhase(task, "finished"); - channel.sendResponse(TransportResponse.Empty.INSTANCE); + + @Override + public void onFailure(Exception e) { + responseWithFailure(e); + } } } @@ -462,7 +545,7 @@ public abstract class TransportReplicationAction() { + transportService.sendRequest(node, action, request, transportOptions, new TransportResponseHandler() { @Override public Response newInstance() { @@ -612,14 +695,15 @@ public abstract class TransportReplicationAction onReferenceAcquired) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); // we may end up here if the cluster state used to route the primary is so stale that the underlying @@ -700,17 +784,29 @@ public abstract class TransportReplicationAction onAcquired = new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + onReferenceAcquired.onResponse(new PrimaryShardReference(indexShard, releasable)); + } + + @Override + public void onFailure(Exception e) { + onReferenceAcquired.onFailure(e); + } + }; + + indexShard.acquirePrimaryOperationLock(onAcquired, executor); } /** - 
* Acquire an operation on replicas. The lock is closed as soon as - * replication is completed on the node. + * tries to acquire an operation on replicas. The lock is closed as soon as replication is completed on the node. */ - protected Releasable acquireReplicaOperationLock(ShardId shardId, long primaryTerm) { + protected void acquireReplicaOperationLock(ShardId shardId, long primaryTerm, ActionListener onLockAcquired) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); - return indexShard.acquireReplicaOperationLock(primaryTerm); + indexShard.acquireReplicaOperationLock(primaryTerm, onLockAcquired, executor); } /** @@ -721,7 +817,7 @@ public abstract class TransportReplicationAction, Releasable { + class PrimaryShardReference implements ReplicationOperation.Primary, Releasable { private final IndexShard indexShard; private final Releasable operationLock; @@ -741,18 +837,18 @@ public abstract class TransportReplicationAction perform(Request request) throws Exception { - Tuple result = shardOperationOnPrimary(request); - result.v2().primaryTerm(indexShard.getPrimaryTerm()); + public PrimaryResult perform(Request request) throws Exception { + PrimaryResult result = shardOperationOnPrimary(request); + result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm()); return result; } @@ -777,10 +873,10 @@ public abstract class TransportReplicationAction onFailure, Consumer onIgnoredFailure) { + public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, + Runnable onSuccess, Consumer onFailure, Consumer onIgnoredFailure) { shardStateAction.shardFailed( - replica, primary, message, throwable, + replica, primary, message, exception, new ShardStateAction.Listener() { @Override public void onSuccess() { @@ -788,7 +884,7 @@ public abstract class TransportReplicationAction, + Response extends ReplicationResponse & WriteResponse + > extends TransportReplicationAction { + + protected TransportWriteAction(Settings settings, String actionName, TransportService transportService, + ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, + String executor) { + super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + indexNameExpressionResolver, request, request, executor); + } + + /** + * Called on the primary with a reference to the {@linkplain IndexShard} to modify. + */ + protected abstract WriteResult onPrimaryShard(Request request, IndexShard indexShard) throws Exception; + + /** + * Called once per replica with a reference to the {@linkplain IndexShard} to modify. 
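+     * <p>
+     * For illustration only (hypothetical subclass, not part of this change), an index-like action might implement this as
+     * <pre>
+     * protected Translog.Location onReplicaShard(IndexRequest request, IndexShard indexShard) {
+     *     Engine.Index operation = executeIndexRequestOnReplica(request, indexShard);
+     *     return operation.getTranslogLocation();
+     * }
+     * </pre>
+     * where {@code executeIndexRequestOnReplica} is assumed to perform the actual indexing on the given shard.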
+ * + * @return the translog location of the {@linkplain IndexShard} after the write was completed or null if no write occurred + */ + protected abstract Translog.Location onReplicaShard(Request request, IndexShard indexShard); + + @Override + protected final WritePrimaryResult shardOperationOnPrimary(Request request) throws Exception { + IndexShard indexShard = indexShard(request); + WriteResult result = onPrimaryShard(request, indexShard); + return new WritePrimaryResult(request, result.getResponse(), result.getLocation(), indexShard); + } + + @Override + protected final WriteReplicaResult shardOperationOnReplica(Request request) { + IndexShard indexShard = indexShard(request); + Translog.Location location = onReplicaShard(request, indexShard); + return new WriteReplicaResult(indexShard, request, location); + } + + /** + * Fetch the IndexShard for the request. Protected so it can be mocked in tests. + */ + protected IndexShard indexShard(Request request) { + final ShardId shardId = request.shardId(); + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + return indexService.getShard(shardId.id()); + } + + /** + * Simple result from a write action. Write actions have static method to return these so they can integrate with bulk. + */ + public static class WriteResult { + private final Response response; + private final Translog.Location location; + + public WriteResult(Response response, @Nullable Location location) { + this.response = response; + this.location = location; + } + + public Response getResponse() { + return response; + } + + public Translog.Location getLocation() { + return location; + } + } + + /** + * Result of taking the action on the primary. + */ + class WritePrimaryResult extends PrimaryResult implements RespondingWriteResult { + boolean finishedAsyncActions; + ActionListener listener = null; + + public WritePrimaryResult(Request request, Response finalResponse, + @Nullable Translog.Location location, + IndexShard indexShard) { + super(request, finalResponse); + /* + * We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the + * refresh in parallel on the primary and on the replica. + */ + postWriteActions(indexShard, request, location, this, logger); + } + + @Override + public synchronized void respond(ActionListener listener) { + this.listener = listener; + respondIfPossible(); + } + + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. + */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } + } + + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { + finalResponse.setForcedRefresh(forcedRefresh); + finishedAsyncActions = true; + respondIfPossible(); + } + } + + /** + * Result of taking the action on the replica. + */ + class WriteReplicaResult extends ReplicaResult implements RespondingWriteResult { + boolean finishedAsyncActions; + private ActionListener listener; + + public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest request, Translog.Location location) { + postWriteActions(indexShard, request, location, this, logger); + } + + @Override + public void respond(ActionListener listener) { + this.listener = listener; + respondIfPossible(); + } + + /** + * Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}. 
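+     * <p>
+     * Invoked from both {@link #respond} and {@link #respondAfterAsyncAction}; whichever of the two runs last sends the response.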
+ */ + protected void respondIfPossible() { + if (finishedAsyncActions && listener != null) { + super.respond(listener); + } + } + + @Override + public synchronized void respondAfterAsyncAction(boolean forcedRefresh) { + finishedAsyncActions = true; + respondIfPossible(); + } + } + + private interface RespondingWriteResult { + void respondAfterAsyncAction(boolean forcedRefresh); + } + + static void postWriteActions(final IndexShard indexShard, + final WriteRequest request, + @Nullable final Translog.Location location, + final RespondingWriteResult respond, + final ESLogger logger) { + boolean pendingOps = false; + boolean immediateRefresh = false; + switch (request.getRefreshPolicy()) { + case IMMEDIATE: + indexShard.refresh("refresh_flag_index"); + immediateRefresh = true; + break; + case WAIT_UNTIL: + if (location != null) { + pendingOps = true; + indexShard.addRefreshListener(location, forcedRefresh -> { + if (forcedRefresh) { + logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + } + respond.respondAfterAsyncAction(forcedRefresh); + }); + } + break; + case NONE: + break; + } + boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null; + if (fsyncTranslog) { + indexShard.sync(location); + } + indexShard.maybeFlush(); + if (pendingOps == false) { + respond.respondAfterAsyncAction(immediateRefresh); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index 186eb536b08..cb9a6ab9f69 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -121,7 +121,7 @@ public abstract class InstanceShardOperationRequest() { + transportService.sendRequest(node, shardActionName, request, transportOptions(), new TransportResponseHandler() { @Override public Response newInstance() { @@ -193,11 +193,11 @@ public abstract class TransportInstanceSingleOperationAction listener) { this.listener = listener; @@ -159,7 +159,7 @@ public abstract class TransportSingleShardAction() { + transportService.sendRequest(clusterService.localNode(), transportShardAction, internalRequest.request(), new TransportResponseHandler() { @Override public Response newInstance() { return newResponse(); @@ -185,22 +185,22 @@ public abstract class TransportSingleShardAction() { + transportService.sendRequest(node, transportShardAction, internalRequest.request(), new TransportResponseHandler() { @Override public Response newInstance() { @@ -261,13 +261,13 @@ public abstract class TransportSingleShardAction> extends parentTaskId = TaskId.readFromStream(in); nodesIds = in.readStringArray(); actions = in.readStringArray(); - if (in.readBoolean()) { - timeout = TimeValue.readTimeValue(in); - } + timeout = in.readOptionalWriteable(TimeValue::new); } @Override @@ -156,7 +154,7 @@ public class BaseTasksRequest> extends parentTaskId.writeTo(out); out.writeStringArrayNullable(nodesIds); out.writeStringArrayNullable(actions); - out.writeOptionalStreamable(timeout); + out.writeOptionalWriteable(timeout); } public boolean match(Task task) { diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index
78a2de20a89..a3528cb75c4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.TaskId; /** * Builder for task-based requests @@ -36,6 +37,15 @@ public class TasksRequestBuilder< super(client, action, request); } + /** + * Set the task to lookup. + */ + @SuppressWarnings("unchecked") + public final RequestBuilder setTaskId(TaskId taskId) { + request.setTaskId(taskId); + return (RequestBuilder) this; + } + @SuppressWarnings("unchecked") public final RequestBuilder setNodesIds(String... nodesIds) { request.setNodesIds(nodesIds); diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index e3070832417..a30d9c1f254 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -39,7 +38,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.NodeShouldNotConnectException; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -67,7 +66,6 @@ public abstract class TransportTasksAction< TaskResponse extends Writeable > extends HandledTransportAction { - protected final ClusterName clusterName; protected final ClusterService clusterService; protected final TransportService transportService; protected final Supplier requestSupplier; @@ -75,13 +73,12 @@ public abstract class TransportTasksAction< protected final String transportNodeAction; - protected TransportTasksAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool, + protected TransportTasksAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier requestSupplier, Supplier responseSupplier, String nodeExecutor) { super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, requestSupplier); - this.clusterName = clusterName; this.clusterService = clusterService; this.transportService = transportService; this.transportNodeAction = actionName + "[n]"; @@ -127,7 +124,7 @@ public abstract class TransportTasksAction< if (request.getTaskId().isSet()) { return new 
String[]{request.getTaskId().getNodeId()}; } else { - return clusterState.nodes().resolveNodesIds(request.getNodesIds()); + return clusterState.nodes().resolveNodes(request.getNodesIds()); } } @@ -218,9 +215,9 @@ public abstract class TransportTasksAction< // nothing to do try { listener.onResponse(newResponse(request, responses)); - } catch (Throwable t) { - logger.debug("failed to generate empty response", t); - listener.onFailure(t); + } catch (Exception e) { + logger.debug("failed to generate empty response", e); + listener.onFailure(e); } } else { TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); @@ -240,7 +237,7 @@ public abstract class TransportTasksAction< nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); taskManager.registerChildTask(task, node.getId()); transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public NodeTasksResponse newInstance() { return new NodeTasksResponse(); @@ -262,8 +259,8 @@ public abstract class TransportTasksAction< } }); } - } catch (Throwable t) { - onFailure(idx, nodeId, t); + } catch (Exception e) { + onFailure(idx, nodeId, e); } } } @@ -292,9 +289,9 @@ public abstract class TransportTasksAction< TasksResponse finalResponse; try { finalResponse = newResponse(request, responses); - } catch (Throwable t) { - logger.debug("failed to combine responses from nodes", t); - listener.onFailure(t); + } catch (Exception e) { + logger.debug("failed to combine responses from nodes", e); + listener.onFailure(e); return; } listener.onResponse(finalResponse); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 8e442538d77..233d4b0c638 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -40,13 +40,13 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable private String index; private String type; private String id; - private Throwable cause; + private Exception cause; Failure() { } - public Failure(String index, String type, String id, Throwable cause) { + public Failure(String index, String type, String id, Exception cause) { this.index = index; this.type = type; this.id = id; @@ -77,7 +77,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable /** * The failure cause. 
*/ - public Throwable getCause() { + public Exception getCause() { return this.cause; } @@ -92,7 +92,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable index = in.readString(); type = in.readOptionalString(); id = in.readString(); - cause = in.readThrowable(); + cause = in.readException(); } @Override @@ -100,7 +100,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable out.writeString(index); out.writeOptionalString(type); out.writeString(id); - out.writeThrowable(cause); + out.writeException(cause); } } @@ -132,7 +132,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable builder.field(Fields._INDEX, failure.getIndex()); builder.field(Fields._TYPE, failure.getType()); builder.field(Fields._ID, failure.getId()); - ElasticsearchException.renderThrowable(builder, params, failure.getCause()); + ElasticsearchException.renderException(builder, params, failure.getCause()); builder.endObject(); } else { TermVectorsResponse getResponse = response.getResponse(); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 9c745adf864..0ae8824ce8d 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -130,7 +130,7 @@ public final class TermVectorsFields extends Fields { * @param termVectors Stores the actual term vectors as a {@link BytesRef}. */ public TermVectorsFields(BytesReference headerRef, BytesReference termVectors) throws IOException { - StreamInput header = StreamInput.wrap(headerRef.toBytesArray()); + StreamInput header = headerRef.streamInput(); fieldMap = new ObjectLongHashMap<>(); // here we read the header to fill the field offset map String headerString = header.readString(); @@ -201,7 +201,7 @@ public final class TermVectorsFields extends Fields { private int docCount; public TermVector(BytesReference termVectors, long readOffset) throws IOException { - this.perFieldTermVectorInput = StreamInput.wrap(termVectors.toBytesArray()); + this.perFieldTermVectorInput = termVectors.streamInput(); this.readOffset = readOffset; reset(); } diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 6f5bc3dccfa..3f33b2e3901 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -157,7 +158,7 @@ public class TermVectorsRequest extends SingleShardRequest i this.id = other.id(); this.type = other.type(); if (this.doc != null) { - this.doc = other.doc().copyBytesArray(); + this.doc = new BytesArray(other.doc().toBytesRef(), true); } this.flagsEnum = other.getFlags().clone(); this.preference = other.preference(); @@ -594,7 +595,7 @@ public class TermVectorsRequest extends 
SingleShardRequest i } else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) { termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map())); } else if (currentFieldName.equals("filter")) { - termVectorsRequest.filterSettings(readFilterSettings(parser, termVectorsRequest)); + termVectorsRequest.filterSettings(readFilterSettings(parser)); } else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing. termVectorsRequest.index = parser.text(); } else if ("_type".equals(currentFieldName)) { @@ -640,7 +641,7 @@ public class TermVectorsRequest extends SingleShardRequest i return mapStrStr; } - private static FilterSettings readFilterSettings(XContentParser parser, TermVectorsRequest termVectorsRequest) throws IOException { + private static FilterSettings readFilterSettings(XContentParser parser) throws IOException { FilterSettings settings = new FilterSettings(); XContentParser.Token token; String currentFieldName = null; diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 0ef588f2947..964aa00b5c3 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -143,8 +143,8 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent { public Fields getFields() throws IOException { if (hasTermVectors() && isExists()) { if (!sourceCopied) { // make the bytes safe - headerRef = headerRef.copyBytesArray(); - termVectors = termVectors.copyBytesArray(); + headerRef = new BytesArray(headerRef.toBytesRef(), true); + termVectors = new BytesArray(termVectors.toBytesRef(), true); } TermVectorsFields termVectorsFields = new TermVectorsFields(headerRef, termVectors); hasScores = termVectorsFields.hasScores; diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 8c3dc496296..fc44ba64a98 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -107,7 +107,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction { @@ -96,7 +100,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } @Override - protected boolean retryOnFailure(Throwable e) { + protected boolean retryOnFailure(Exception e) { return TransportActions.isShardNotAvailableException(e); } @@ -124,13 +128,14 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } @Override - public void onFailure(Throwable e) { - if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { + public void onFailure(Exception e) { + if (unwrapCause(e) instanceof IndexAlreadyExistsException) { // we have the index, do it try { innerExecute(request, listener); - } catch (Throwable e1) { - listener.onFailure(e1); + } catch (Exception inner) { + inner.addSuppressed(e); + listener.onFailure(inner); } } else { listener.onFailure(e); @@ -152,7 +157,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio return 
clusterState.routingTable().index(request.concreteIndex()).shard(request.getShardId().getId()).primaryShardIt(); } ShardIterator shardIterator = clusterService.operationRouting() - .indexShards(clusterState, request.concreteIndex(), request.type(), request.id(), request.routing()); + .indexShards(clusterState, request.concreteIndex(), request.id(), request.routing()); ShardRouting shard; while ((shard = shardIterator.nextOrNull()) != null) { if (shard.primary()) { @@ -187,13 +192,14 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio } else { update.setGetResult(null); } + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @Override - public void onFailure(Throwable e) { - e = ExceptionsHelper.unwrapCause(e); - if (e instanceof VersionConflictEngineException) { + public void onFailure(Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); @@ -206,7 +212,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio return; } } - listener.onFailure(e); + listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); } }); break; @@ -219,13 +225,14 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio public void onResponse(IndexResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated()); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @Override - public void onFailure(Throwable e) { - e = ExceptionsHelper.unwrapCause(e); - if (e instanceof VersionConflictEngineException) { + public void onFailure(Exception e) { + final Throwable cause = unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override @@ -236,23 +243,25 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio return; } } - listener.onFailure(e); + listener.onFailure(cause instanceof Exception ? 
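A pattern worth noting in the `TransportUpdateAction` hunks: when handling one failure triggers a second (the retry after `IndexAlreadyExistsException` itself throws), the original exception is attached via `addSuppressed` before the inner one is reported, so neither stack trace is lost. A minimal sketch of that idiom:

```java
// Sketch of the suppress-and-report pattern the diff introduces in
// TransportUpdateAction.onFailure: the first failure rides along as a
// suppressed exception on the second.
public class SuppressDemo {

    public static void main(String[] args) {
        Exception original = new RuntimeException("index already exists");
        try {
            throw new IllegalStateException("retry failed");
        } catch (Exception inner) {
            inner.addSuppressed(original);
            System.out.println(inner.getMessage());                    // retry failed
            System.out.println(inner.getSuppressed()[0].getMessage()); // index already exists
        }
    }
}
```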
(Exception) cause : new NotSerializableExceptionWrapper(cause)); } }); break; case DELETE: - deleteAction.execute(result.action(), new ActionListener() { + DeleteRequest deleteRequest = result.action(); + deleteAction.execute(deleteRequest, new ActionListener() { @Override public void onResponse(DeleteResponse response) { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); } @Override - public void onFailure(Throwable e) { - e = ExceptionsHelper.unwrapCause(e); - if (e instanceof VersionConflictEngineException) { + public void onFailure(Exception e) { + final Throwable cause = unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override @@ -263,7 +272,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio return; } } - listener.onFailure(e); + listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); } }); break; diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 9ac77050202..aa92510e38b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -63,13 +61,11 @@ import java.util.Map; public class UpdateHelper extends AbstractComponent { private final ScriptService scriptService; - private final ClusterService clusterService; @Inject - public UpdateHelper(Settings settings, ScriptService scriptService, ClusterService clusterService) { + public UpdateHelper(Settings settings, ScriptService scriptService) { super(settings); this.scriptService = scriptService; - this.clusterService = clusterService; } /** @@ -131,7 +127,7 @@ public class UpdateHelper extends AbstractComponent { // it has to be a "create!" 
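Each listener above now unwraps the failure's cause before checking for `VersionConflictEngineException`, and wraps the cause back into an `Exception` when it is some other `Throwable`. `ExceptionsHelper.unwrapCause` and `NotSerializableExceptionWrapper` are Elasticsearch internals; the sketch below is a simplified stand-in for the shape of that logic, not their exact semantics:

```java
// Simplified stand-in for the unwrap-then-maybe-wrap step in the update
// listeners; the real unwrapCause only unwraps Elasticsearch wrapper
// exceptions, this version naively walks the whole cause chain.
class FailureHandling {

    static Throwable unwrapCause(Throwable t) {
        Throwable result = t;
        while (result.getCause() != null && result.getCause() != result) {
            result = result.getCause();
        }
        return result;
    }

    static Exception asException(Throwable cause) {
        // listeners accept Exception, so a bare Throwable must be wrapped
        return cause instanceof Exception ? (Exception) cause : new RuntimeException(cause);
    }

    public static void main(String[] args) {
        Throwable wrapped = new RuntimeException(new IllegalArgumentException("version conflict"));
        System.out.println(asException(unwrapCause(wrapped)).getMessage()); // version conflict
    }
}
```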
.create(true) .ttl(ttl) - .refresh(request.refresh()) + .setRefreshPolicy(request.getRefreshPolicy()) .routing(request.routing()) .parent(request.parent()) .consistencyLevel(request.consistencyLevel()); @@ -229,12 +225,13 @@ public class UpdateHelper extends AbstractComponent { .version(updateVersion).versionType(request.versionType()) .consistencyLevel(request.consistencyLevel()) .timestamp(timestamp).ttl(ttl) - .refresh(request.refresh()); + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType); } else if ("delete".equals(operation)) { DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .version(updateVersion).versionType(request.versionType()) - .consistencyLevel(request.consistencyLevel()); + .consistencyLevel(request.consistencyLevel()) + .setRefreshPolicy(request.getRefreshPolicy()); return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType); } else if ("none".equals(operation)) { UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false); @@ -250,8 +247,7 @@ public class UpdateHelper extends AbstractComponent { private Map executeScript(Script script, Map ctx) { try { if (scriptService != null) { - ClusterState state = clusterService.state(); - ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE, Collections.emptyMap(), state); + ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE, Collections.emptyMap()); executableScript.setNextVar("ctx", ctx); executableScript.run(); // we need to unwrap the ctx... diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 31f219fd4c7..e0846c1ce5d 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; @@ -53,7 +54,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** */ -public class UpdateRequest extends InstanceShardOperationRequest implements DocumentRequest { +public class UpdateRequest extends InstanceShardOperationRequest + implements DocumentRequest, WriteRequest { private String type; private String id; @@ -72,7 +74,7 @@ public class UpdateRequest extends InstanceShardOperationRequest private VersionType versionType = VersionType.INTERNAL; private int retryOnConflict = 0; - private boolean refresh = false; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; @@ -422,18 +424,15 @@ public class UpdateRequest extends InstanceShardOperationRequest return this.versionType; } - /** - * Should a refresh be executed post this update operation causing the operation to - * be searchable. 
Note, heavy indexing should not set this to true. Defaults - * to false. - */ - public UpdateRequest refresh(boolean refresh) { - this.refresh = refresh; + @Override + public UpdateRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; return this; } - public boolean refresh() { - return this.refresh; + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; } public WriteConsistencyLevel consistencyLevel() { @@ -730,7 +729,7 @@ public class UpdateRequest extends InstanceShardOperationRequest script = new Script(in); } retryOnConflict = in.readVInt(); - refresh = in.readBoolean(); + refreshPolicy = RefreshPolicy.readFrom(in); if (in.readBoolean()) { doc = new IndexRequest(); doc.readFrom(in); @@ -767,7 +766,7 @@ public class UpdateRequest extends InstanceShardOperationRequest script.writeTo(out); } out.writeVInt(retryOnConflict); - out.writeBoolean(refresh); + refreshPolicy.writeTo(out); if (doc == null) { out.writeBoolean(false); } else { diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 30b636f4efc..403f4265fcd 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; @@ -32,9 +33,8 @@ import org.elasticsearch.script.Script; import java.util.Map; -/** - */ -public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder { +public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder + implements WriteRequestBuilder { public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) { super(client, action, new UpdateRequest()); @@ -121,17 +121,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuildertrue. Defaults - * to false. - */ - public UpdateRequestBuilder setRefresh(boolean refresh) { - request.refresh(refresh); - return this; - } - /** * Sets the consistency level of write. 
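The old boolean `refresh` flag is now a `RefreshPolicy` serialized via `refreshPolicy.writeTo(out)` and `RefreshPolicy.readFrom(in)`. The sketch below shows that enum-over-the-wire pattern in plain JDK terms; the constant names reflect my reading of the new API, and the byte ids plus `DataInput` plumbing are illustrative, not Elasticsearch's actual stream format:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative RefreshPolicy-style enum with a stable byte id on the wire,
// mirroring the writeTo/readFrom calls that replace out.writeBoolean(refresh).
enum RefreshPolicy {
    NONE((byte) 0), IMMEDIATE((byte) 1), WAIT_UNTIL((byte) 2);

    private final byte id;

    RefreshPolicy(byte id) { this.id = id; }

    void writeTo(DataOutput out) throws IOException { out.writeByte(id); }

    static RefreshPolicy readFrom(DataInput in) throws IOException {
        byte id = in.readByte();
        for (RefreshPolicy policy : values()) {
            if (policy.id == id) return policy;
        }
        throw new IOException("unknown refresh policy id [" + id + "]");
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        WAIT_UNTIL.writeTo(new DataOutputStream(bytes));
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readFrom(in)); // WAIT_UNTIL
    }
}
```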
Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} */ diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 1131a4a99ec..bdec058b04a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -26,7 +26,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.PidFile; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLogger; @@ -45,7 +44,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.nio.file.Path; -import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -87,11 +85,7 @@ final class Bootstrap { // check if the user is running as root, and bail if (Natives.definitelyRunningAsRoot()) { - if (Boolean.parseBoolean(System.getProperty("es.insecure.allow.root"))) { - logger.warn("running as ROOT user. this is a bad idea!"); - } else { - throw new RuntimeException("don't run elasticsearch as root."); - } + throw new RuntimeException("can not run elasticsearch as root"); } // enable secure computing mode @@ -130,7 +124,7 @@ final class Bootstrap { // force remainder of JNA to be loaded (if available). try { JNAKernel32Library.getInstance(); - } catch (Throwable ignored) { + } catch (Exception ignored) { // we've already logged this. } @@ -151,7 +145,7 @@ final class Bootstrap { private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { initializeNatives( environment.tmpFile(), - BootstrapSettings.MLOCKALL_SETTING.get(settings), + BootstrapSettings.MEMORY_LOCK_SETTING.get(settings), BootstrapSettings.SECCOMP_SETTING.get(settings), BootstrapSettings.CTRLHANDLER_SETTING.get(settings)); @@ -177,15 +171,7 @@ final class Bootstrap { // install SM after natives, shutdown hooks, etc. Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings)); - // We do not need to reload system properties here as we have already applied them in building the settings and - // reloading could cause multiple prompts to the user for values if a system property was specified with a prompt - // placeholder - Settings nodeSettings = Settings.builder() - .put(settings) - .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true) - .build(); - - node = new Node(nodeSettings) { + node = new Node(settings) { @Override protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) { BootstrapCheck.check(settings, boundTransportAddress); @@ -193,13 +179,13 @@ final class Bootstrap { }; } - private static Environment initialSettings(boolean foreground, String pidFile) { + private static Environment initialSettings(boolean foreground, Path pidFile, Map esSettings) { Terminal terminal = foreground ? 
Terminal.DEFAULT : null; Settings.Builder builder = Settings.builder(); - if (Strings.hasLength(pidFile)) { + if (pidFile != null) { builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile); } - return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal); + return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings); } private void start() { @@ -228,16 +214,18 @@ final class Bootstrap { */ static void init( final boolean foreground, - final String pidFile, - final Map esSettings) throws Throwable { + final Path pidFile, + final Map esSettings) throws Exception { // Set the system property before anything has a chance to trigger its use initLoggerPrefix(); - elasticsearchSettings(esSettings); + // force the class initializer for BootstrapInfo to run before + // the security manager is installed + BootstrapInfo.init(); INSTANCE = new Bootstrap(); - Environment environment = initialSettings(foreground, pidFile); + Environment environment = initialSettings(foreground, pidFile, esSettings); Settings settings = environment.settings(); LogConfigurator.configure(settings, true); checkForCustomConfFile(); @@ -246,12 +234,6 @@ final class Bootstrap { PidFile.create(environment.pidFile(), true); } - // warn if running using the client VM - if (JvmInfo.jvmInfo().getVmName().toLowerCase(Locale.ROOT).contains("client")) { - ESLogger logger = Loggers.getLogger(Bootstrap.class); - logger.warn("jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line"); - } - try { if (!foreground) { Loggers.disableConsoleLogging(); @@ -264,6 +246,12 @@ final class Bootstrap { // fail if somebody replaced the lucene jars checkLucene(); + // install the default uncaught exception handler; must be done before security is + // initialized as we do not want to grant the runtime permission + // setDefaultUncaughtExceptionHandler + Thread.setDefaultUncaughtExceptionHandler( + new ElasticsearchUncaughtExceptionHandler(() -> Node.NODE_NAME_SETTING.get(settings))); + INSTANCE.setup(true, settings, environment); INSTANCE.start(); @@ -271,7 +259,7 @@ final class Bootstrap { if (!foreground) { closeSysError(); } - } catch (Throwable e) { + } catch (Exception e) { // disable console logging, so user does not see the exception twice (jvm will show it already) if (foreground) { Loggers.disableConsoleLogging(); @@ -301,13 +289,6 @@ final class Bootstrap { } } - @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") - private static void elasticsearchSettings(Map esSettings) { - for (Map.Entry esSetting : esSettings.entrySet()) { - System.setProperty(esSetting.getKey(), esSetting.getValue()); - } - } - @SuppressForbidden(reason = "System#out") private static void closeSystOut() { System.out.close(); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 2f87086ede4..db74e4b3731 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -20,7 +20,7 @@ package org.elasticsearch.bootstrap; import org.apache.lucene.util.Constants; -import org.apache.lucene.util.SuppressForbidden; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -41,7 +41,6 @@ import 
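Note the ordering constraint called out in the comment above: `Bootstrap.init` installs the default uncaught exception handler before the security manager so the runtime permission `setDefaultUncaughtExceptionHandler` never has to be granted. A toy demonstration of the JDK call itself, with a trivial lambda standing in for `ElasticsearchUncaughtExceptionHandler` (which appears later in this diff):

```java
// Toy demonstration of the Thread.setDefaultUncaughtExceptionHandler call
// Bootstrap.init now performs; the handler here is a stand-in.
public class HandlerDemo {

    public static void main(String[] args) throws InterruptedException {
        Thread.setDefaultUncaughtExceptionHandler((thread, error) ->
                System.err.println("uncaught in [" + thread.getName() + "]: " + error));

        Thread worker = new Thread(() -> { throw new RuntimeException("boom"); }, "worker");
        worker.start();
        worker.join(); // prints: uncaught in [worker]: java.lang.RuntimeException: boom
    }
}
```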
java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; -import java.util.stream.Collectors; /** * We enforce limits once any network host is configured. In this case we assume the node is running in production @@ -63,40 +62,75 @@ final class BootstrapCheck { * @param boundTransportAddress the node network bindings */ static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) { - check(enforceLimits(boundTransportAddress), checks(settings), Node.NODE_NAME_SETTING.get(settings)); + check( + enforceLimits(boundTransportAddress), + BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS.get(settings), + checks(settings), + Node.NODE_NAME_SETTING.get(settings)); } /** * executes the provided checks and fails the node if * enforceLimits is true, otherwise logs warnings * - * @param enforceLimits true if the checks should be enforced or - * warned - * @param checks the checks to execute - * @param nodeName the node name to be used as a logging prefix + * @param enforceLimits true if the checks should be enforced or + * otherwise warned + * @param ignoreSystemChecks true if system checks should be enforced + * or otherwise warned + * @param checks the checks to execute + * @param nodeName the node name to be used as a logging prefix */ // visible for testing - static void check(final boolean enforceLimits, final List checks, final String nodeName) { - final ESLogger logger = Loggers.getLogger(BootstrapCheck.class, nodeName); + static void check(final boolean enforceLimits, final boolean ignoreSystemChecks, final List checks, final String nodeName) { + check(enforceLimits, ignoreSystemChecks, checks, Loggers.getLogger(BootstrapCheck.class, nodeName)); + } - final List errors = - checks.stream() - .filter(BootstrapCheck.Check::check) - .map(BootstrapCheck.Check::errorMessage) - .collect(Collectors.toList()); + /** + * executes the provided checks and fails the node if + * enforceLimits is true, otherwise logs warnings + * + * @param enforceLimits true if the checks should be enforced or + * otherwise warned + * @param ignoreSystemChecks true if system checks should be enforced + * or otherwise warned + * @param checks the checks to execute + * @param logger the logger to + */ + static void check( + final boolean enforceLimits, + final boolean ignoreSystemChecks, + final List checks, + final ESLogger logger) { + final List errors = new ArrayList<>(); + final List ignoredErrors = new ArrayList<>(); + + for (final Check check : checks) { + if (check.check()) { + if ((!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) && !check.alwaysEnforce()) { + ignoredErrors.add(check.errorMessage()); + } else { + errors.add(check.errorMessage()); + } + } + } + + if (!ignoredErrors.isEmpty()) { + ignoredErrors.forEach(error -> log(logger, error)); + } if (!errors.isEmpty()) { final List messages = new ArrayList<>(1 + errors.size()); messages.add("bootstrap checks failed"); messages.addAll(errors); - if (enforceLimits) { - final RuntimeException re = new RuntimeException(String.join("\n", messages)); - errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed); - throw re; - } else { - messages.forEach(message -> logger.warn(message)); - } + final RuntimeException re = new RuntimeException(String.join("\n", messages)); + errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed); + throw re; } + + } + + static void log(final ESLogger logger, final String error) { + logger.warn(error); } /** @@ -118,7 
+152,7 @@ final class BootstrapCheck { final FileDescriptorCheck fileDescriptorCheck = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck(); checks.add(fileDescriptorCheck); - checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings))); + checks.add(new MlockallCheck(BootstrapSettings.MEMORY_LOCK_SETTING.get(settings))); if (Constants.LINUX) { checks.add(new MaxNumberOfThreadsCheck()); } @@ -129,6 +163,9 @@ final class BootstrapCheck { if (Constants.LINUX) { checks.add(new MaxMapCountCheck()); } + checks.add(new ClientJvmCheck()); + checks.add(new OnErrorCheck()); + checks.add(new OnOutOfMemoryErrorCheck()); return Collections.unmodifiableList(checks); } @@ -151,6 +188,18 @@ final class BootstrapCheck { */ String errorMessage(); + /** + * test if the check is a system-level check + * + * @return true if the check is a system-level check as opposed + * to an Elasticsearch-level check + */ + boolean isSystemCheck(); + + default boolean alwaysEnforce() { + return false; + } + } static class HeapSizeCheck implements BootstrapCheck.Check { @@ -183,6 +232,11 @@ final class BootstrapCheck { return JvmInfo.jvmInfo().getConfiguredMaxHeapSize(); } + @Override + public final boolean isSystemCheck() { + return false; + } + } static class OsXFileDescriptorCheck extends FileDescriptorCheck { @@ -197,7 +251,6 @@ final class BootstrapCheck { } - // visible for testing static class FileDescriptorCheck implements Check { private final int limit; @@ -233,9 +286,13 @@ final class BootstrapCheck { return ProcessProbe.getInstance().getMaxFileDescriptorCount(); } + @Override + public final boolean isSystemCheck() { + return true; + } + } - // visible for testing static class MlockallCheck implements Check { private final boolean mlockallSet; @@ -259,6 +316,11 @@ final class BootstrapCheck { return Natives.isMemoryLocked(); } + @Override + public final boolean isSystemCheck() { + return true; + } + } static class MinMasterNodesCheck implements Check { @@ -277,8 +339,14 @@ final class BootstrapCheck { @Override public String errorMessage() { return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + - "] to a majority of the number of master eligible nodes in your cluster."; + "] to a majority of the number of master eligible nodes in your cluster"; } + + @Override + public final boolean isSystemCheck() { + return false; + } + } static class MaxNumberOfThreadsCheck implements Check { @@ -305,6 +373,11 @@ final class BootstrapCheck { return JNANatives.MAX_NUMBER_OF_THREADS; } + @Override + public final boolean isSystemCheck() { + return true; + } + } static class MaxSizeVirtualMemoryCheck implements Check { @@ -333,6 +406,11 @@ final class BootstrapCheck { return JNANatives.MAX_SIZE_VIRTUAL_MEMORY; } + @Override + public final boolean isSystemCheck() { + return true; + } + } static class MaxMapCountCheck implements Check { @@ -396,6 +474,115 @@ final class BootstrapCheck { return Long.parseLong(procSysVmMaxMapCount); } + @Override + public final boolean isSystemCheck() { + return true; + } + + } + + static class ClientJvmCheck implements BootstrapCheck.Check { + + @Override + public boolean check() { + return getVmName().toLowerCase(Locale.ROOT).contains("client"); + } + + // visible for testing + String getVmName() { + return JvmInfo.jvmInfo().getVmName(); + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "JVM is using the client VM [%s] but should be using a server VM for the best 
performance", + getVmName()); + } + + @Override + public final boolean isSystemCheck() { + return false; + } + + } + + abstract static class MightForkCheck implements BootstrapCheck.Check { + + @Override + public boolean check() { + return isSeccompInstalled() && mightFork(); + } + + // visible for testing + boolean isSeccompInstalled() { + return Natives.isSeccompInstalled(); + } + + // visible for testing + abstract boolean mightFork(); + + @Override + public final boolean isSystemCheck() { + return false; + } + + @Override + public final boolean alwaysEnforce() { + return true; + } + + } + + static class OnErrorCheck extends MightForkCheck { + + @Override + boolean mightFork() { + final String onError = onError(); + return onError != null && !onError.equals(""); + } + + // visible for testing + String onError() { + return JvmInfo.jvmInfo().onError(); + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" + + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError", + onError(), + BootstrapSettings.SECCOMP_SETTING.getKey()); + } + + } + + static class OnOutOfMemoryErrorCheck extends MightForkCheck { + + @Override + boolean mightFork() { + final String onOutOfMemoryError = onOutOfMemoryError(); + return onOutOfMemoryError != null && !onOutOfMemoryError.equals(""); + } + + // visible for testing + String onOutOfMemoryError() { + return JvmInfo.jvmInfo().onOutOfMemoryError(); + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" + + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError", + onOutOfMemoryError(), + BootstrapSettings.SECCOMP_SETTING.getKey()); + } + } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index bd693951eb2..791836bf8a4 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -120,4 +120,8 @@ public final class BootstrapInfo { } return SYSTEM_PROPERTIES; } + + public static void init() { + } + } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index 4e9dffc995b..ad37916881b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -31,11 +31,13 @@ public final class BootstrapSettings { public static final Setting SECURITY_FILTER_BAD_DEFAULTS_SETTING = Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope); - public static final Setting MLOCKALL_SETTING = - Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope); + public static final Setting MEMORY_LOCK_SETTING = + Setting.boolSetting("bootstrap.memory_lock", false, Property.NodeScope); public static final Setting SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope); public static final Setting CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope); + public static final Setting IGNORE_SYSTEM_BOOTSTRAP_CHECKS = + Setting.boolSetting("bootstrap.ignore_system_bootstrap_checks", false, Property.NodeScope); } diff --git 
a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index 1cd3a9ad57e..ddc25c88535 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -35,12 +35,12 @@ import java.util.Map; /** custom policy for union of static and dynamic permissions */ final class ESPolicy extends Policy { - + /** template policy file, the one used in tests */ static final String POLICY_RESOURCE = "security.policy"; /** limited policy for scripts */ static final String UNTRUSTED_RESOURCE = "untrusted.policy"; - + final Policy template; final Policy untrusted; final Policy system; @@ -60,7 +60,7 @@ final class ESPolicy extends Policy { } @Override @SuppressForbidden(reason = "fast equals check is desired") - public boolean implies(ProtectionDomain domain, Permission permission) { + public boolean implies(ProtectionDomain domain, Permission permission) { CodeSource codeSource = domain.getCodeSource(); // codesource can be null when reducing privileges via doPrivileged() if (codeSource == null) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index bb1f6cc87d5..9c76fdfb030 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -21,42 +21,43 @@ package org.elasticsearch.bootstrap; import joptsimple.OptionSet; import joptsimple.OptionSpec; -import joptsimple.util.KeyValuePair; +import joptsimple.OptionSpecBuilder; +import joptsimple.util.PathConverter; +import joptsimple.util.PathProperties; import org.elasticsearch.Build; -import org.elasticsearch.cli.Command; import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SettingCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserError; +import org.elasticsearch.cli.UserException; import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; -import java.util.HashMap; import java.util.Map; /** * This class starts elasticsearch. 
*/ -class Elasticsearch extends Command { +class Elasticsearch extends SettingCommand { - private final OptionSpec versionOption; - private final OptionSpec daemonizeOption; - private final OptionSpec pidfileOption; - private final OptionSpec propertyOption; + private final OptionSpecBuilder versionOption; + private final OptionSpecBuilder daemonizeOption; + private final OptionSpec pidfileOption; // visible for testing Elasticsearch() { super("starts elasticsearch"); - // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options versionOption = parser.acceptsAll(Arrays.asList("V", "version"), "Prints elasticsearch version information and exits"); daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"), - "Starts Elasticsearch in the background"); - // TODO: in jopt-simple 5.0 this option type can be a Path + "Starts Elasticsearch in the background") + .availableUnless(versionOption); pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"), "Creates a pid file in the specified path on start") - .withRequiredArg(); - propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class); + .availableUnless(versionOption) + .withRequiredArg() + .withValuesConvertedBy(new PathConverter()); } /** @@ -75,38 +76,27 @@ class Elasticsearch extends Command { } @Override - protected void execute(Terminal terminal, OptionSet options) throws Exception { + protected void execute(Terminal terminal, OptionSet options, Map settings) throws Exception { if (options.nonOptionArguments().isEmpty() == false) { - throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments()); + throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments()); } if (options.has(versionOption)) { if (options.has(daemonizeOption) || options.has(pidfileOption)) { - throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option"); + throw new UserException(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option"); } terminal.println("Version: " + org.elasticsearch.Version.CURRENT - + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() - + ", JVM: " + JvmInfo.jvmInfo().version()); + + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + + ", JVM: " + JvmInfo.jvmInfo().version()); return; } final boolean daemonize = options.has(daemonizeOption); - final String pidFile = pidfileOption.value(options); + final Path pidFile = pidfileOption.value(options); - final Map esSettings = new HashMap<>(); - for (final KeyValuePair kvp : propertyOption.values(options)) { - if (!kvp.key.startsWith("es.")) { - throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] 
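The CLI rewrite leans on two jopt-simple 5.x features visible in the imports above: `availableUnless()` declares the `-V` versus `-d`/`-p` mutual exclusion that was previously hand-checked, and `PathConverter` parses the pid-file argument directly into a `java.nio.file.Path`. A self-contained sketch, assuming jopt-simple 5.x on the classpath:

```java
import java.nio.file.Path;
import java.util.Arrays;

import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.OptionSpecBuilder;
import joptsimple.util.PathConverter;

// Sketch of the jopt-simple 5.x usage adopted by the new Elasticsearch CLI:
// mutual exclusion and Path conversion are handled by the parser itself.
public class CliSketch {

    public static void main(String[] args) throws Exception {
        OptionParser parser = new OptionParser();
        OptionSpecBuilder version = parser.acceptsAll(Arrays.asList("V", "version"), "print version and exit");
        OptionSpecBuilder daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in the background")
                .availableUnless(version);
        OptionSpec<Path> pidfile = parser.acceptsAll(Arrays.asList("p", "pidfile"), "write a pid file")
                .availableUnless(version)
                .withRequiredArg()
                .withValuesConvertedBy(new PathConverter());

        OptionSet options = parser.parse(args); // "-V -d" now fails inside the parser
        if (options.has(version)) {
            System.out.println("version requested");
        } else {
            System.out.println("daemonize=" + options.has(daemonize) + " pidfile=" + pidfile.value(options));
        }
    }
}
```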
but was [" + kvp.key + "]"); - } - if (kvp.value.isEmpty()) { - throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty"); - } - esSettings.put(kvp.key, kvp.value); - } - - init(daemonize, pidFile, esSettings); + init(daemonize, pidFile, settings); } - void init(final boolean daemonize, final String pidFile, final Map esSettings) { + void init(final boolean daemonize, final Path pidFile, final Map esSettings) { try { Bootstrap.init(!daemonize, pidFile, esSettings); } catch (final Throwable t) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java new file mode 100644 index 00000000000..405e919fabd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.apache.lucene.index.MergePolicy; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; + +import java.io.IOError; +import java.util.Objects; +import java.util.function.Supplier; + +class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { + + private final Supplier loggingPrefixSupplier; + + ElasticsearchUncaughtExceptionHandler(final Supplier loggingPrefixSupplier) { + this.loggingPrefixSupplier = Objects.requireNonNull(loggingPrefixSupplier); + } + + @Override + public void uncaughtException(Thread t, Throwable e) { + if (isFatalUncaught(e)) { + try { + onFatalUncaught(t.getName(), e); + } finally { + // we use specific error codes in case the above notification failed, at least we + // will have some indication of the error bringing us down + if (e instanceof InternalError) { + halt(128); + } else if (e instanceof OutOfMemoryError) { + halt(127); + } else if (e instanceof StackOverflowError) { + halt(126); + } else if (e instanceof UnknownError) { + halt(125); + } else if (e instanceof IOError) { + halt(124); + } else { + halt(1); + } + } + } else { + onNonFatalUncaught(t.getName(), e); + } + } + + // visible for testing + static boolean isFatalUncaught(Throwable e) { + return isFatalCause(e) || (e instanceof MergePolicy.MergeException && isFatalCause(e.getCause())); + } + + private static boolean isFatalCause(Throwable cause) { + return cause instanceof Error; + } + + // visible for testing + void onFatalUncaught(final String threadName, final Throwable t) { + final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); + logger.error("fatal error in thread [{}], 
exiting", t, threadName); + } + + // visible for testing + void onNonFatalUncaught(final String threadName, final Throwable t) { + final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get()); + logger.warn("uncaught exception in thread [{}]", t, threadName); + } + + // visible for testing + @SuppressForbidden(reason = "halt") + void halt(int status) { + // we halt to prevent shutdown hooks from running + Runtime.getRuntime().halt(status); + } + +} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index fbd6857d365..50dab6888b6 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -47,8 +47,8 @@ final class JNAKernel32Library { private List callbacks = new ArrayList<>(); // Native library instance must be kept around for the same reason. - private final static class Holder { - private final static JNAKernel32Library instance = new JNAKernel32Library(); + private static final class Holder { + private static final JNAKernel32Library instance = new JNAKernel32Library(); } private JNAKernel32Library() { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index e55d38a0f72..5a8693b3137 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -217,13 +217,13 @@ class JNANatives { if (ret == 1) { LOCAL_SECCOMP_ALL = true; } - } catch (Throwable t) { + } catch (Exception e) { // this is likely to happen unless the kernel is newish, its a best effort at the moment // so we log stacktrace at debug for now... 
if (logger.isDebugEnabled()) { - logger.debug("unable to install syscall filter", t); + logger.debug("unable to install syscall filter", e); } - logger.warn("unable to install syscall filter: ", t); + logger.warn("unable to install syscall filter: ", e); } } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java index afc2b77e211..63de83d88d0 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java @@ -33,6 +33,13 @@ public class JavaVersion implements Comparable { } private JavaVersion(List version) { + if (version.size() >= 2 + && version.get(0).intValue() == 1 + && version.get(1).intValue() == 8) { + // for Java 8 there is ambiguity since both 1.8 and 8 are supported, + // so we rewrite the former to the latter + version = new ArrayList<>(version.subList(1, version.size())); + } this.version = Collections.unmodifiableList(version); } @@ -55,7 +62,7 @@ public class JavaVersion implements Comparable { return value.matches("^0*[0-9]+(\\.[0-9]+)*$"); } - private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version")); + private static final JavaVersion CURRENT = parse(System.getProperty("java.specification.version")); public static JavaVersion current() { return CURRENT; @@ -75,6 +82,19 @@ public class JavaVersion implements Comparable { return 0; } + @Override + public boolean equals(Object o) { + if (o == null || o.getClass() != getClass()) { + return false; + } + return compareTo((JavaVersion) o) == 0; + } + + @Override + public int hashCode() { + return version.hashCode(); + } + @Override public String toString() { return version.stream().map(v -> Integer.toString(v)).collect(Collectors.joining(".")); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java index 46908e60642..cfae761452f 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java @@ -97,7 +97,7 @@ final class Seccomp { // Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering /** Access to non-standard Linux libc methods */ - static interface LinuxLibrary extends Library { + interface LinuxLibrary extends Library { /** * maps to prctl(2) */ @@ -107,7 +107,7 @@ final class Seccomp { * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing */ NativeLong syscall(NativeLong number, Object... args); - }; + } // null if unavailable or something goes wrong. private static final LinuxLibrary linux_libc; @@ -415,7 +415,7 @@ final class Seccomp { // OS X implementation via sandbox(7) /** Access to non-standard OS X libc methods */ - static interface MacLibrary extends Library { + interface MacLibrary extends Library { /** * maps to sandbox_init(3), since Leopard */ @@ -489,7 +489,7 @@ final class Seccomp { // Solaris implementation via priv_set(3C) /** Access to non-standard Solaris libc methods */ - static interface SolarisLibrary extends Library { + interface SolarisLibrary extends Library { /** * see priv_set(3C), a convenience method for setppriv(2). */ @@ -611,7 +611,7 @@ final class Seccomp { * This is best effort and OS and architecture dependent. It may throw any Throwable. 
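The `JavaVersion` change above resolves the "1.8" versus "8" ambiguity by rewriting a leading `[1, 8]` to `[8]` in the constructor, so both spellings produce the same component list (and the new `equals`/`hashCode` agree with `compareTo`). The normalization in isolation:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Isolated restatement of the normalization added to JavaVersion's
// constructor: "1.8" and "8" collapse to the same component list.
class VersionNormalization {

    static List<Integer> normalize(List<Integer> version) {
        if (version.size() >= 2 && version.get(0) == 1 && version.get(1) == 8) {
            return new ArrayList<>(version.subList(1, version.size()));
        }
        return version;
    }

    public static void main(String[] args) {
        System.out.println(normalize(Arrays.asList(1, 8, 0))); // [8, 0]
        System.out.println(normalize(Arrays.asList(8, 0)));    // [8, 0]
    }
}
```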
* @return 0 if we can do this for application threads, 1 for the entire process */ - static int init(Path tmpFile) throws Throwable { + static int init(Path tmpFile) throws Exception { if (Constants.LINUX) { return linuxImpl(); } else if (Constants.MAC_OS_X) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index 4437097bb35..05d2c8c1bfd 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -20,6 +20,7 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.SecureSM; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -119,7 +120,7 @@ final class Security { Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults)); // enable security manager - System.setSecurityManager(new SecureSM()); + System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap." })); // do some basic tests selfTest(); @@ -244,7 +245,7 @@ final class Security { addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink"); addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink"); addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink"); - addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink"); + addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink"); addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink"); addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink"); // read-write dirs @@ -256,8 +257,10 @@ final class Security { for (Path path : environment.dataFiles()) { addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete"); } + // TODO: this should be removed in ES 6.0! 
We will no longer support data paths with the cluster as a folder + assert Version.CURRENT.major < 6 : "cluster name is no longer used in data path"; for (Path path : environment.dataWithClusterFiles()) { - addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete"); + addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete"); } for (Path path : environment.repoFiles()) { addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete"); @@ -318,6 +321,27 @@ final class Security { policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); } + /** + * Add access to a directory iff it exists already + * @param policy current policy to add permissions to + * @param configurationName the configuration name associated with the path (for error messages only) + * @param path the path itself + * @param permissions set of filepermissions to grant to the path + */ + static void addPathIfExists(Permissions policy, String configurationName, Path path, String permissions) { + if (Files.isDirectory(path)) { + // add each path twice: once for itself, again for files underneath it + policy.add(new FilePermission(path.toString(), permissions)); + policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions)); + try { + path.getFileSystem().provider().checkAccess(path.toRealPath(), AccessMode.READ); + } catch (IOException e) { + throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e); + } + } + } + + /** * Ensures configured directory {@code path} exists. * @throws IOException if {@code path} exists, but is not a directory, not accessible, or broken symbolic link. diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java index 1fc7c9fe74f..2e896759ebb 100644 --- a/core/src/main/java/org/elasticsearch/cli/Command.java +++ b/core/src/main/java/org/elasticsearch/cli/Command.java @@ -19,15 +19,15 @@ package org.elasticsearch.cli; -import java.io.IOException; -import java.util.Arrays; - import joptsimple.OptionException; import joptsimple.OptionParser; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.elasticsearch.common.SuppressForbidden; +import java.io.IOException; +import java.util.Arrays; + /** * An action to execute within a cli. 
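The new `addPathIfExists` helper above grants `FilePermission`s for the legacy data-with-cluster-name directory only when that directory already exists, so fresh installs neither create nor depend on it. A trimmed sketch of the same helper (the real-path accessibility check from the original is omitted here):

```java
import java.io.FilePermission;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.Permissions;

// Trimmed sketch of Security.addPathIfExists: permissions are granted twice,
// once for the directory itself and once for everything beneath it, and only
// if the directory is already present.
class PolicySketch {

    static void addPathIfExists(Permissions policy, Path path, String permissions) {
        if (Files.isDirectory(path)) {
            policy.add(new FilePermission(path.toString(), permissions));
            policy.add(new FilePermission(path + path.getFileSystem().getSeparator() + "-", permissions));
        }
    }

    public static void main(String[] args) {
        Permissions policy = new Permissions();
        addPathIfExists(policy, Paths.get("/tmp"), "read,readlink");
        System.out.println(policy.elements().hasMoreElements()); // true if /tmp exists
    }
}
```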
*/ @@ -41,7 +41,8 @@ public abstract class Command { private final OptionSpec helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp(); private final OptionSpec silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output"); - private final OptionSpec verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output"); + private final OptionSpec verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output") + .availableUnless(silentOption); public Command(String description) { this.description = description; @@ -55,7 +56,7 @@ public abstract class Command { printHelp(terminal); terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); return ExitCodes.USAGE; - } catch (UserError e) { + } catch (UserException e) { if (e.exitCode == ExitCodes.USAGE) { printHelp(terminal); } @@ -77,10 +78,6 @@ public abstract class Command { } if (options.has(silentOption)) { - if (options.has(verboseOption)) { - // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it - throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together"); - } terminal.setVerbosity(Terminal.Verbosity.SILENT); } else if (options.has(verboseOption)) { terminal.setVerbosity(Terminal.Verbosity.VERBOSE); @@ -110,6 +107,7 @@ public abstract class Command { /** * Executes this command. * - * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */ + * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */ protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; + } diff --git a/core/src/main/java/org/elasticsearch/cli/MultiCommand.java b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java index a9feee0c9bf..16754cd7bf1 100644 --- a/core/src/main/java/org/elasticsearch/cli/MultiCommand.java +++ b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -60,11 +60,11 @@ public class MultiCommand extends Command { } String[] args = arguments.values(options).toArray(new String[0]); if (args.length == 0) { - throw new UserError(ExitCodes.USAGE, "Missing command"); + throw new UserException(ExitCodes.USAGE, "Missing command"); } Command subcommand = subcommands.get(args[0]); if (subcommand == null) { - throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]"); + throw new UserException(ExitCodes.USAGE, "Unknown command [" + args[0] + "]"); } subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal); } diff --git a/core/src/main/java/org/elasticsearch/cli/SettingCommand.java b/core/src/main/java/org/elasticsearch/cli/SettingCommand.java new file mode 100644 index 00000000000..17f7c9e5204 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/SettingCommand.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import joptsimple.util.KeyValuePair; + +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +public abstract class SettingCommand extends Command { + + private final OptionSpec settingOption; + + public SettingCommand(String description) { + super(description); + this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + final Map settings = new HashMap<>(); + for (final KeyValuePair kvp : settingOption.values(options)) { + if (kvp.value.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty"); + } + settings.put(kvp.key, kvp.value); + } + + putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf"); + putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data"); + putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home"); + putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs"); + + execute(terminal, options, settings); + } + + protected static void putSystemPropertyIfSettingIsMissing(final Map settings, final String setting, final String key) { + final String value = System.getProperty(key); + if (value != null) { + if (settings.containsKey(setting)) { + final String message = + String.format( + Locale.ROOT, + "duplicate setting [%s] found via command-line [%s] and system property [%s]", + setting, + settings.get(setting), + value); + throw new IllegalArgumentException(message); + } else { + settings.put(setting, value); + } + } + } + + protected abstract void execute(Terminal terminal, OptionSet options, Map settings) throws Exception; + +} diff --git a/core/src/main/java/org/elasticsearch/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java index d2dc57263dc..58eb5012d07 100644 --- a/core/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java @@ -19,6 +19,8 @@ package org.elasticsearch.cli; +import org.elasticsearch.common.SuppressForbidden; + import java.io.BufferedReader; import java.io.Console; import java.io.IOException; @@ -26,8 +28,6 @@ import java.io.InputStreamReader; import java.io.PrintWriter; import java.nio.charset.Charset; -import org.elasticsearch.common.SuppressForbidden; - /** * A Terminal wraps access to reading input and writing output for a cli. * @@ -81,8 +81,13 @@ public abstract class Terminal { /** Prints a line to the terminal at {@code verbosity} level. */ public final void println(Verbosity verbosity, String msg) { + print(verbosity, msg + lineSeparator); + } + + /** Prints message to the terminal at {@code verbosity} level, without a newline. 
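`SettingCommand` centralizes the `-E key=value` handling and backfills `path.*` settings from the old `es.path.*` system properties, but only when the command line did not already supply them; supplying both is an error. The backfill rule in isolation:

```java
import java.util.HashMap;
import java.util.Map;

// Isolated restatement of SettingCommand.putSystemPropertyIfSettingIsMissing:
// a system property fills in a setting only when -E did not provide it.
class SettingBackfill {

    static void putIfMissing(Map<String, String> settings, String setting, String sysProp) {
        String value = System.getProperty(sysProp);
        if (value == null) {
            return;
        }
        if (settings.containsKey(setting)) {
            throw new IllegalArgumentException(
                    "duplicate setting [" + setting + "] found via command line and system property");
        }
        settings.put(setting, value);
    }

    public static void main(String[] args) {
        System.setProperty("es.path.home", "/opt/es");
        Map<String, String> settings = new HashMap<>();
        putIfMissing(settings, "path.home", "es.path.home");
        System.out.println(settings); // {path.home=/opt/es}
    }
}
```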
*/ + public final void print(Verbosity verbosity, String msg) { if (this.verbosity.ordinal() >= verbosity.ordinal()) { - getWriter().print(msg + lineSeparator); + getWriter().print(msg); getWriter().flush(); } } diff --git a/core/src/main/java/org/elasticsearch/cli/UserError.java b/core/src/main/java/org/elasticsearch/cli/UserException.java similarity index 85% rename from core/src/main/java/org/elasticsearch/cli/UserError.java rename to core/src/main/java/org/elasticsearch/cli/UserException.java index 2a4f2bf1233..a7f88ccab4a 100644 --- a/core/src/main/java/org/elasticsearch/cli/UserError.java +++ b/core/src/main/java/org/elasticsearch/cli/UserException.java @@ -22,13 +22,13 @@ package org.elasticsearch.cli; /** * An exception representing a user fixable problem in {@link Command} usage. */ -public class UserError extends Exception { +public class UserException extends Exception { /** The exist status the cli should use when catching this user error. */ public final int exitCode; - /** Constructs a UserError with an exit status and message to show the user. */ - public UserError(int exitCode, String msg) { + /** Constructs a UserException with an exit status and message to show the user. */ + public UserException(int exitCode, String msg) { super(msg); this.exitCode = exitCode; } diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index 47e8e43f37b..0cf22d7a2c4 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -42,12 +42,6 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.percolate.MultiPercolateRequest; -import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; -import org.elasticsearch.action.percolate.MultiPercolateResponse; -import org.elasticsearch.action.percolate.PercolateRequest; -import org.elasticsearch.action.percolate.PercolateRequestBuilder; -import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollRequestBuilder; import org.elasticsearch.action.search.ClearScrollResponse; @@ -419,36 +413,6 @@ public interface Client extends ElasticsearchClient, Releasable { */ MultiTermVectorsRequestBuilder prepareMultiTermVectors(); - /** - * Percolates a request returning the matches documents. - */ - ActionFuture percolate(PercolateRequest request); - - /** - * Percolates a request returning the matches documents. - */ - void percolate(PercolateRequest request, ActionListener listener); - - /** - * Percolates a request returning the matches documents. - */ - PercolateRequestBuilder preparePercolate(); - - /** - * Performs multiple percolate requests. - */ - ActionFuture multiPercolate(MultiPercolateRequest request); - - /** - * Performs multiple percolate requests. - */ - void multiPercolate(MultiPercolateRequest request, ActionListener listener); - - /** - * Performs multiple percolate requests. - */ - MultiPercolateRequestBuilder prepareMultiPercolate(); - /** * Computes a score explanation for the specified request. 
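The `Terminal` change above makes `println` a thin wrapper over a new newline-free `print`, so progress-style output passes through the same verbosity gate. A condensed, compilable form of that refactor:

```java
import java.io.PrintWriter;

// Condensed form of the Terminal refactor: println delegates to print, and
// both honor the terminal's configured verbosity threshold.
abstract class TerminalSketch {

    enum Verbosity { SILENT, NORMAL, VERBOSE }

    private final Verbosity verbosity = Verbosity.NORMAL;

    abstract PrintWriter getWriter();

    final void println(Verbosity v, String msg) {
        print(v, msg + System.lineSeparator());
    }

    final void print(Verbosity v, String msg) {
        if (verbosity.ordinal() >= v.ordinal()) {
            getWriter().print(msg);
            getWriter().flush();
        }
    }

    public static void main(String[] args) {
        TerminalSketch t = new TerminalSketch() {
            final PrintWriter writer = new PrintWriter(System.out, true);
            @Override PrintWriter getWriter() { return writer; }
        };
        t.print(Verbosity.NORMAL, "no newline, ");
        t.println(Verbosity.VERBOSE, "suppressed"); // above the NORMAL threshold
        t.println(Verbosity.NORMAL, "then newline");
    }
}
```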
diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
index 37886239195..9e0d1a94119 100644
--- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
+++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
@@ -39,6 +39,9 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
@@ -96,9 +99,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRespo
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
-import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest;
-import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder;
-import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse;
 import org.elasticsearch.action.ingest.DeletePipelineRequest;
 import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder;
 import org.elasticsearch.action.ingest.GetPipelineRequest;
@@ -112,6 +112,7 @@ import org.elasticsearch.action.ingest.SimulatePipelineResponse;
 import org.elasticsearch.action.ingest.WritePipelineResponse;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.tasks.TaskId;
 
 /**
  * Administrative actions/operations against indices.
 *
@@ -303,6 +304,34 @@
      */
     ListTasksRequestBuilder prepareListTasks(String... nodesIds);
 
+    /**
+     * Get a task.
+     *
+     * @param request the request
+     * @return the result future
+     * @see org.elasticsearch.client.Requests#getTaskRequest()
+     */
+    ActionFuture<GetTaskResponse> getTask(GetTaskRequest request);
+
+    /**
+     * Get a task.
+     *
+     * @param request the request
+     * @param listener A listener to be notified with the result
+     * @see org.elasticsearch.client.Requests#getTaskRequest()
+     */
+    void getTask(GetTaskRequest request, ActionListener<GetTaskResponse> listener);
+
+    /**
+     * Fetch a task by id.
+     */
+    GetTaskRequestBuilder prepareGetTask(String taskId);
+
+    /**
+     * Fetch a task by id.
+     */
+    GetTaskRequestBuilder prepareGetTask(TaskId taskId);
+
     /**
      * Cancel tasks
      *
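The get-task methods added above surface a single task by id. A minimal usage sketch; the client variable and the node/task ids are hypothetical, and the blocking get() is for brevity:

```java
import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.tasks.TaskId;

// Hypothetical lookup of one task, the Java-API analogue of GET _tasks/{node}:{id}.
final class GetTaskExample {

    static GetTaskResponse fetchTask(Client client, String nodeId, long taskId) {
        // prepareGetTask(TaskId) is one of the interface methods added above;
        // getTask(request, listener) is the asynchronous alternative.
        return client.admin().cluster()
                .prepareGetTask(new TaskId(nodeId, taskId))
                .get();
    }
}
```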
@@ -504,28 +533,6 @@ public interface ClusterAdminClient extends ElasticsearchClient {
      */
     SnapshotsStatusRequestBuilder prepareSnapshotStatus();
 
-
-    /**
-     * Return the rendered search request for a given search template.
-     *
-     * @param request The request
-     * @return The result future
-     */
-    ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(RenderSearchTemplateRequest request);
-
-    /**
-     * Return the rendered search request for a given search template.
-     *
-     * @param request The request
-     * @param listener A listener to be notified of the result
-     */
-    void renderSearchTemplate(RenderSearchTemplateRequest request, ActionListener<RenderSearchTemplateResponse> listener);
-
-    /**
-     * Return the rendered search request for a given search template.
-     */
-    RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate();
-
     /**
      * Stores an ingest pipeline
      */
diff --git a/core/src/main/java/org/elasticsearch/client/FilterClient.java b/core/src/main/java/org/elasticsearch/client/FilterClient.java
index d2ea209a8c2..d0f52282c76 100644
--- a/core/src/main/java/org/elasticsearch/client/FilterClient.java
+++ b/core/src/main/java/org/elasticsearch/client/FilterClient.java
@@ -24,6 +24,8 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
 
 /**
@@ -42,7 +44,15 @@ public abstract class FilterClient extends AbstractClient {
      * @see #in()
      */
     public FilterClient(Client in) {
-        super(in.settings(), in.threadPool());
+        this(in.settings(), in.threadPool(), in);
+    }
+
+    /**
+     * A constructor that allows passing settings and a threadpool separately. This is useful if the
+     * client is a proxy and not yet fully constructed, i.e. both dependencies are not available yet.
+     */
+    protected FilterClient(Settings settings, ThreadPool threadPool, Client in) {
+        super(settings, threadPool);
+        this.in = in;
+    }
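The protected FilterClient constructor above exists for proxy-style clients whose delegate is not yet fully constructed; for the common case, wrapping an existing client still works as before. A minimal delegating-client sketch; the counting behavior is hypothetical, and the generic bounds follow the AbstractClient signatures as reconstructed in this diff:

```java
import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;

// Hypothetical filter that counts requests before delegating to the wrapped client.
public class CountingClient extends FilterClient {

    private final AtomicLong requests = new AtomicLong();

    public CountingClient(Client in) {
        super(in); // settings() and threadPool() are taken from the wrapped client
    }

    @Override
    protected <Request extends ActionRequest, Response extends ActionResponse,
            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        requests.incrementAndGet();
        super.doExecute(action, request, listener);
    }

    public long requestCount() {
        return requests.get();
    }
}
```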
diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
index a475ce15d4e..24d190c68a1 100644
--- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
+++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
@@ -80,6 +80,9 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder;
+import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
@@ -92,6 +95,9 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRespons
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
+import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest;
+import org.elasticsearch.action.admin.indices.shrink.ShrinkRequestBuilder;
+import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
@@ -782,4 +788,34 @@ public interface IndicesAdminClient extends ElasticsearchClient {
      */
     GetSettingsRequestBuilder prepareGetSettings(String... indices);
 
+    /**
+     * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index.
+     */
+    ShrinkRequestBuilder prepareShrinkIndex(String sourceIndex, String targetIndex);
+
+    /**
+     * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index.
+     */
+    ActionFuture<ShrinkResponse> shrinkIndex(ShrinkRequest request);
+
+    /**
+     * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index.
+     */
+    void shrinkIndex(ShrinkRequest request, ActionListener<ShrinkResponse> listener);
+
+    /**
+     * Swaps the index pointed to by an alias, given that all provided conditions are satisfied.
+     */
+    RolloverRequestBuilder prepareRolloverIndex(String sourceAlias);
+
+    /**
+     * Swaps the index pointed to by an alias, given that all provided conditions are satisfied.
+     */
+    ActionFuture<RolloverResponse> rolloverIndex(RolloverRequest request);
+
+    /**
+     * Swaps the index pointed to by an alias, given that all provided conditions are satisfied.
+     */
+    void rolloverIndex(RolloverRequest request, ActionListener<RolloverResponse> listener);
+
 }
diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java
index 276bd9d9062..6d652bf39d0 100644
--- a/core/src/main/java/org/elasticsearch/client/Requests.java
+++ b/core/src/main/java/org/elasticsearch/client/Requests.java
@@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
@@ -406,6 +407,16 @@ public class Requests {
         return new ListTasksRequest();
     }
 
+    /**
+     * Creates a get task request.
+     *
+     * @return The get task request
+     * @see org.elasticsearch.client.ClusterAdminClient#getTask(GetTaskRequest)
+     */
+    public static GetTaskRequest getTaskRequest() {
+        return new GetTaskRequest();
+    }
+
     /**
      * Creates a nodes tasks request against one or more nodes. Pass null or an empty array for all nodes.
 *
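The shrink and rollover methods above are the new index-lifecycle entry points on the admin client. A minimal usage sketch; the index and alias names are made up, the blocking get() is for brevity, and conditions on the rollover request are omitted:

```java
import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse;
import org.elasticsearch.client.Client;

// Hypothetical admin usage: shrink a month's index into a smaller copy, then
// roll the write alias over to a fresh index.
final class ShrinkRolloverExample {

    static void shrinkThenRollover(Client client) {
        ShrinkResponse shrink = client.admin().indices()
                .prepareShrinkIndex("logs-2016-06", "logs-2016-06-shrunk")
                .get();

        RolloverResponse rollover = client.admin().indices()
                .prepareRolloverIndex("logs-write") // "logs-write" is assumed to be an alias
                .get();
    }
}
```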
diff --git a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
index 3e9bed9e25d..e68b902e259 100644
--- a/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
+++ b/core/src/main/java/org/elasticsearch/client/node/NodeClient.java
@@ -26,26 +26,28 @@ import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.GenericAction;
 import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.Client;
 import org.elasticsearch.client.support.AbstractClient;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskListener;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.Map;
 
-import static java.util.Collections.unmodifiableMap;
-
 /**
- *
+ * Client that executes actions on the local node.
 */
 public class NodeClient extends AbstractClient {
 
-    private final Map<GenericAction, TransportAction> actions;
+    private Map<GenericAction, TransportAction> actions;
 
-    @Inject
-    public NodeClient(Settings settings, ThreadPool threadPool, Map<GenericAction, TransportAction> actions) {
+    public NodeClient(Settings settings, ThreadPool threadPool) {
         super(settings, threadPool);
-        this.actions = unmodifiableMap(actions);
+    }
+
+    public void initialize(Map<GenericAction, TransportAction> actions) {
+        this.actions = actions;
     }
 
     @Override
@@ -53,14 +55,50 @@ public class NodeClient extends AbstractClient {
         // nothing really to do
     }
 
-    @SuppressWarnings("unchecked")
     @Override
-    public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
-            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+    public < Request extends ActionRequest,
+             Response extends ActionResponse,
+             RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
+           > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+        // Discard the task because the Client interface doesn't use it.
+        executeLocally(action, request, listener);
+    }
+
+    /**
+     * Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking an {@link ActionListener}. Prefer this
+     * method if you don't need access to the task when listening for the response. This is the method used to implement the {@link Client}
+     * interface.
+     */
+    public < Request extends ActionRequest,
+             Response extends ActionResponse
+           > Task executeLocally(GenericAction<Request, Response> action, Request request, ActionListener<Response> listener) {
+        return transportAction(action).execute(request, listener);
+    }
+
+    /**
+     * Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking a {@link TaskListener}. Prefer this
+     * method if you need access to the task when listening for the response.
+     */
+    public < Request extends ActionRequest,
+             Response extends ActionResponse
+           > Task executeLocally(GenericAction<Request, Response> action, Request request, TaskListener<Response> listener) {
+        return transportAction(action).execute(request, listener);
+    }
+
+    /**
+     * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available.
+ */ + @SuppressWarnings("unchecked") + private < Request extends ActionRequest, + Response extends ActionResponse + > TransportAction transportAction(GenericAction action) { + if (actions == null) { + throw new IllegalStateException("NodeClient has not been initialized"); + } TransportAction transportAction = actions.get(action); if (transportAction == null) { throw new IllegalStateException("failed to find action [" + action + "] to execute"); } - transportAction.execute(request, listener); + return transportAction; } } diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 6083422862c..c3816d8d37f 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -49,6 +49,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksActio import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; +import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; +import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder; @@ -109,14 +113,22 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; +import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; +import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; +import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; -import 
org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction; -import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequest; -import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateRequestBuilder; -import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; @@ -196,6 +208,10 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; @@ -212,6 +228,10 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBui import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; +import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; +import org.elasticsearch.action.admin.indices.shrink.ShrinkRequestBuilder; +import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; @@ -268,18 +288,6 @@ import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import 
org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptResponse; import org.elasticsearch.action.ingest.DeletePipelineAction; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; @@ -295,14 +303,6 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.ingest.WritePipelineResponse; -import org.elasticsearch.action.percolate.MultiPercolateAction; -import org.elasticsearch.action.percolate.MultiPercolateRequest; -import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; -import org.elasticsearch.action.percolate.MultiPercolateResponse; -import org.elasticsearch.action.percolate.PercolateAction; -import org.elasticsearch.action.percolate.PercolateRequest; -import org.elasticsearch.action.percolate.PercolateRequestBuilder; -import org.elasticsearch.action.percolate.PercolateResponse; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollRequestBuilder; @@ -343,6 +343,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import java.util.Map; @@ -623,36 +624,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new MultiTermVectorsRequestBuilder(this, MultiTermVectorsAction.INSTANCE); } - @Override - public ActionFuture percolate(final PercolateRequest request) { - return execute(PercolateAction.INSTANCE, request); - } - - @Override - public void percolate(final PercolateRequest request, final ActionListener listener) { - execute(PercolateAction.INSTANCE, request, listener); - } - - @Override - public PercolateRequestBuilder preparePercolate() { - return new PercolateRequestBuilder(this, PercolateAction.INSTANCE); - } - - @Override - public MultiPercolateRequestBuilder prepareMultiPercolate() { - return new MultiPercolateRequestBuilder(this, MultiPercolateAction.INSTANCE); - } - - @Override - public void multiPercolate(MultiPercolateRequest request, ActionListener listener) { - execute(MultiPercolateAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture multiPercolate(MultiPercolateRequest request) { - return execute(MultiPercolateAction.INSTANCE, request); - } - @Override public ExplainRequestBuilder prepareExplain(String index, String type, String id) { return new ExplainRequestBuilder(this, ExplainAction.INSTANCE, index, type, id); @@ -885,6 +856,25 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new ListTasksRequestBuilder(this, ListTasksAction.INSTANCE).setNodesIds(nodesIds); } + @Override + public ActionFuture getTask(final GetTaskRequest request) { + return execute(GetTaskAction.INSTANCE, request); + } + + @Override + public void getTask(final GetTaskRequest request, final ActionListener listener) { + execute(GetTaskAction.INSTANCE, request, listener); + } + + @Override + public GetTaskRequestBuilder prepareGetTask(String taskId) { + return prepareGetTask(new TaskId(taskId)); + } + + @Override + public GetTaskRequestBuilder 
prepareGetTask(TaskId taskId) {
+        return new GetTaskRequestBuilder(this, GetTaskAction.INSTANCE).setTaskId(taskId);
+    }
 
     @Override
     public ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request) {
@@ -1079,21 +1069,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         return new SnapshotsStatusRequestBuilder(this, SnapshotsStatusAction.INSTANCE);
     }
 
-    @Override
-    public ActionFuture<RenderSearchTemplateResponse> renderSearchTemplate(final RenderSearchTemplateRequest request) {
-        return execute(RenderSearchTemplateAction.INSTANCE, request);
-    }
-
-    @Override
-    public void renderSearchTemplate(final RenderSearchTemplateRequest request, final ActionListener<RenderSearchTemplateResponse> listener) {
-        execute(RenderSearchTemplateAction.INSTANCE, request, listener);
-    }
-
-    @Override
-    public RenderSearchTemplateRequestBuilder prepareRenderSearchTemplate() {
-        return new RenderSearchTemplateRequestBuilder(this, RenderSearchTemplateAction.INSTANCE);
-    }
-
     @Override
     public void putPipeline(PutPipelineRequest request, ActionListener<WritePipelineResponse> listener) {
         execute(PutPipelineAction.INSTANCE, request, listener);
@@ -1722,6 +1697,37 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         return new GetSettingsRequestBuilder(this, GetSettingsAction.INSTANCE, indices);
     }
 
+    @Override
+    public ShrinkRequestBuilder prepareShrinkIndex(String sourceIndex, String targetIndex) {
+        return new ShrinkRequestBuilder(this, ShrinkAction.INSTANCE).setSourceIndex(sourceIndex)
+            .setTargetIndex(new CreateIndexRequest(targetIndex));
+    }
+
+    @Override
+    public ActionFuture<ShrinkResponse> shrinkIndex(ShrinkRequest request) {
+        return execute(ShrinkAction.INSTANCE, request);
+    }
+
+    @Override
+    public void shrinkIndex(ShrinkRequest request, ActionListener<ShrinkResponse> listener) {
+        execute(ShrinkAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public RolloverRequestBuilder prepareRolloverIndex(String alias) {
+        return new RolloverRequestBuilder(this, RolloverAction.INSTANCE).setAlias(alias);
+    }
+
+    @Override
+    public ActionFuture<RolloverResponse> rolloverIndex(RolloverRequest request) {
+        return execute(RolloverAction.INSTANCE, request);
+    }
+
+    @Override
+    public void rolloverIndex(RolloverRequest request, ActionListener<RolloverResponse> listener) {
+        execute(RolloverAction.INSTANCE, request, listener);
+    }
+
     @Override
     public ActionFuture<GetSettingsResponse> getSettings(GetSettingsRequest request) {
         return execute(GetSettingsAction.INSTANCE, request);
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index 269984a7858..5421691d515 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -19,17 +19,21 @@
 package org.elasticsearch.client.transport;
 
-import org.elasticsearch.Version;
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionModule;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.client.support.AbstractClient;
 import org.elasticsearch.client.transport.support.TransportProxyClient;
-import 
org.elasticsearch.cluster.ClusterNameModule; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; @@ -38,24 +42,22 @@ import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.indices.breaker.CircuitBreakerModule; -import org.elasticsearch.monitor.MonitorService; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.node.Node; import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPoolModule; +import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.netty.NettyTransport; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; /** * The transport client allows to create a client that is not part of the cluster, but simply connects to one @@ -106,7 +108,7 @@ public class TransportClient extends AbstractClient { private PluginsService newPluginService(final Settings settings) { final Settings.Builder settingsBuilder = Settings.builder() - .put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval + .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval .put(InternalSettingsPreparer.prepareSettings(settings)) .put(NetworkService.NETWORK_SERVER.getKey(), false) .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE); @@ -119,48 +121,60 @@ public class TransportClient extends AbstractClient { public TransportClient build() { final PluginsService pluginsService = newPluginService(providedSettings); final Settings settings = pluginsService.updatedSettings(); - - Version version = Version.CURRENT; - + final List resourcesToClose = new ArrayList<>(); final ThreadPool threadPool = new ThreadPool(settings); + resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final NetworkService networkService = new NetworkService(settings); NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(); - boolean success = false; try { + final List> additionalSettings = new ArrayList<>(); + final List additionalSettingsFilter = new ArrayList<>(); + additionalSettings.addAll(pluginsService.getPluginSettings()); + additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter()); + for (final ExecutorBuilder builder : threadPool.builders()) { + additionalSettings.addAll(builder.getRegisteredSettings()); + } + SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); + ModulesBuilder modules = new ModulesBuilder(); - 
modules.add(new Version.Module(version)); // plugin modules must be added here, before others or we can get crazy injection errors... for (Module pluginModule : pluginsService.nodeModules()) { modules.add(pluginModule); } - modules.add(new PluginsModule(pluginsService)); - modules.add(new SettingsModule(settings)); modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry)); - modules.add(new ClusterNameModule(settings)); - modules.add(new ThreadPoolModule(threadPool)); - modules.add(new SearchModule(settings, namedWriteableRegistry) { - @Override - protected void configure() { - // noop - } - }); - modules.add(new ActionModule(false, true)); - modules.add(new CircuitBreakerModule(settings)); + modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool)); + modules.add(new SearchModule(settings, namedWriteableRegistry, true)); + ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(), + pluginsService.filterPlugins(ActionPlugin.class)); + modules.add(actionModule); pluginsService.processModules(modules); + CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(), + settingsModule.getClusterSettings()); + resourcesToClose.add(circuitBreakerService); + BigArrays bigArrays = new BigArrays(settings, circuitBreakerService); + resourcesToClose.add(bigArrays); + modules.add(settingsModule); + modules.add((b -> { + b.bind(BigArrays.class).toInstance(bigArrays); + b.bind(PluginsService.class).toInstance(pluginsService); + b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService); + })); Injector injector = modules.createInjector(); final TransportService transportService = injector.getInstance(TransportService.class); + final TransportClientNodesService nodesService = + new TransportClientNodesService(settings, transportService, threadPool); + final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService, + actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList())); + transportService.start(); transportService.acceptIncomingRequests(); - - TransportClient transportClient = new TransportClient(injector); - success = true; + TransportClient transportClient = new TransportClient(injector, nodesService, proxy); + resourcesToClose.clear(); return transportClient; } finally { - if (!success) { - ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); - } + IOUtils.closeWhileHandlingException(resourcesToClose); } } } @@ -172,15 +186,11 @@ public class TransportClient extends AbstractClient { private final TransportClientNodesService nodesService; private final TransportProxyClient proxy; - private TransportClient(Injector injector) { + private TransportClient(Injector injector, TransportClientNodesService nodesService, TransportProxyClient proxy) { super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class)); this.injector = injector; - nodesService = injector.getInstance(TransportClientNodesService.class); - proxy = injector.getInstance(TransportProxyClient.class); - } - - TransportClientNodesService nodeService() { - return nodesService; + this.nodesService = nodesService; + this.proxy = proxy; } /** @@ -255,24 +265,16 @@ public class TransportClient extends AbstractClient { */ @Override public void close() { - injector.getInstance(TransportClientNodesService.class).close(); - injector.getInstance(TransportService.class).close(); - try { - 
injector.getInstance(MonitorService.class).close(); - } catch (Exception e) { - // ignore, might not be bounded - } + List closeables = new ArrayList<>(); + closeables.add(nodesService); + closeables.add(injector.getInstance(TransportService.class)); for (Class plugin : injector.getInstance(PluginsService.class).nodeServices()) { - injector.getInstance(plugin).close(); + closeables.add(injector.getInstance(plugin)); } - try { - ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS); - } catch (Exception e) { - // ignore - } - - injector.getInstance(PageCacheRecycler.class).close(); + closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS)); + closeables.add(injector.getInstance(BigArrays.class)); + IOUtils.closeWhileHandlingException(closeables); } @Override diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index e407a2e7ada..7bc0f546483 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,6 +20,7 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -33,6 +34,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -41,13 +43,14 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.FutureTransportResponseHandler; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.io.Closeable; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; @@ -66,7 +69,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; /** * */ -public class TransportClientNodesService extends AbstractComponent { +public class TransportClientNodesService extends AbstractComponent implements Closeable { private final TimeValue nodesSamplerInterval; @@ -94,7 +97,7 @@ public class TransportClientNodesService extends AbstractComponent { private volatile ScheduledFuture nodesSamplerFuture; - private final AtomicInteger randomNodeGenerator = new AtomicInteger(); + private final AtomicInteger randomNodeGenerator = new AtomicInteger(Randomness.get().nextInt()); private final boolean ignoreClusterName; @@ -110,14 +113,13 @@ public class TransportClientNodesService extends AbstractComponent { public static final Setting CLIENT_TRANSPORT_SNIFF = 
Setting.boolSetting("client.transport.sniff", false, Property.NodeScope); - @Inject - public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService, - ThreadPool threadPool, Version version) { + public TransportClientNodesService(Settings settings,TransportService transportService, + ThreadPool threadPool) { super(settings); - this.clusterName = clusterName; + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); this.transportService = transportService; this.threadPool = threadPool; - this.minCompatibilityVersion = version.minimumCompatibilityVersion(); + this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion(); this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings); this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis(); @@ -211,16 +213,27 @@ public class TransportClientNodesService extends AbstractComponent { } public void execute(NodeListenerCallback callback, ActionListener listener) { - List nodes = this.nodes; + // we first read nodes before checking the closed state; this + // is because otherwise we could be subject to a race where we + // read the state as not being closed, and then the client is + // closed and the nodes list is cleared, and then a + // NoNodeAvailableException is thrown + // it is important that the order of first setting the state of + // closed and then clearing the list of nodes is maintained in + // the close method + final List nodes = this.nodes; + if (closed) { + throw new IllegalStateException("transport client is closed"); + } ensureNodesAreAvailable(nodes); int index = getNodeNumber(); RetryListener retryListener = new RetryListener<>(callback, listener, nodes, index); DiscoveryNode node = nodes.get((index) % nodes.size()); try { callback.doWithNode(node, retryListener); - } catch (Throwable t) { + } catch (Exception e) { //this exception can't come from the TransportService as it doesn't throw exception at all - listener.onFailure(t); + listener.onFailure(e); } } @@ -246,7 +259,7 @@ public class TransportClientNodesService extends AbstractComponent { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { int i = ++this.i; if (i >= nodes.size()) { @@ -254,9 +267,10 @@ public class TransportClientNodesService extends AbstractComponent { } else { try { callback.doWithNode(nodes.get((index + i) % nodes.size()), this); - } catch(final Throwable t) { + } catch(final Exception inner) { + inner.addSuppressed(e); // this exception can't come from the TransportService as it doesn't throw exceptions at all - listener.onFailure(t); + listener.onFailure(inner); } } } else { @@ -267,6 +281,7 @@ public class TransportClientNodesService extends AbstractComponent { } + @Override public void close() { synchronized (mutex) { if (closed) { @@ -323,7 +338,7 @@ public class TransportClientNodesService extends AbstractComponent { try { logger.trace("connecting to node [{}]", node); transportService.connectToNode(node); - } catch (Throwable e) { + } catch (Exception e) { it.remove(); logger.debug("failed to connect to discovered node [{}]", e, node); } @@ -361,8 +376,9 @@ public class TransportClientNodesService extends AbstractComponent { // its a listed node, light connect to it... 
logger.trace("connecting to listed node (light) [{}]", listedNode); transportService.connectToNodeLight(listedNode); - } catch (Throwable e) { + } catch (Exception e) { logger.debug("failed to connect to node [{}], removed from nodes list", e, listedNode); + newFilteredNodes.add(listedNode); continue; } } @@ -383,16 +399,16 @@ public class TransportClientNodesService extends AbstractComponent { // use discovered information but do keep the original transport address, // so people can control which address is exactly used. DiscoveryNode nodeWithInfo = livenessResponse.getDiscoveryNode(); - newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), nodeWithInfo.getHostName(), - nodeWithInfo.getHostAddress(), listedNode.getAddress(), nodeWithInfo.getAttributes(), - nodeWithInfo.getRoles(), nodeWithInfo.getVersion())); + newNodes.add(new DiscoveryNode(nodeWithInfo.getName(), nodeWithInfo.getId(), nodeWithInfo.getEphemeralId(), + nodeWithInfo.getHostName(), nodeWithInfo.getHostAddress(), listedNode.getAddress(), + nodeWithInfo.getAttributes(), nodeWithInfo.getRoles(), nodeWithInfo.getVersion())); } else { // although we asked for one node, our target may not have completed // initialization yet and doesn't have cluster nodes logger.debug("node {} didn't return any discovery info, temporarily using transport discovery node", listedNode); newNodes.add(listedNode); } - } catch (Throwable e) { + } catch (Exception e) { logger.info("failed to get node info for {}, disconnecting...", e, listedNode); transportService.disconnectFromNode(listedNode); } @@ -446,7 +462,7 @@ public class TransportClientNodesService extends AbstractComponent { Requests.clusterStateRequest().clear().nodes(true).local(true), TransportRequestOptions.builder().withType(TransportRequestOptions.Type.STATE) .withTimeout(pingTimeout).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public ClusterStateResponse newInstance() { @@ -471,7 +487,7 @@ public class TransportClientNodesService extends AbstractComponent { latch.countDown(); } }); - } catch (Throwable e) { + } catch (Exception e) { logger.info("failed to get local cluster state info for {}, disconnecting...", e, listedNode); transportService.disconnectFromNode(listedNode); latch.countDown(); diff --git a/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java b/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java index 900876415e3..34833e9400a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/support/TransportProxyClient.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportService; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.unmodifiableMap; @@ -45,11 +46,11 @@ public class TransportProxyClient { private final TransportClientNodesService nodesService; private final Map proxies; - @Inject - public TransportProxyClient(Settings settings, TransportService transportService, TransportClientNodesService nodesService, Map actions) { + public TransportProxyClient(Settings settings, TransportService transportService, + TransportClientNodesService nodesService, List actions) { this.nodesService = nodesService; Map proxies = new HashMap<>(); - for (GenericAction action : actions.values()) { + for (GenericAction action : 
actions) { if (action instanceof Action) { proxies.put((Action) action, new TransportActionNodeProxy(settings, action, transportService)); } @@ -59,11 +60,6 @@ public class TransportProxyClient { public > void execute(final Action action, final Request request, ActionListener listener) { final TransportActionNodeProxy proxy = proxies.get(action); - nodesService.execute(new TransportClientNodesService.NodeListenerCallback() { - @Override - public void doWithNode(DiscoveryNode node, ActionListener listener) { - proxy.execute(node, request, listener); - } - }, listener); + nodesService.execute((n, l) -> proxy.execute(n, request, l), listener); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java index cdd9b2204ff..148a1dea309 100644 --- a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java +++ b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java @@ -36,9 +36,9 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener * Called once all the nodes have acknowledged the cluster state update request. Must be * very lightweight execution, since it gets executed on the cluster service thread. * - * @param t optional error that might have been thrown + * @param e optional error that might have been thrown */ - void onAllNodesAcked(@Nullable Throwable t); + void onAllNodesAcked(@Nullable Exception e); /** * Called once the acknowledgement timeout defined by diff --git a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java index b833f6e1879..faf2f30bb3e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java @@ -58,9 +58,9 @@ public abstract class AckedClusterStateUpdateTask extends ClusterState * Called once all the nodes have acknowledged the cluster state update request. Must be * very lightweight execution, since it gets executed on the cluster service thread. * - * @param t optional error that might have been thrown + * @param e optional error that might have been thrown */ - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { listener.onResponse(newResponse(true)); } @@ -75,8 +75,8 @@ public abstract class AckedClusterStateUpdateTask extends ClusterState } @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); + public void onFailure(String source, Exception e) { + listener.onFailure(e); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index d3a42a97ebb..efd525d313b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -148,18 +148,11 @@ public class ClusterChangedEvent { * has changed between the previous cluster state and the new cluster state. * Note that this is an object reference equality test, not an equals test. 
*/ - public boolean indexMetaDataChanged(IndexMetaData current) { - MetaData previousMetaData = previousState.metaData(); - if (previousMetaData == null) { - return true; - } - IndexMetaData previousIndexMetaData = previousMetaData.index(current.getIndex()); + public static boolean indexMetaDataChanged(IndexMetaData metaData1, IndexMetaData metaData2) { + assert metaData1 != null && metaData2 != null; // no need to check on version, since disco modules will make sure to use the // same instance if its a version match - if (previousIndexMetaData == current) { - return false; - } - return true; + return metaData1 != metaData2; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java index d4ceb844ec2..c17bc08ac0a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterInfoService.java @@ -26,16 +26,16 @@ package org.elasticsearch.cluster; public interface ClusterInfoService { /** The latest cluster information */ - public ClusterInfo getClusterInfo(); + ClusterInfo getClusterInfo(); /** Add a listener that will be called every time new information is gathered */ - public void addListener(Listener listener); + void addListener(Listener listener); /** * Interface for listeners to implement in order to perform actions when * new information about the cluster has been gathered */ - public interface Listener { - public void onNewInfo(ClusterInfo info); + interface Listener { + void onNewInfo(ClusterInfo info); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 47dd2ce9ae6..fac138ce820 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; -import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -32,8 +31,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; -import org.elasticsearch.cluster.node.DiscoveryNodeService; -import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.cluster.routing.DelayedAllocationService; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -49,6 +47,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import 
org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; @@ -62,6 +61,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.tasks.TaskPersistenceService; import java.util.Arrays; import java.util.Collections; @@ -79,6 +79,7 @@ public class ClusterModule extends AbstractModule { new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope); public static final List> DEFAULT_ALLOCATION_DECIDERS = Collections.unmodifiableList(Arrays.asList( + MaxRetryAllocationDecider.class, SameShardAllocationDecider.class, FilterAllocationDecider.class, ReplicaAfterPrimaryActiveAllocationDecider.class, @@ -97,17 +98,21 @@ public class ClusterModule extends AbstractModule { private final ExtensionPoint.SelectedType shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class); private final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); private final ExtensionPoint.ClassSet indexTemplateFilters = new ExtensionPoint.ClassSet<>("index_template_filter", IndexTemplateFilter.class); + private final ClusterService clusterService; + private final IndexNameExpressionResolver indexNameExpressionResolver; // pkg private so tests can mock Class clusterInfoServiceImpl = InternalClusterInfoService.class; - public ClusterModule(Settings settings) { + public ClusterModule(Settings settings, ClusterService clusterService) { this.settings = settings; for (Class decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { registerAllocationDecider(decider); } registerShardsAllocator(ClusterModule.BALANCED_ALLOCATOR, BalancedShardsAllocator.class); registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class); + this.clusterService = clusterService; + indexNameExpressionResolver = new IndexNameExpressionResolver(settings); } public void registerAllocationDecider(Class allocationDecider) { @@ -122,6 +127,10 @@ public class ClusterModule extends AbstractModule { indexTemplateFilters.registerExtension(indexTemplateFilter); } + public IndexNameExpressionResolver getIndexNameExpressionResolver() { + return indexNameExpressionResolver; + } + @Override protected void configure() { // bind ShardsAllocator @@ -136,10 +145,8 @@ public class ClusterModule extends AbstractModule { bind(ClusterInfoService.class).to(clusterInfoServiceImpl).asEagerSingleton(); bind(GatewayAllocator.class).asEagerSingleton(); bind(AllocationService.class).asEagerSingleton(); - bind(DiscoveryNodeService.class).asEagerSingleton(); - bind(ClusterService.class).asEagerSingleton(); + bind(ClusterService.class).toInstance(clusterService); bind(NodeConnectionsService.class).asEagerSingleton(); - bind(OperationRouting.class).asEagerSingleton(); bind(MetaDataCreateIndexService.class).asEagerSingleton(); bind(MetaDataDeleteIndexService.class).asEagerSingleton(); bind(MetaDataIndexStateService.class).asEagerSingleton(); @@ -147,11 +154,12 @@ public class ClusterModule extends AbstractModule { bind(MetaDataIndexAliasesService.class).asEagerSingleton(); 
bind(MetaDataUpdateSettingsService.class).asEagerSingleton(); bind(MetaDataIndexTemplateService.class).asEagerSingleton(); - bind(IndexNameExpressionResolver.class).asEagerSingleton(); + bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); bind(RoutingService.class).asEagerSingleton(); + bind(DelayedAllocationService.class).asEagerSingleton(); bind(ShardStateAction.class).asEagerSingleton(); - bind(NodeIndexDeletedAction.class).asEagerSingleton(); bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); + bind(TaskPersistenceService.class).asEagerSingleton(); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java index 09c64065dbd..36676300954 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -21,37 +21,29 @@ package org.elasticsearch.cluster; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; +import java.util.Objects; -/** - * - */ -public class ClusterName implements Streamable { +public class ClusterName implements Writeable { - public static final Setting CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "elasticsearch", (s) -> { + public static final Setting CLUSTER_NAME_SETTING = new Setting<>("cluster.name", "elasticsearch", (s) -> { if (s.isEmpty()) { throw new IllegalArgumentException("[cluster.name] must not be empty"); } - return s; - }, Property.NodeScope); + return new ClusterName(s); + }, Setting.Property.NodeScope); + public static final ClusterName DEFAULT = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); - public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern()); + private final String value; - private String value; - - public static ClusterName clusterNameFromSettings(Settings settings) { - return new ClusterName(CLUSTER_NAME_SETTING.get(settings)); + public ClusterName(StreamInput input) throws IOException { + this(input.readString()); } - - private ClusterName() { - } - public ClusterName(String value) { this.value = value.intern(); } @@ -60,17 +52,6 @@ public class ClusterName implements Streamable { return this.value; } - public static ClusterName readClusterName(StreamInput in) throws IOException { - ClusterName clusterName = new ClusterName(); - clusterName.readFrom(in); - return clusterName; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - value = in.readString().intern(); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(value); @@ -90,7 +71,7 @@ public class ClusterName implements Streamable { @Override public int hashCode() { - return value != null ? 
value.hashCode() : 0; + return Objects.hash(value); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 17c0b646760..abad2e9a8e4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -90,7 +91,7 @@ import java.util.Set; */ public class ClusterState implements ToXContent, Diffable { - public static final ClusterState PROTO = builder(ClusterName.DEFAULT).build(); + public static final ClusterState PROTO = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); public static enum ClusterStateStatus { UNKNOWN((byte) 0), @@ -114,7 +115,7 @@ public class ClusterState implements ToXContent, Diffable { String type(); } - private final static Map customPrototypes = new HashMap<>(); + private static final Map customPrototypes = new HashMap<>(); /** * Register a custom index meta data factory. Make sure to call it from a static block. @@ -702,7 +703,7 @@ public class ClusterState implements ToXContent, Diffable { public static byte[] toBytes(ClusterState state) throws IOException { BytesStreamOutput os = new BytesStreamOutput(); state.writeTo(os); - return os.bytes().toBytes(); + return BytesReference.toBytes(os.bytes()); } /** @@ -711,6 +712,7 @@ public class ClusterState implements ToXContent, Diffable { */ public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException { return readFrom(StreamInput.wrap(data), localNode); + } /** @@ -734,7 +736,7 @@ public class ClusterState implements ToXContent, Diffable { } public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { - ClusterName clusterName = ClusterName.readClusterName(in); + ClusterName clusterName = new ClusterName(in); Builder builder = new Builder(clusterName); builder.version = in.readLong(); builder.uuid = in.readString(); @@ -805,7 +807,7 @@ public class ClusterState implements ToXContent, Diffable { } public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { - clusterName = ClusterName.readClusterName(in); + clusterName = new ClusterName(in); fromUuid = in.readString(); toUuid = in.readString(); toVersion = in.readLong(); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index d79a00dc3fe..228ac3f41b9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -274,7 +274,7 @@ public class ClusterStateObserver { } - public static abstract class ValidationPredicate implements ChangePredicate { + public abstract static class ValidationPredicate implements ChangePredicate { @Override public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) { @@ -289,7 +289,7 @@ public class ClusterStateObserver { } } - 
public static abstract class EventPredicate implements ChangePredicate { + public abstract static class EventPredicate implements ChangePredicate { @Override public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) { return previousState != newState || previousStatus != newStatus; @@ -298,8 +298,8 @@ public class ClusterStateObserver { } static class ObservingContext { - final public Listener listener; - final public ChangePredicate changePredicate; + public final Listener listener; + public final ChangePredicate changePredicate; public ObservingContext(Listener listener, ChangePredicate changePredicate) { this.listener = listener; @@ -308,8 +308,8 @@ } static class ObservedState { - final public ClusterState clusterState; - final public ClusterState.ClusterStateStatus status; + public final ClusterState clusterState; + public final ClusterState.ClusterStateStatus status; public ObservedState(ClusterState clusterState) { this.clusterState = clusterState; @@ -322,7 +322,7 @@ } } - private final static class ContextPreservingListener implements Listener { + private static final class ContextPreservingListener implements Listener { private final Listener delegate; private final ThreadContext.StoredContext tempContext; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java index 2ef2438991e..0724c05ac21 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskConfig.java @@ -29,7 +29,7 @@ public interface ClusterStateTaskConfig { /** * The timeout for this cluster state update task configuration. If * the cluster state update task isn't processed within this - * timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)} + * timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Exception)} * is invoked. * * @return the timeout, or null if one is not set diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index be9381a7fe6..9324e075e90 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -40,8 +40,29 @@ public interface ClusterStateTaskExecutor<T> { /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. + * @param clusterChangedEvent the change event for this cluster state change, containing + * both old and new states */ - default void clusterStatePublished(ClusterState newClusterState) { + default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + } + + /** + * Builds a concise description of a list of tasks (to be used in logging etc.). + * + * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}, + * but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. + * This allows grouping the task descriptions by the submitting source.
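The clusterStatePublished() hook above now receives the whole change event instead of only the new state, so a handler can compare old and new. A stand-alone sketch of what that enables, with a String standing in for ClusterState and a hypothetical two-field ChangedEvent standing in for the much richer ClusterChangedEvent:

    public class ClusterStatePublishedDemo {
        // Stand-in for ClusterChangedEvent: exposes previous and current state.
        static final class ChangedEvent {
            private final String previousState;
            private final String state;
            ChangedEvent(String previousState, String state) {
                this.previousState = previousState;
                this.state = state;
            }
            String state() { return state; }
            String previousState() { return previousState; }
        }

        // With the event in hand, a publish hook can diff old vs. new,
        // which the old single-argument callback could not do.
        static void clusterStatePublished(ChangedEvent event) {
            if (!event.state().equals(event.previousState())) {
                System.out.println("published: " + event.previousState() + " -> " + event.state());
            }
        }

        public static void main(String[] args) {
            clusterStatePublished(new ChangedEvent("nodes=2", "nodes=3"));
        }
    }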
+ */ + default String describeTasks(List<T> tasks) { + return tasks.stream().map(T::toString).reduce((s1, s2) -> { + if (s1.isEmpty()) { + return s2; + } else if (s2.isEmpty()) { + return s1; + } else { + return s1 + ", " + s2; + } + }).orElse(""); } /** @@ -49,8 +70,8 @@ public interface ClusterStateTaskExecutor<T> { * @param <T> the type of the cluster state update task */ class BatchResult<T> { - final public ClusterState resultingState; - final public Map<T, TaskResult> executionResults; + public final ClusterState resultingState; + public final Map<T, TaskResult> executionResults; /** * Construct an execution result instance with a correspondence between the tasks and their execution result @@ -80,19 +101,20 @@ public interface ClusterStateTaskExecutor<T> { return this; } - public Builder<T> failure(T task, Throwable t) { - return result(task, TaskResult.failure(t)); + public Builder<T> failure(T task, Exception e) { + return result(task, TaskResult.failure(e)); } - public Builder<T> failures(Iterable<T> tasks, Throwable t) { + public Builder<T> failures(Iterable<T> tasks, Exception e) { for (T task : tasks) { - failure(task, t); + failure(task, e); } return this; } private Builder<T> result(T task, TaskResult executionResult) { - executionResults.put(task, executionResult); + TaskResult existing = executionResults.put(task, executionResult); + assert existing == null : task + " already has result " + existing; return this; } @@ -103,7 +125,7 @@ } final class TaskResult { - private final Throwable failure; + private final Exception failure; private static final TaskResult SUCCESS = new TaskResult(null); @@ -111,11 +133,11 @@ return SUCCESS; } - public static TaskResult failure(Throwable failure) { + public static TaskResult failure(Exception failure) { return new TaskResult(failure); } - private TaskResult(Throwable failure) { + private TaskResult(Exception failure) { this.failure = failure; } @@ -123,7 +145,7 @@ return this == SUCCESS; } - public Throwable getFailure() { + public Exception getFailure() { assert !isSuccess(); return failure; } @@ -133,7 +155,7 @@ * @param onSuccess handler to invoke on success * @param onFailure handler to invoke on failure; the throwable passed through will not be null */ - public void handle(Runnable onSuccess, Consumer<Throwable> onFailure) { + public void handle(Runnable onSuccess, Consumer<Exception> onFailure) { if (failure == null) { onSuccess.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index 3bf7887cd1c..757c8b0c82e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -25,7 +25,7 @@ public interface ClusterStateTaskListener { /** * A callback called when execute fails.
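The describeTasks() default added above reduces the per-task toString() values into one comma-separated summary, skipping empty descriptions so separators do not pile up. The same logic, extracted into a runnable stand-alone form:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class DescribeTasksDemo {
        // Mirrors the default implementation: an empty description contributes
        // no separator, so "a", "", "b" collapses to "a, b" rather than "a, , b".
        static String describeTasks(List<?> tasks) {
            return tasks.stream().map(Object::toString).reduce((s1, s2) -> {
                if (s1.isEmpty()) {
                    return s2;
                } else if (s2.isEmpty()) {
                    return s1;
                } else {
                    return s1 + ", " + s2;
                }
            }).orElse("");
        }

        public static void main(String[] args) {
            // prints: shard-started [s0], shard-started [s1]
            System.out.println(describeTasks(Arrays.asList("shard-started [s0]", "", "shard-started [s1]")));
            // an empty task list yields the empty string
            System.out.println(describeTasks(Collections.emptyList()).isEmpty()); // true
        }
    }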
*/ - void onFailure(String source, Throwable t); + void onFailure(String source, Exception e); /** * called when the task was rejected because the local node is no longer master diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 3e2881134f8..a679d098616 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -28,9 +28,9 @@ import java.util.List; /** * A task that can update the cluster state. */ -abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { +public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { - final private Priority priority; + private final Priority priority; public ClusterStateUpdateTask() { this(Priority.NORMAL); @@ -41,25 +41,30 @@ abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, } @Override - final public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + public final BatchResult execute(ClusterState currentState, List tasks) throws Exception { ClusterState result = execute(currentState); return BatchResult.builder().successes(tasks).build(result); } + @Override + public String describeTasks(List tasks) { + return ""; // one of task, source is enough + } + /** * Update the cluster state based on the current state. Return the *same instance* if no state * should be changed. */ - abstract public ClusterState execute(ClusterState currentState) throws Exception; + public abstract ClusterState execute(ClusterState currentState) throws Exception; /** * A callback called when execute fails. */ - abstract public void onFailure(String source, Throwable t); + public abstract void onFailure(String source, Exception e); /** * If the cluster state update task wasn't processed by the provided timeout, call - * {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default). + * {@link ClusterStateTaskListener#onFailure(String, Exception)}. May return null to indicate no timeout is needed (default). 
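ClusterStateUpdateTask above is an adapter: it finalizes the batched execute(currentState, tasks) to delegate to a single abstract execute(ClusterState), and returns an empty describeTasks() because the submitting source string already identifies the task. A stand-alone sketch of that shape, with a String standing in for ClusterState and hypothetical names:

    import java.util.Collections;
    import java.util.List;

    public abstract class UpdateTaskDemo {
        // final batched entry point: one transform, applied once for the whole batch
        public final String execute(String currentState, List<UpdateTaskDemo> tasks) throws Exception {
            return execute(currentState); // all tasks in the batch share the result
        }

        // the single-task transform that subclasses implement
        public abstract String execute(String currentState) throws Exception;

        // the submitting source string already names the task
        public String describeTasks(List<UpdateTaskDemo> tasks) {
            return "";
        }

        public static void main(String[] args) throws Exception {
            UpdateTaskDemo bumpVersion = new UpdateTaskDemo() {
                @Override
                public String execute(String currentState) {
                    return currentState + ";version+1";
                }
            };
            System.out.println(bumpVersion.execute("cluster-state", Collections.singletonList(bumpVersion)));
        }
    }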
*/ @Nullable public TimeValue timeout() { diff --git a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index fb9d7159105..1a3557890dd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/core/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -330,7 +330,7 @@ public final class DiffableUtils { * @param the type of map values * @param the map implementation type */ - public static abstract class MapDiff implements Diff { + public abstract static class MapDiff implements Diff { protected final List deletes; protected final Map> diffs; // incremental updates @@ -534,7 +534,7 @@ public final class DiffableUtils { * @param type of map keys * @param type of map values */ - public static abstract class DiffableValueSerializer> implements ValueSerializer { + public abstract static class DiffableValueSerializer> implements ValueSerializer { private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() { @Override public Object read(StreamInput in, Object key) throws IOException { @@ -577,7 +577,7 @@ public final class DiffableUtils { * @param type of map keys * @param type of map values */ - public static abstract class NonDiffableValueSerializer implements ValueSerializer { + public abstract static class NonDiffableValueSerializer implements ValueSerializer { @Override public boolean supportsDiffableValues() { return false; diff --git a/core/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java index cc65106946a..b552508532c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/EmptyClusterInfoService.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings; * ClusterInfoService that provides empty maps for disk usage and shard sizes */ public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService { - public final static EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService(); + public static final EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService(); private EmptyClusterInfoService() { super(Settings.EMPTY); diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index b1362538cb9..6c22ff171dd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -30,7 +29,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -280,7 +278,7 @@ public class 
InternalClusterInfoService extends AbstractComponent implements Clu return latch; } - private final void maybeRefresh() { + private void maybeRefresh() { // Short-circuit if not enabled if (enabled) { refresh(); @@ -309,7 +307,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { if (e instanceof ReceiveTimeoutTransportException) { logger.error("NodeStatsAction timed out for ClusterInfoUpdateJob", e); } else { @@ -339,7 +337,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { if (e instanceof ReceiveTimeoutTransportException) { logger.error("IndicesStatsAction timed out for ClusterInfoUpdateJob", e); } else { @@ -404,7 +402,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu } } - static void fillDiskUsagePerNode(ESLogger logger, NodeStats[] nodeStatsArray, + static void fillDiskUsagePerNode(ESLogger logger, List nodeStatsArray, ImmutableOpenMap.Builder newLeastAvaiableUsages, ImmutableOpenMap.Builder newMostAvaiableUsages) { for (NodeStats nodeStats : nodeStatsArray) { diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 698f9d1090c..a487bda0db4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -46,7 +46,7 @@ import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}. 
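Both stats callbacks above now accept Exception rather than Throwable, so JVM Errors propagate instead of being swallowed, and a transport timeout is special-cased by type before generic logging. A stand-alone sketch of the pattern, with java.util.concurrent.TimeoutException standing in for Elasticsearch's ReceiveTimeoutTransportException:

    import java.util.concurrent.TimeoutException;

    public class FailureHandlingDemo {
        static void onFailure(Exception e) {
            if (e instanceof TimeoutException) {
                // timeouts are expected under load; log them as their own case
                System.err.println("stats action timed out: " + e.getMessage());
            } else {
                // anything else is unexpected and logged generically
                System.err.println("unexpected failure: " + e);
            }
        }

        public static void main(String[] args) {
            onFailure(new TimeoutException("no response within 15s"));
            onFailure(new IllegalStateException("node left the cluster"));
        }
    }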
*/ -public class NodeConnectionsService extends AbstractLifecycleComponent { +public class NodeConnectionsService extends AbstractLifecycleComponent { public static final Setting CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope); @@ -57,7 +57,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent nodes = ConcurrentCollections.newConcurrentMap(); - final private KeyedLock nodeLocks = new KeyedLock<>(); + private final KeyedLock nodeLocks = new KeyedLock<>(); private final TimeValue reconnectInterval; @@ -90,8 +90,8 @@ public class NodeConnectionsService extends AbstractLifecycleComponent implements Custo return this.entries; } - /** - * Returns currently running restore process with corresponding snapshot id or null if this snapshot is not being - * restored - * - * @param snapshotId snapshot id - * @return restore metadata or null - */ - public Entry snapshot(SnapshotId snapshotId) { - for (Entry entry : entries) { - if (snapshotId.equals(entry.snapshotId())) { - return entry; - } - } - return null; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -111,22 +96,22 @@ public class RestoreInProgress extends AbstractDiffable implements Custo */ public static class Entry { private final State state; - private final SnapshotId snapshotId; + private final Snapshot snapshot; private final ImmutableOpenMap shards; private final List indices; /** * Creates new restore metadata * - * @param snapshotId snapshot id + * @param snapshot snapshot * @param state current state of the restore process * @param indices list of indices being restored * @param shards map of shards being restored to their current restore status */ - public Entry(SnapshotId snapshotId, State state, List indices, ImmutableOpenMap shards) { - this.snapshotId = snapshotId; - this.state = state; - this.indices = indices; + public Entry(Snapshot snapshot, State state, List indices, ImmutableOpenMap shards) { + this.snapshot = Objects.requireNonNull(snapshot); + this.state = Objects.requireNonNull(state); + this.indices = Objects.requireNonNull(indices); if (shards == null) { this.shards = ImmutableOpenMap.of(); } else { @@ -135,12 +120,12 @@ public class RestoreInProgress extends AbstractDiffable implements Custo } /** - * Returns snapshot id + * Returns snapshot * - * @return snapshot id + * @return snapshot */ - public SnapshotId snapshotId() { - return this.snapshotId; + public Snapshot snapshot() { + return this.snapshot; } /** @@ -172,26 +157,22 @@ public class RestoreInProgress extends AbstractDiffable implements Custo @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Entry entry = (Entry) o; - - if (!indices.equals(entry.indices)) return false; - if (!snapshotId.equals(entry.snapshotId)) return false; - if (!shards.equals(entry.shards)) return false; - if (state != entry.state) return false; - - return true; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + @SuppressWarnings("unchecked") Entry entry = (Entry) o; + return snapshot.equals(entry.snapshot) && + state == entry.state && + indices.equals(entry.indices) && + shards.equals(entry.shards); } @Override public int hashCode() { - int result = state.hashCode(); - result = 31 * result + snapshotId.hashCode(); - result = 31 * result + shards.hashCode(); - result = 31 * result + 
indices.hashCode(); - return result; + return Objects.hash(snapshot, state, indices, shards); } } @@ -301,31 +282,29 @@ public class RestoreInProgress extends AbstractDiffable implements Custo @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } - ShardRestoreStatus status = (ShardRestoreStatus) o; - - if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false; - if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false; - if (state != status.state) return false; - - return true; + @SuppressWarnings("unchecked") ShardRestoreStatus status = (ShardRestoreStatus) o; + return state == status.state && + Objects.equals(nodeId, status.nodeId) && + Objects.equals(reason, status.reason); } @Override public int hashCode() { - int result = state != null ? state.hashCode() : 0; - result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0); - result = 31 * result + (reason != null ? reason.hashCode() : 0); - return result; + return Objects.hash(state, nodeId, reason); } } /** * Shard restore process state */ - public static enum State { + public enum State { /** * Initializing state */ @@ -409,7 +388,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo public RestoreInProgress readFrom(StreamInput in) throws IOException { Entry[] entries = new Entry[in.readVInt()]; for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + Snapshot snapshot = new Snapshot(in); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); List indexBuilder = new ArrayList<>(); @@ -423,7 +402,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); builder.put(shardId, shardState); } - entries[i] = new Entry(snapshotId, state, Collections.unmodifiableList(indexBuilder), builder.build()); + entries[i] = new Entry(snapshot, state, Collections.unmodifiableList(indexBuilder), builder.build()); } return new RestoreInProgress(entries); } @@ -435,7 +414,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo public void writeTo(StreamOutput out) throws IOException { out.writeVInt(entries.size()); for (Entry entry : entries) { - entry.snapshotId().writeTo(out); + entry.snapshot().writeTo(out); out.writeByte(entry.state().value()); out.writeVInt(entry.indices().size()); for (String index : entry.indices()) { @@ -471,8 +450,8 @@ public class RestoreInProgress extends AbstractDiffable implements Custo */ public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field("snapshot", entry.snapshotId().getSnapshot()); - builder.field("repository", entry.snapshotId().getRepository()); + builder.field("snapshot", entry.snapshot().getSnapshotId().getName()); + builder.field("repository", entry.snapshot().getRepository()); builder.field("state", entry.state()); builder.startArray("indices"); { diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 5432c1f0f19..f0a0fdec665 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -23,13 +23,13 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.cluster.ClusterState.Custom; -import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.snapshots.Snapshot; import java.io.IOException; import java.util.ArrayList; @@ -66,7 +66,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public static class Entry { private final State state; - private final SnapshotId snapshotId; + private final Snapshot snapshot; private final boolean includeGlobalState; private final boolean partial; private final ImmutableOpenMap shards; @@ -74,9 +74,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final ImmutableOpenMap> waitingIndices; private final long startTime; - public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List indices, long startTime, ImmutableOpenMap shards) { + public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, long startTime, + ImmutableOpenMap shards) { this.state = state; - this.snapshotId = snapshotId; + this.snapshot = snapshot; this.includeGlobalState = includeGlobalState; this.partial = partial; this.indices = indices; @@ -91,15 +92,15 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } public Entry(Entry entry, State state, ImmutableOpenMap shards) { - this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards); + this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards); } public Entry(Entry entry, ImmutableOpenMap shards) { this(entry, entry.state, shards); } - public SnapshotId snapshotId() { - return this.snapshotId; + public Snapshot snapshot() { + return this.snapshot; } public ImmutableOpenMap shards() { @@ -142,7 +143,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus if (startTime != entry.startTime) return false; if (!indices.equals(entry.indices)) return false; if (!shards.equals(entry.shards)) return false; - if (!snapshotId.equals(entry.snapshotId)) return false; + if (!snapshot.equals(entry.snapshot)) return false; if (state != entry.state) return false; if (!waitingIndices.equals(entry.waitingIndices)) return false; @@ -152,7 +153,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus @Override public int hashCode() { int result = state.hashCode(); - result = 31 * result + snapshotId.hashCode(); + result = 31 * result + snapshot.hashCode(); result = 31 * result + (includeGlobalState ? 1 : 0); result = 31 * result + (partial ? 
1 : 0); result = 31 * result + shards.hashCode(); @@ -162,6 +163,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return result; } + @Override + public String toString() { + return snapshot.toString(); + } + private ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { Map> waitingIndicesMap = new HashMap<>(); for (ObjectObjectCursor entry : shards) { @@ -277,7 +283,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } } - public static enum State { + public enum State { INIT((byte) 0, false, false), STARTED((byte) 1, false, false), SUCCESS((byte) 2, true, false), @@ -347,9 +353,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return this.entries; } - public Entry snapshot(SnapshotId snapshotId) { + public Entry snapshot(final Snapshot snapshot) { for (Entry entry : entries) { - if (snapshotId.equals(entry.snapshotId())) { + final Snapshot curr = entry.snapshot(); + if (curr.equals(snapshot)) { return entry; } } @@ -365,7 +372,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public SnapshotsInProgress readFrom(StreamInput in) throws IOException { Entry[] entries = new Entry[in.readVInt()]; for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + Snapshot snapshot = new Snapshot(in); boolean includeGlobalState = in.readBoolean(); boolean partial = in.readBoolean(); State state = State.fromValue(in.readByte()); @@ -383,7 +390,13 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } - entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); + entries[i] = new Entry(snapshot, + includeGlobalState, + partial, + state, + Collections.unmodifiableList(indexBuilder), + startTime, + builder.build()); } return new SnapshotsInProgress(entries); } @@ -392,7 +405,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public void writeTo(StreamOutput out) throws IOException { out.writeVInt(entries.size()); for (Entry entry : entries) { - entry.snapshotId().writeTo(out); + entry.snapshot().writeTo(out); out.writeBoolean(entry.includeGlobalState()); out.writeBoolean(entry.partial()); out.writeByte(entry.state().value()); @@ -410,25 +423,24 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } } - static final class Fields { - static final String REPOSITORY = "repository"; - static final String SNAPSHOTS = "snapshots"; - static final String SNAPSHOT = "snapshot"; - static final String INCLUDE_GLOBAL_STATE = "include_global_state"; - static final String PARTIAL = "partial"; - static final String STATE = "state"; - static final String INDICES = "indices"; - static final String START_TIME_MILLIS = "start_time_millis"; - static final String START_TIME = "start_time"; - static final String SHARDS = "shards"; - static final String INDEX = "index"; - static final String SHARD = "shard"; - static final String NODE = "node"; - } + private static final String REPOSITORY = "repository"; + private static final String SNAPSHOTS = "snapshots"; + private static final String SNAPSHOT = "snapshot"; + private static final String UUID = "uuid"; + private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; + private static final String PARTIAL = "partial"; + private static 
final String STATE = "state"; + private static final String INDICES = "indices"; + private static final String START_TIME_MILLIS = "start_time_millis"; + private static final String START_TIME = "start_time"; + private static final String SHARDS = "shards"; + private static final String INDEX = "index"; + private static final String SHARD = "shard"; + private static final String NODE = "node"; @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); + builder.startArray(SNAPSHOTS); for (Entry entry : entries) { toXContent(entry, builder, params); } @@ -438,30 +450,31 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); - builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); - builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); - builder.field(Fields.PARTIAL, entry.partial()); - builder.field(Fields.STATE, entry.state()); - builder.startArray(Fields.INDICES); + builder.field(REPOSITORY, entry.snapshot().getRepository()); + builder.field(SNAPSHOT, entry.snapshot().getSnapshotId().getName()); + builder.field(UUID, entry.snapshot().getSnapshotId().getUUID()); + builder.field(INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(PARTIAL, entry.partial()); + builder.field(STATE, entry.state()); + builder.startArray(INDICES); { for (String index : entry.indices()) { builder.value(index); } } builder.endArray(); - builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); - builder.startArray(Fields.SHARDS); + builder.timeValueField(START_TIME_MILLIS, START_TIME, entry.startTime()); + builder.startArray(SHARDS); { for (ObjectObjectCursor shardEntry : entry.shards) { ShardId shardId = shardEntry.key; ShardSnapshotStatus status = shardEntry.value; builder.startObject(); { - builder.field(Fields.INDEX, shardId.getIndex()); - builder.field(Fields.SHARD, shardId.getId()); - builder.field(Fields.STATE, status.state()); - builder.field(Fields.NODE, status.nodeId()); + builder.field(INDEX, shardId.getIndex()); + builder.field(SHARD, shardId.getId()); + builder.field(STATE, status.state()); + builder.field(NODE, status.nodeId()); } builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java deleted file mode 100644 index 377addd48f3..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
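The snapshot and restore hunks above replace bare SnapshotId keys with a richer Snapshot that carries the repository plus a named, UUID-bearing snapshot id, compared with Objects-based equals/hashCode like the rewritten Entry classes. A hypothetical value type of the same shape (field and accessor names invented for illustration, not the Elasticsearch class):

    import java.util.Objects;

    public final class SnapshotKey {
        private final String repository;
        private final String name;
        private final String uuid;

        public SnapshotKey(String repository, String name, String uuid) {
            this.repository = Objects.requireNonNull(repository);
            this.name = Objects.requireNonNull(name);
            this.uuid = Objects.requireNonNull(uuid);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            SnapshotKey that = (SnapshotKey) o;
            return repository.equals(that.repository) && name.equals(that.name) && uuid.equals(that.uuid);
        }

        @Override
        public int hashCode() {
            return Objects.hash(repository, name, uuid);
        }

        @Override
        public String toString() {
            return repository + ":" + name + "/" + uuid;
        }

        public static void main(String[] args) {
            // two snapshots with the same name are distinct if their UUIDs differ
            SnapshotKey a = new SnapshotKey("repo1", "snap-1", "uuid-a");
            SnapshotKey b = new SnapshotKey("repo1", "snap-1", "uuid-b");
            System.out.println(a.equals(b)); // false
        }
    }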
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.action.index; - -import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.EmptyTransportResponseHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.TimeUnit; - -/** - * - */ -public class NodeIndexDeletedAction extends AbstractComponent { - - public static final String INDEX_DELETED_ACTION_NAME = "internal:cluster/node/index/deleted"; - public static final String INDEX_STORE_DELETED_ACTION_NAME = "internal:cluster/node/index_store/deleted"; - - private final ThreadPool threadPool; - private final TransportService transportService; - private final List listeners = new CopyOnWriteArrayList<>(); - private final IndicesService indicesService; - - @Inject - public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService, IndicesService indicesService) { - super(settings); - this.threadPool = threadPool; - this.transportService = transportService; - transportService.registerRequestHandler(INDEX_DELETED_ACTION_NAME, NodeIndexDeletedMessage::new, ThreadPool.Names.SAME, new NodeIndexDeletedTransportHandler()); - transportService.registerRequestHandler(INDEX_STORE_DELETED_ACTION_NAME, NodeIndexStoreDeletedMessage::new, ThreadPool.Names.SAME, new NodeIndexStoreDeletedTransportHandler()); - this.indicesService = indicesService; - } - - public void add(Listener listener) { - listeners.add(listener); - } - - public void remove(Listener listener) { - listeners.remove(listener); - } - - public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) { - final DiscoveryNodes nodes = clusterState.nodes(); - transportService.sendRequest(clusterState.nodes().getMasterNode(), - INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); - if (nodes.getLocalNode().isDataNode() == false) { - logger.trace("[{}] not acking store deletion (not a data node)", index); - return; - } - threadPool.generic().execute(new AbstractRunnable() { - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] failed to ack index store deleted for index", t, index); - } - - @Override - protected void doRun() throws Exception { - lockIndexAndAck(index, nodes, nodeId, clusterState, indexSettings); - } - }); - } - - private void 
lockIndexAndAck(Index index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException { - try { - // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the - // master. If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock - // due to a "currently canceled recovery" or so. The shard will delete itself BEFORE the lock is released so it's guaranteed to be - // deleted by the time we get the lock - indicesService.processPendingDeletes(indexSettings.getIndex(), indexSettings, new TimeValue(30, TimeUnit.MINUTES)); - transportService.sendRequest(clusterState.nodes().getMasterNode(), - INDEX_STORE_DELETED_ACTION_NAME, new NodeIndexStoreDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); - } catch (LockObtainFailedException exc) { - logger.warn("[{}] failed to lock all shards for index - timed out after 30 seconds", index); - } catch (InterruptedException e) { - logger.warn("[{}] failed to lock all shards for index - interrupted", index); - } - } - - public interface Listener { - void onNodeIndexDeleted(Index index, String nodeId); - - void onNodeIndexStoreDeleted(Index index, String nodeId); - } - - private class NodeIndexDeletedTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception { - for (Listener listener : listeners) { - listener.onNodeIndexDeleted(message.index, message.nodeId); - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - private class NodeIndexStoreDeletedTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(NodeIndexStoreDeletedMessage message, TransportChannel channel) throws Exception { - for (Listener listener : listeners) { - listener.onNodeIndexStoreDeleted(message.index, message.nodeId); - } - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - public static class NodeIndexDeletedMessage extends TransportRequest { - - Index index; - String nodeId; - - public NodeIndexDeletedMessage() { - } - - NodeIndexDeletedMessage(Index index, String nodeId) { - this.index = index; - this.nodeId = nodeId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - index.writeTo(out); - out.writeString(nodeId); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = new Index(in); - nodeId = in.readString(); - } - } - - public static class NodeIndexStoreDeletedMessage extends TransportRequest { - - Index index; - String nodeId; - - public NodeIndexStoreDeletedMessage() { - } - - NodeIndexStoreDeletedMessage(Index index, String nodeId) { - this.index = index; - this.nodeId = nodeId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - index.writeTo(out); - out.writeString(nodeId); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = new Index(in); - nodeId = in.readString(); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 0645accb42a..b1bf01018c9 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -58,13 +59,12 @@ public class NodeMappingRefreshAction extends AbstractComponent { transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } - public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) { - final DiscoveryNodes nodes = state.nodes(); - if (nodes.getMasterNode() == null) { + public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { + if (masterNode == null) { logger.warn("can't send mapping refresh for [{}], no master known.", request.index()); return; } - transportService.sendRequest(nodes.getMasterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 5a44a645467..e6a6dea7def 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -30,7 +31,7 @@ import org.elasticsearch.cluster.MasterNodeChangePredicate; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -69,8 +70,6 @@ import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; -import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; - public class ShardStateAction extends AbstractComponent { public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; @@ -112,7 +111,7 @@ public class ShardStateAction extends AbstractComponent { waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener); } else { logger.warn("{} unexpected failure while sending request [{}] to [{}] for shard [{}]", exp, 
shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode, shardRoutingEntry); - listener.onFailure(exp instanceof RemoteTransportException ? exp.getCause() : exp); + listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp); } } }); @@ -132,14 +131,13 @@ public class ShardStateAction extends AbstractComponent { /** * Send a shard failed request to the master node to update the * cluster state. - * - * @param shardRouting the shard to fail + * @param shardRouting the shard to fail * @param sourceShardRouting the source shard requesting the failure (must be the shard itself, or the primary shard) * @param message the reason for the failure * @param failure the underlying cause of the failure * @param listener callback upon completion of the request */ - public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Throwable failure, Listener listener) { + public void shardFailed(final ShardRouting shardRouting, ShardRouting sourceShardRouting, final String message, @Nullable final Exception failure, Listener listener) { ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, sourceShardRouting, message, failure); sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardRoutingEntry, listener); @@ -185,18 +183,19 @@ public class ShardStateAction extends AbstractComponent { public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { logger.warn("{} received shard failed for {}", request.failure, request.shardRouting.shardId(), request); clusterService.submitStateUpdateTask( - "shard-failed (" + request.shardRouting + "), message [" + request.message + "]", + "shard-failed", request, ClusterStateTaskConfig.build(Priority.HIGH), shardFailedClusterStateTaskExecutor, new ClusterStateTaskListener() { @Override - public void onFailure(String source, Throwable t) { - logger.error("{} unexpected failure while failing shard [{}]", t, request.shardRouting.shardId(), request.shardRouting); + public void onFailure(String source, Exception e) { + logger.error("{} unexpected failure while failing shard [{}]", e, request.shardRouting.shardId(), request.shardRouting); try { - channel.sendResponse(t); - } catch (Throwable channelThrowable) { - logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), t, request.shardRouting); + channel.sendResponse(e); + } catch (Exception channelException) { + channelException.addSuppressed(e); + logger.warn("{} failed to send failure [{}] while failing shard [{}]", channelException, request.shardRouting.shardId(), e, request.shardRouting); } } @@ -205,8 +204,8 @@ public class ShardStateAction extends AbstractComponent { logger.error("{} no longer master while failing shard [{}]", request.shardRouting.shardId(), request.shardRouting); try { channel.sendResponse(new NotMasterException(source)); - } catch (Throwable channelThrowable) { - logger.warn("{} failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), request.shardRouting); + } catch (Exception channelException) { + logger.warn("{} failed to send no longer master while failing shard [{}]", channelException, 
request.shardRouting.shardId(), request.shardRouting); } } @@ -214,8 +213,8 @@ public class ShardStateAction extends AbstractComponent { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { try { channel.sendResponse(TransportResponse.Empty.INSTANCE); - } catch (Throwable channelThrowable) { - logger.warn("{} failed to send response while failing shard [{}]", channelThrowable, request.shardRouting.shardId(), request.shardRouting); + } catch (Exception channelException) { + logger.warn("{} failed to send response while failing shard [{}]", channelException, request.shardRouting.shardId(), request.shardRouting); } } } @@ -234,6 +233,11 @@ public class ShardStateAction extends AbstractComponent { this.logger = logger; } + @Override + public String describeTasks(List tasks) { + return tasks.stream().map(entry -> entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); + } + @Override public BatchResult execute(ClusterState currentState, List tasks) throws Exception { BatchResult.Builder batchResultBuilder = BatchResult.builder(); @@ -260,10 +264,10 @@ public class ShardStateAction extends AbstractComponent { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } batchResultBuilder.successes(tasksToFail); - } catch (Throwable t) { + } catch (Exception e) { // failures are communicated back to the requester // cluster state will not be updated in this case - batchResultBuilder.failures(tasksToFail, t); + batchResultBuilder.failures(tasksToFail, e); } partition @@ -303,21 +307,19 @@ public class ShardStateAction extends AbstractComponent { } } - RoutingNodes.RoutingNodeIterator routingNodeIterator = - currentState.getRoutingNodes().routingNodeIter(task.getShardRouting().currentNodeId()); - if (routingNodeIterator != null) { - for (ShardRouting maybe : routingNodeIterator) { - if (task.getShardRouting().isSameAllocation(maybe)) { - return ValidationResult.VALID; - } + RoutingNode routingNode = currentState.getRoutingNodes().node(task.getShardRouting().currentNodeId()); + if (routingNode != null) { + ShardRouting maybe = routingNode.getByShardId(task.getShardRouting().shardId()); + if (maybe != null && maybe.isSameAllocation(task.getShardRouting())) { + return ValidationResult.VALID; } } return ValidationResult.SHARD_MISSING; } @Override - public void clusterStatePublished(ClusterState newClusterState) { - int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size(); + public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + int numberOfUnassignedShards = clusterChangedEvent.state().getRoutingNodes().unassigned().size(); if (numberOfUnassignedShards > 0) { String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); if (logger.isTraceEnabled()) { @@ -349,7 +351,7 @@ public class ShardStateAction extends AbstractComponent { public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { logger.debug("{} received shard started for [{}]", request.shardRouting.shardId(), request); clusterService.submitStateUpdateTask( - "shard-started (" + request.shardRouting + "), reason [" + request.message + "]", + "shard-started", request, ClusterStateTaskConfig.build(Priority.URGENT), shardStartedClusterStateTaskExecutor, @@ -367,6 +369,11 @@ public class ShardStateAction extends AbstractComponent { this.logger = logger; } + @Override + public String describeTasks(List tasks) 
{ + return tasks.stream().map(entry -> entry.getShardRouting().toString()).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); + } + @Override public BatchResult execute(ClusterState currentState, List tasks) throws Exception { BatchResult.Builder builder = BatchResult.builder(); @@ -382,16 +389,16 @@ public class ShardStateAction extends AbstractComponent { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } builder.successes(tasks); - } catch (Throwable t) { - builder.failures(tasks, t); + } catch (Exception e) { + builder.failures(tasks, e); } return builder.build(maybeUpdatedState); } @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); + public void onFailure(String source, Exception e) { + logger.error("unexpected failure during [{}]", e, source); } } @@ -399,12 +406,12 @@ public class ShardStateAction extends AbstractComponent { ShardRouting shardRouting; ShardRouting sourceShardRouting; String message; - Throwable failure; + Exception failure; public ShardRoutingEntry() { } - ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Throwable failure) { + ShardRoutingEntry(ShardRouting shardRouting, ShardRouting sourceShardRouting, String message, @Nullable Exception failure) { this.shardRouting = shardRouting; this.sourceShardRouting = sourceShardRouting; this.message = message; @@ -418,10 +425,10 @@ public class ShardStateAction extends AbstractComponent { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardRouting = readShardRoutingEntry(in); - sourceShardRouting = readShardRoutingEntry(in); + shardRouting = new ShardRouting(in); + sourceShardRouting = new ShardRouting(in); message = in.readString(); - failure = in.readThrowable(); + failure = in.readException(); } @Override @@ -430,7 +437,7 @@ public class ShardStateAction extends AbstractComponent { shardRouting.writeTo(out); sourceShardRouting.writeTo(out); out.writeString(message); - out.writeThrowable(failure); + out.writeException(failure); } @Override @@ -464,9 +471,9 @@ public class ShardStateAction extends AbstractComponent { * Any other exception is communicated to the requester via * this notification. 
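When sending the failure response itself fails, the handler above now attaches the original exception to the channel exception via addSuppressed() instead of losing it. A stand-alone sketch of the idiom, with a trivial stand-in for the transport channel:

    public class SuppressedDemo {
        // simulates a transport channel whose sendResponse always fails
        static void sendResponse(Exception payload) throws Exception {
            throw new Exception("channel closed");
        }

        public static void main(String[] args) {
            Exception original = new IllegalStateException("shard failed");
            try {
                sendResponse(original);
            } catch (Exception channelException) {
                channelException.addSuppressed(original);
                // both failures now appear in a single stack trace
                channelException.printStackTrace();
            }
        }
    }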
* - * @param t the unexpected cause of the failure on the master + * @param e the unexpected cause of the failure on the master */ - default void onFailure(final Throwable t) { + default void onFailure(final Exception e) { } } diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index aae2abc0f15..e6f04c8702c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -150,8 +150,12 @@ public class ClusterBlocks extends AbstractDiffable { } } + private boolean globalBlocked(ClusterBlockLevel level) { + return global(level).isEmpty() == false; + } + public ClusterBlockException globalBlockedException(ClusterBlockLevel level) { - if (global(level).isEmpty()) { + if (globalBlocked(level) == false) { return null; } return new ClusterBlockException(global(level)); @@ -175,10 +179,7 @@ public class ClusterBlocks extends AbstractDiffable { } public boolean indexBlocked(ClusterBlockLevel level, String index) { - if (!global(level).isEmpty()) { - return true; - } - return !blocksForIndex(level, index).isEmpty(); + return globalBlocked(level) || blocksForIndex(level, index).isEmpty() == false; } public ClusterBlockException indicesBlockedException(ClusterBlockLevel level, String[] indices) { @@ -188,7 +189,7 @@ public class ClusterBlocks extends AbstractDiffable { indexIsBlocked = true; } } - if (!indexIsBlocked) { + if (globalBlocked(level) == false && indexIsBlocked == false) { return null; } Function> blocksForIndexAtLevel = index -> blocksForIndex(level, index).stream(); diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index 6d3e136eb1a..a261d28f537 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -20,10 +20,16 @@ package org.elasticsearch.cluster.health; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + /** * */ -public enum ClusterHealthStatus { +public enum ClusterHealthStatus implements Writeable { GREEN((byte) 0), YELLOW((byte) 1), RED((byte) 2); @@ -38,7 +44,21 @@ public enum ClusterHealthStatus { return value; } - public static ClusterHealthStatus fromValue(byte value) { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(value); + } + + /** + * Read from a stream. 
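The hunk that begins here makes ClusterHealthStatus Writeable: each constant owns a byte tag, writeTo() emits it, and a static factory maps bytes back through a switch (rejecting unknown values). The same pattern in stand-alone form, with java.io Data streams standing in for Elasticsearch's StreamOutput/StreamInput:

    import java.io.*;

    public enum Status {
        GREEN((byte) 0), YELLOW((byte) 1), RED((byte) 2);

        private final byte value;

        Status(byte value) { this.value = value; }

        // serialize as a single byte tag
        public void writeTo(DataOutput out) throws IOException {
            out.writeByte(value);
        }

        // read a tag back from the stream
        public static Status readFrom(DataInput in) throws IOException {
            return fromValue(in.readByte());
        }

        public static Status fromValue(byte value) {
            switch (value) {
                case 0: return GREEN;
                case 1: return YELLOW;
                case 2: return RED;
                default: throw new IllegalArgumentException("unknown status value [" + value + "]");
            }
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            YELLOW.writeTo(new DataOutputStream(bytes));
            // round-trips back to YELLOW
            System.out.println(readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
        }
    }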
+ * + * @throws IllegalArgumentException if the value is unrecognized + */ + public static ClusterHealthStatus readFrom(StreamInput in) throws IOException { + return fromValue(in.readByte()); + } + + public static ClusterHealthStatus fromValue(byte value) throws IOException { switch (value) { case 0: return GREEN; diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index 9116e9689e8..9bc483ace68 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -34,34 +34,20 @@ import java.util.Iterator; import java.util.Locale; import java.util.Map; -import static org.elasticsearch.cluster.health.ClusterShardHealth.readClusterShardHealth; - -public final class ClusterIndexHealth implements Iterable, Streamable, ToXContent { - - private String index; - - private int numberOfShards; - - private int numberOfReplicas; - - private int activeShards = 0; - - private int relocatingShards = 0; - - private int initializingShards = 0; - - private int unassignedShards = 0; - - private int activePrimaryShards = 0; - - private ClusterHealthStatus status = ClusterHealthStatus.RED; +public final class ClusterIndexHealth implements Iterable, Writeable, ToXContent { + private final String index; + private final int numberOfShards; + private final int numberOfReplicas; + private final int activeShards; + private final int relocatingShards; + private final int initializingShards; + private final int unassignedShards; + private final int activePrimaryShards; + private final ClusterHealthStatus status; private final Map shards = new HashMap<>(); - private ClusterIndexHealth() { - } - - public ClusterIndexHealth(IndexMetaData indexMetaData, IndexRoutingTable indexRoutingTable) { + public ClusterIndexHealth(final IndexMetaData indexMetaData, final IndexRoutingTable indexRoutingTable) { this.index = indexMetaData.getIndex().getName(); this.numberOfShards = indexMetaData.getNumberOfShards(); this.numberOfReplicas = indexMetaData.getNumberOfReplicas(); @@ -72,26 +58,55 @@ public final class ClusterIndexHealth implements Iterable, S } // update the index status - status = ClusterHealthStatus.GREEN; - + ClusterHealthStatus computeStatus = ClusterHealthStatus.GREEN; + int computeActivePrimaryShards = 0; + int computeActiveShards = 0; + int computeRelocatingShards = 0; + int computeInitializingShards = 0; + int computeUnassignedShards = 0; for (ClusterShardHealth shardHealth : shards.values()) { if (shardHealth.isPrimaryActive()) { - activePrimaryShards++; + computeActivePrimaryShards++; } - activeShards += shardHealth.getActiveShards(); - relocatingShards += shardHealth.getRelocatingShards(); - initializingShards += shardHealth.getInitializingShards(); - unassignedShards += shardHealth.getUnassignedShards(); + computeActiveShards += shardHealth.getActiveShards(); + computeRelocatingShards += shardHealth.getRelocatingShards(); + 
computeInitializingShards += shardHealth.getInitializingShards(); + computeUnassignedShards += shardHealth.getUnassignedShards(); if (shardHealth.getStatus() == ClusterHealthStatus.RED) { - status = ClusterHealthStatus.RED; - } else if (shardHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) { + computeStatus = ClusterHealthStatus.RED; + } else if (shardHealth.getStatus() == ClusterHealthStatus.YELLOW && computeStatus != ClusterHealthStatus.RED) { // do not override an existing red - status = ClusterHealthStatus.YELLOW; + computeStatus = ClusterHealthStatus.YELLOW; } } if (shards.isEmpty()) { // might be since none has been created yet (two phase index creation) - status = ClusterHealthStatus.RED; + computeStatus = ClusterHealthStatus.RED; + } + + this.status = computeStatus; + this.activePrimaryShards = computeActivePrimaryShards; + this.activeShards = computeActiveShards; + this.relocatingShards = computeRelocatingShards; + this.initializingShards = computeInitializingShards; + this.unassignedShards = computeUnassignedShards; + } + + public ClusterIndexHealth(final StreamInput in) throws IOException { + index = in.readString(); + numberOfShards = in.readVInt(); + numberOfReplicas = in.readVInt(); + activePrimaryShards = in.readVInt(); + activeShards = in.readVInt(); + relocatingShards = in.readVInt(); + initializingShards = in.readVInt(); + unassignedShards = in.readVInt(); + status = ClusterHealthStatus.fromValue(in.readByte()); + + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + ClusterShardHealth shardHealth = new ClusterShardHealth(in); + shards.put(shardHealth.getId(), shardHealth); } } @@ -140,33 +155,8 @@ public final class ClusterIndexHealth implements Iterable, S return shards.values().iterator(); } - public static ClusterIndexHealth readClusterIndexHealth(StreamInput in) throws IOException { - ClusterIndexHealth indexHealth = new ClusterIndexHealth(); - indexHealth.readFrom(in); - return indexHealth; - } - @Override - public void readFrom(StreamInput in) throws IOException { - index = in.readString(); - numberOfShards = in.readVInt(); - numberOfReplicas = in.readVInt(); - activePrimaryShards = in.readVInt(); - activeShards = in.readVInt(); - relocatingShards = in.readVInt(); - initializingShards = in.readVInt(); - unassignedShards = in.readVInt(); - status = ClusterHealthStatus.fromValue(in.readByte()); - - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - ClusterShardHealth shardHealth = readClusterShardHealth(in); - shards.put(shardHealth.getId(), shardHealth); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(final StreamOutput out) throws IOException { out.writeString(index); out.writeVInt(numberOfShards); out.writeVInt(numberOfReplicas); @@ -183,42 +173,40 @@ public final class ClusterIndexHealth implements Iterable, S } } - static final class Fields { - static final String STATUS = "status"; - static final String NUMBER_OF_SHARDS = "number_of_shards"; - static final String NUMBER_OF_REPLICAS = "number_of_replicas"; - static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; - static final String ACTIVE_SHARDS = "active_shards"; - static final String RELOCATING_SHARDS = "relocating_shards"; - static final String INITIALIZING_SHARDS = "initializing_shards"; - static final String UNASSIGNED_SHARDS = "unassigned_shards"; - static final String SHARDS = "shards"; - static final String PRIMARY_ACTIVE = "primary_active"; - } + private static final 
String STATUS = "status"; + private static final String NUMBER_OF_SHARDS = "number_of_shards"; + private static final String NUMBER_OF_REPLICAS = "number_of_replicas"; + private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; + private static final String ACTIVE_SHARDS = "active_shards"; + private static final String RELOCATING_SHARDS = "relocating_shards"; + private static final String INITIALIZING_SHARDS = "initializing_shards"; + private static final String UNASSIGNED_SHARDS = "unassigned_shards"; + private static final String SHARDS = "shards"; + private static final String PRIMARY_ACTIVE = "primary_active"; @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.STATUS, getStatus().name().toLowerCase(Locale.ROOT)); - builder.field(Fields.NUMBER_OF_SHARDS, getNumberOfShards()); - builder.field(Fields.NUMBER_OF_REPLICAS, getNumberOfReplicas()); - builder.field(Fields.ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards()); - builder.field(Fields.ACTIVE_SHARDS, getActiveShards()); - builder.field(Fields.RELOCATING_SHARDS, getRelocatingShards()); - builder.field(Fields.INITIALIZING_SHARDS, getInitializingShards()); - builder.field(Fields.UNASSIGNED_SHARDS, getUnassignedShards()); + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.field(STATUS, getStatus().name().toLowerCase(Locale.ROOT)); + builder.field(NUMBER_OF_SHARDS, getNumberOfShards()); + builder.field(NUMBER_OF_REPLICAS, getNumberOfReplicas()); + builder.field(ACTIVE_PRIMARY_SHARDS, getActivePrimaryShards()); + builder.field(ACTIVE_SHARDS, getActiveShards()); + builder.field(RELOCATING_SHARDS, getRelocatingShards()); + builder.field(INITIALIZING_SHARDS, getInitializingShards()); + builder.field(UNASSIGNED_SHARDS, getUnassignedShards()); if ("shards".equals(params.param("level", "indices"))) { - builder.startObject(Fields.SHARDS); + builder.startObject(SHARDS); for (ClusterShardHealth shardHealth : shards.values()) { builder.startObject(Integer.toString(shardHealth.getId())); - builder.field(Fields.STATUS, shardHealth.getStatus().name().toLowerCase(Locale.ROOT)); - builder.field(Fields.PRIMARY_ACTIVE, shardHealth.isPrimaryActive()); - builder.field(Fields.ACTIVE_SHARDS, shardHealth.getActiveShards()); - builder.field(Fields.RELOCATING_SHARDS, shardHealth.getRelocatingShards()); - builder.field(Fields.INITIALIZING_SHARDS, shardHealth.getInitializingShards()); - builder.field(Fields.UNASSIGNED_SHARDS, shardHealth.getUnassignedShards()); + builder.field(STATUS, shardHealth.getStatus().name().toLowerCase(Locale.ROOT)); + builder.field(PRIMARY_ACTIVE, shardHealth.isPrimaryActive()); + builder.field(ACTIVE_SHARDS, shardHealth.getActiveShards()); + builder.field(RELOCATING_SHARDS, shardHealth.getRelocatingShards()); + builder.field(INITIALIZING_SHARDS, shardHealth.getInitializingShards()); + builder.field(UNASSIGNED_SHARDS, shardHealth.getUnassignedShards()); builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java index 725f89121b7..2c3479b94d8 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java @@ -23,57 +23,69 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; 
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; -public final class ClusterShardHealth implements Streamable { +public final class ClusterShardHealth implements Writeable { - private int shardId; + private final int shardId; + private final ClusterHealthStatus status; + private final int activeShards; + private final int relocatingShards; + private final int initializingShards; + private final int unassignedShards; + private final boolean primaryActive; - ClusterHealthStatus status = ClusterHealthStatus.RED; - - private int activeShards = 0; - - private int relocatingShards = 0; - - private int initializingShards = 0; - - private int unassignedShards = 0; - - private boolean primaryActive = false; - - private ClusterShardHealth() { - - } - - public ClusterShardHealth(int shardId, final IndexShardRoutingTable shardRoutingTable) { + public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) { this.shardId = shardId; + int computeActiveShards = 0; + int computeRelocatingShards = 0; + int computeInitializingShards = 0; + int computeUnassignedShards = 0; + boolean computePrimaryActive = false; for (ShardRouting shardRouting : shardRoutingTable) { if (shardRouting.active()) { - activeShards++; + computeActiveShards++; if (shardRouting.relocating()) { // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it - relocatingShards++; + computeRelocatingShards++; } if (shardRouting.primary()) { - primaryActive = true; + computePrimaryActive = true; } } else if (shardRouting.initializing()) { - initializingShards++; + computeInitializingShards++; } else if (shardRouting.unassigned()) { - unassignedShards++; + computeUnassignedShards++; } } - if (primaryActive) { - if (activeShards == shardRoutingTable.size()) { - status = ClusterHealthStatus.GREEN; + ClusterHealthStatus computeStatus; + if (computePrimaryActive) { + if (computeActiveShards == shardRoutingTable.size()) { + computeStatus = ClusterHealthStatus.GREEN; } else { - status = ClusterHealthStatus.YELLOW; + computeStatus = ClusterHealthStatus.YELLOW; } } else { - status = ClusterHealthStatus.RED; + computeStatus = ClusterHealthStatus.RED; } + this.status = computeStatus; + this.activeShards = computeActiveShards; + this.relocatingShards = computeRelocatingShards; + this.initializingShards = computeInitializingShards; + this.unassignedShards = computeUnassignedShards; + this.primaryActive = computePrimaryActive; + } + + public ClusterShardHealth(final StreamInput in) throws IOException { + shardId = in.readVInt(); + status = ClusterHealthStatus.fromValue(in.readByte()); + activeShards = in.readVInt(); + relocatingShards = in.readVInt(); + initializingShards = in.readVInt(); + unassignedShards = in.readVInt(); + primaryActive = in.readBoolean(); } public int getId() { @@ -104,25 +116,8 @@ public final class ClusterShardHealth implements Streamable { return unassignedShards; } - static ClusterShardHealth readClusterShardHealth(StreamInput in) throws IOException { - ClusterShardHealth ret = new ClusterShardHealth(); - ret.readFrom(in); - return ret; - } - @Override - public void readFrom(StreamInput in) throws IOException { - shardId = in.readVInt(); - status = ClusterHealthStatus.fromValue(in.readByte()); - activeShards = in.readVInt(); - relocatingShards = 
in.readVInt(); - initializingShards = in.readVInt(); - unassignedShards = in.readVInt(); - primaryActive = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(shardId); out.writeByte(status.value()); out.writeVInt(activeShards); diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java index 5855ef9445c..8aeb110c370 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java @@ -18,16 +18,13 @@ */ package org.elasticsearch.cluster.health; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -37,46 +34,25 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import static org.elasticsearch.cluster.health.ClusterIndexHealth.readClusterIndexHealth; +public final class ClusterStateHealth implements Iterable, Writeable { -public final class ClusterStateHealth implements Iterable, Streamable { - private int numberOfNodes = 0; - private int numberOfDataNodes = 0; - private int activeShards = 0; - private int relocatingShards = 0; - private int activePrimaryShards = 0; - private int initializingShards = 0; - private int unassignedShards = 0; - private double activeShardsPercent = 100; - private ClusterHealthStatus status = ClusterHealthStatus.RED; - private Map indices = new HashMap<>(); - - public static ClusterStateHealth readClusterHealth(StreamInput in) throws IOException { - ClusterStateHealth clusterStateHealth = new ClusterStateHealth(); - clusterStateHealth.readFrom(in); - return clusterStateHealth; - } - - ClusterStateHealth() { - // only intended for serialization - } - - /** - * Creates a new ClusterStateHealth instance based on cluster meta data and its routing table as a convenience. - * - * @param clusterMetaData Current cluster meta data. Must not be null. - * @param routingTables Current routing table. Must not be null. - */ - public ClusterStateHealth(MetaData clusterMetaData, RoutingTable routingTables) { - this(ClusterState.builder(ClusterName.DEFAULT).metaData(clusterMetaData).routingTable(routingTables).build()); - } + private final int numberOfNodes; + private final int numberOfDataNodes; + private final int activeShards; + private final int relocatingShards; + private final int activePrimaryShards; + private final int initializingShards; + private final int unassignedShards; + private final double activeShardsPercent; + private final ClusterHealthStatus status; + private final Map indices = new HashMap<>(); /** * Creates a new ClusterStateHealth instance considering the current cluster state and all indices in the cluster. * * @param clusterState The current cluster state. Must not be null. 
*/ - public ClusterStateHealth(ClusterState clusterState) { + public ClusterStateHealth(final ClusterState clusterState) { this(clusterState, clusterState.metaData().getConcreteAllIndices()); } @@ -86,7 +62,7 @@ public final class ClusterStateHealth implements Iterable, S * @param clusterState The current cluster state. Must not be null. * @param concreteIndices An array of index names to consider. Must not be null but may be empty. */ - public ClusterStateHealth(ClusterState clusterState, String[] concreteIndices) { + public ClusterStateHealth(final ClusterState clusterState, final String[] concreteIndices) { numberOfNodes = clusterState.nodes().getSize(); numberOfDataNodes = clusterState.nodes().getDataNodes().size(); @@ -102,27 +78,39 @@ public final class ClusterStateHealth implements Iterable, S indices.put(indexHealth.getIndex(), indexHealth); } - status = ClusterHealthStatus.GREEN; + ClusterHealthStatus computeStatus = ClusterHealthStatus.GREEN; + int computeActivePrimaryShards = 0; + int computeActiveShards = 0; + int computeRelocatingShards = 0; + int computeInitializingShards = 0; + int computeUnassignedShards = 0; for (ClusterIndexHealth indexHealth : indices.values()) { - activePrimaryShards += indexHealth.getActivePrimaryShards(); - activeShards += indexHealth.getActiveShards(); - relocatingShards += indexHealth.getRelocatingShards(); - initializingShards += indexHealth.getInitializingShards(); - unassignedShards += indexHealth.getUnassignedShards(); + computeActivePrimaryShards += indexHealth.getActivePrimaryShards(); + computeActiveShards += indexHealth.getActiveShards(); + computeRelocatingShards += indexHealth.getRelocatingShards(); + computeInitializingShards += indexHealth.getInitializingShards(); + computeUnassignedShards += indexHealth.getUnassignedShards(); if (indexHealth.getStatus() == ClusterHealthStatus.RED) { - status = ClusterHealthStatus.RED; - } else if (indexHealth.getStatus() == ClusterHealthStatus.YELLOW && status != ClusterHealthStatus.RED) { - status = ClusterHealthStatus.YELLOW; + computeStatus = ClusterHealthStatus.RED; + } else if (indexHealth.getStatus() == ClusterHealthStatus.YELLOW && computeStatus != ClusterHealthStatus.RED) { + computeStatus = ClusterHealthStatus.YELLOW; } } if (clusterState.blocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE)) { - status = ClusterHealthStatus.RED; + computeStatus = ClusterHealthStatus.RED; } + this.status = computeStatus; + this.activePrimaryShards = computeActivePrimaryShards; + this.activeShards = computeActiveShards; + this.relocatingShards = computeRelocatingShards; + this.initializingShards = computeInitializingShards; + this.unassignedShards = computeUnassignedShards; + // shortcut on green - if (status.equals(ClusterHealthStatus.GREEN)) { + if (computeStatus.equals(ClusterHealthStatus.GREEN)) { this.activeShardsPercent = 100; } else { List shardRoutings = clusterState.getRoutingTable().allShards(); @@ -136,6 +124,23 @@ public final class ClusterStateHealth implements Iterable, S } } + public ClusterStateHealth(final StreamInput in) throws IOException { + activePrimaryShards = in.readVInt(); + activeShards = in.readVInt(); + relocatingShards = in.readVInt(); + initializingShards = in.readVInt(); + unassignedShards = in.readVInt(); + numberOfNodes = in.readVInt(); + numberOfDataNodes = in.readVInt(); + status = ClusterHealthStatus.fromValue(in.readByte()); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + ClusterIndexHealth indexHealth = new ClusterIndexHealth(in); + 
indices.put(indexHealth.getIndex(), indexHealth); + } + activeShardsPercent = in.readDouble(); + } + public int getActiveShards() { return activeShards; } @@ -182,25 +187,7 @@ public final class ClusterStateHealth implements Iterable, S } @Override - public void readFrom(StreamInput in) throws IOException { - activePrimaryShards = in.readVInt(); - activeShards = in.readVInt(); - relocatingShards = in.readVInt(); - initializingShards = in.readVInt(); - unassignedShards = in.readVInt(); - numberOfNodes = in.readVInt(); - numberOfDataNodes = in.readVInt(); - status = ClusterHealthStatus.fromValue(in.readByte()); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - ClusterIndexHealth indexHealth = readClusterIndexHealth(in); - indices.put(indexHealth.getIndex(), indexHealth); - } - activeShardsPercent = in.readDouble(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(activePrimaryShards); out.writeVInt(activeShards); out.writeVInt(relocatingShards); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index a3d776a4edb..d98187cc6c1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.InvalidAliasNameException; import java.io.IOException; +import java.util.Optional; /** * Validator for an alias, to be used before adding an alias to the index metadata @@ -83,7 +84,7 @@ public class AliasValidator extends AbstractComponent { if (Strings.hasLength(alias.filter())) { try (XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter())) { parser.map(); - } catch (Throwable e) { + } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); } } @@ -120,7 +121,7 @@ public class AliasValidator extends AbstractComponent { assert queryShardContext != null; try (XContentParser parser = XContentFactory.xContent(filter).createParser(filter)) { validateAliasFilter(parser, queryShardContext); - } catch (Throwable e) { + } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } @@ -134,20 +135,17 @@ public class AliasValidator extends AbstractComponent { assert queryShardContext != null; try (XContentParser parser = XContentFactory.xContent(filter).createParser(filter)) { validateAliasFilter(parser, queryShardContext); - } catch (Throwable e) { + } catch (Exception e) { throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } - private void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException { - try { - queryShardContext.reset(); - QueryParseContext queryParseContext = queryShardContext.newParseContext(parser); - QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(queryParseContext.parseInnerQueryBuilder(), queryShardContext); + private static void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException { + QueryParseContext queryParseContext = queryShardContext.newParseContext(parser); + Optional parseInnerQueryBuilder = queryParseContext.parseInnerQueryBuilder(); + if 
(parseInnerQueryBuilder.isPresent()) { + QueryBuilder queryBuilder = QueryBuilder.rewriteQuery(parseInnerQueryBuilder.get(), queryShardContext); queryBuilder.toFilter(queryShardContext); - } finally { - queryShardContext.reset(); - parser.close(); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 07bb941cc86..94dd3c63da5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -54,7 +54,7 @@ import java.util.function.BiFunction; * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. */ -final public class IndexGraveyard implements MetaData.Custom { +public final class IndexGraveyard implements MetaData.Custom { /** * Setting for the maximum tombstones allowed in the cluster state; @@ -123,6 +123,18 @@ final public class IndexGraveyard implements MetaData.Custom { return tombstones; } + /** + * Returns true if the graveyard contains a tombstone for the given index. + */ + public boolean containsIndex(final Index index) { + for (Tombstone tombstone : tombstones) { + if (tombstone.getIndex().equals(index)) { + return true; + } + } + return false; + } + @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startArray(TOMBSTONES_FIELD.getPreferredName()); @@ -176,7 +188,7 @@ final public class IndexGraveyard implements MetaData.Custom { /** * A class to build an IndexGraveyard. */ - final public static class Builder { + public static final class Builder { private List tombstones; private int numPurged = -1; private final long currentTime = System.currentTimeMillis(); @@ -207,8 +219,10 @@ final public class IndexGraveyard implements MetaData.Custom { /** * Add a set of deleted indexes to the list of tombstones in the cluster state. */ - public Builder addTombstones(final Set indices) { - indices.stream().forEach(this::addTombstone); + public Builder addTombstones(final Index[] indices) { + for (Index index : indices) { + addTombstone(index); + } return this; } @@ -259,7 +273,7 @@ final public class IndexGraveyard implements MetaData.Custom { /** * A class representing a diff of two IndexGraveyard objects. */ - final public static class IndexGraveyardDiff implements Diff { + public static final class IndexGraveyardDiff implements Diff { private final List added; private final int removedCount; @@ -340,7 +354,7 @@ final public class IndexGraveyard implements MetaData.Custom { /** * An individual tombstone entry for representing a deleted index. */ - final public static class Tombstone implements ToXContent, Writeable { + public static final class Tombstone implements ToXContent, Writeable { private static final String INDEX_KEY = "index"; private static final String DELETE_DATE_IN_MILLIS_KEY = "delete_date_in_millis"; @@ -435,7 +449,7 @@ final public class IndexGraveyard implements MetaData.Custom { /** * A builder for building tombstone entries. 
*/ - final private static class Builder { + private static final class Builder { private Index index; private long deleteDateInMillis = -1L; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 52721411f46..b49b893f232 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -215,6 +216,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope); public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope); + public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.initial_recovery."); // this is only settable internally, not a registered setting!! public static final IndexMetaData PROTO = IndexMetaData.builder("") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) @@ -222,6 +225,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations"; static final String KEY_VERSION = "version"; + static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; static final String KEY_SETTINGS = "settings"; static final String KEY_STATE = "state"; static final String KEY_MAPPINGS = "mappings"; @@ -229,6 +233,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; + private final int routingNumShards; + private final int routingFactor; private final int numberOfShards; private final int numberOfReplicas; @@ -249,11 +255,12 @@ public class IndexMetaData implements Diffable, FromXContentBuild private final ImmutableOpenIntMap<Set<String>> activeAllocationIds; - private transient final int totalNumberOfShards; + private final transient int totalNumberOfShards; private final DiscoveryNodeFilters requireFilters; private final DiscoveryNodeFilters includeFilters; private final DiscoveryNodeFilters excludeFilters; + private final DiscoveryNodeFilters initialRecoveryFilters; private final Version indexCreatedVersion; private final Version indexUpgradedVersion; @@ -262,8 +269,9 @@ public class IndexMetaData implements Diffable, FromXContentBuild private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds, - DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, - Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) { + DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters
initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, + Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion, + int routingNumShards) { this.index = index; this.version = version; @@ -281,9 +289,13 @@ public class IndexMetaData implements Diffable, FromXContentBuild this.requireFilters = requireFilters; this.includeFilters = includeFilters; this.excludeFilters = excludeFilters; + this.initialRecoveryFilters = initialRecoveryFilters; this.indexCreatedVersion = indexCreatedVersion; this.indexUpgradedVersion = indexUpgradedVersion; this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion; + this.routingNumShards = routingNumShards; + this.routingFactor = routingNumShards / numberOfShards; + assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } public Index getIndex() { @@ -383,6 +395,14 @@ public class IndexMetaData implements Diffable, FromXContentBuild return mappings.get(mappingType); } + public static final Setting<String> INDEX_SHRINK_SOURCE_UUID = Setting.simpleString("index.shrink.source.uuid"); + public static final Setting<String> INDEX_SHRINK_SOURCE_NAME = Setting.simpleString("index.shrink.source.name"); + + + public Index getMergeSourceIndex() { + return INDEX_SHRINK_SOURCE_UUID.exists(settings) ? new Index(INDEX_SHRINK_SOURCE_NAME.get(settings), INDEX_SHRINK_SOURCE_UUID.get(settings)) : null; + } + /** * Sometimes, the default mapping exists and an actual mapping is not created yet (introduced), * in this case, we want to return the default mapping in case it has some default mapping definitions. @@ -422,6 +442,11 @@ public class IndexMetaData implements Diffable, FromXContentBuild return requireFilters; } + @Nullable + public DiscoveryNodeFilters getInitialRecoveryFilters() { + return initialRecoveryFilters; + } + @Nullable public DiscoveryNodeFilters includeFilters() { return includeFilters; @@ -465,7 +490,12 @@ public class IndexMetaData implements Diffable, FromXContentBuild if (!customs.equals(that.customs)) { return false; } - + if (routingNumShards != that.routingNumShards) { + return false; + } + if (routingFactor != that.routingFactor) { + return false; + } if (Arrays.equals(primaryTerms, that.primaryTerms) == false) { return false; } @@ -484,6 +514,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild result = 31 * result + settings.hashCode(); result = 31 * result + mappings.hashCode(); result = 31 * result + customs.hashCode(); + result = 31 * result + Long.hashCode(routingFactor); + result = 31 * result + Long.hashCode(routingNumShards); result = 31 * result + Arrays.hashCode(primaryTerms); result = 31 * result + activeAllocationIds.hashCode(); return result; @@ -514,6 +546,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild private static class IndexMetaDataDiff implements Diff<IndexMetaData> { private final String index; + private final int routingNumShards; private final long version; private final long[] primaryTerms; private final State state; @@ -526,6 +559,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { index = after.index.getName(); version = after.version; + routingNumShards = after.routingNumShards; state = after.state; settings = after.settings; primaryTerms = after.primaryTerms; @@ -538,6 +572,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild
public IndexMetaDataDiff(StreamInput in) throws IOException { index = in.readString(); + routingNumShards = in.readInt(); version = in.readLong(); state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); @@ -563,6 +598,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); + out.writeInt(routingNumShards); out.writeLong(version); out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); @@ -577,6 +613,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public IndexMetaData apply(IndexMetaData part) { Builder builder = builder(index); builder.version(version); + builder.setRoutingNumShards(routingNumShards); builder.state(state); builder.settings(settings); builder.primaryTerms(primaryTerms); @@ -592,6 +629,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public IndexMetaData readFrom(StreamInput in) throws IOException { Builder builder = new Builder(in.readString()); builder.version(in.readLong()); + builder.setRoutingNumShards(in.readInt()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); builder.primaryTerms(in.readVLongArray()); @@ -624,6 +662,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public void writeTo(StreamOutput out) throws IOException { out.writeString(index.getName()); // uuid will come as part of settings out.writeLong(version); + out.writeInt(routingNumShards); out.writeByte(state.id()); writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); @@ -666,6 +705,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases; private final ImmutableOpenMap.Builder<String, Custom> customs; private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds; + private Integer routingNumShards; public Builder(String index) { this.index = index; @@ -684,6 +724,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); this.customs = ImmutableOpenMap.builder(indexMetaData.customs); + this.routingNumShards = indexMetaData.routingNumShards; this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds); } @@ -701,6 +742,26 @@ public class IndexMetaData implements Diffable, FromXContentBuild return this; } + /** + * Sets the number of shards that should be used for routing. This should only be used if the number of shards in + * an index has changed, i.e. if the index is shrunk. + */ + public Builder setRoutingNumShards(int routingNumShards) { + this.routingNumShards = routingNumShards; + return this; + } + + /** + * Returns the number of shards that should be used for routing. By default this method will return the number of shards + * for this index. + * + * @see #setRoutingNumShards(int) + * @see #numberOfShards() + */ + public int getRoutingNumShards() { + return routingNumShards == null ?
numberOfShards() : routingNumShards; + } + public int numberOfShards() { return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1); } @@ -884,6 +945,13 @@ public class IndexMetaData implements Diffable, FromXContentBuild } else { excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); } + Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.get(settings).getAsMap(); + final DiscoveryNodeFilters initialRecoveryFilters; + if (initialRecoveryMap.isEmpty()) { + initialRecoveryFilters = null; + } else { + initialRecoveryFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, initialRecoveryMap); + } Version indexCreatedVersion = Version.indexCreated(settings); Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion); String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE); @@ -907,14 +975,15 @@ public class IndexMetaData implements Diffable, FromXContentBuild final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion); + tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards()); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(indexMetaData.getIndex().getName()); builder.field(KEY_VERSION, indexMetaData.getVersion()); + builder.field(KEY_ROUTING_NUM_SHARDS, indexMetaData.getRoutingNumShards()); builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH)); boolean binary = params.paramAsBoolean("binary", false); @@ -1075,6 +1144,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild builder.state(State.fromString(parser.text())); } else if (KEY_VERSION.equals(currentFieldName)) { builder.version(parser.longValue()); + } else if (KEY_ROUTING_NUM_SHARDS.equals(currentFieldName)) { + builder.setRoutingNumShards(parser.intValue()); } else { throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); } @@ -1149,4 +1220,68 @@ public class IndexMetaData implements Diffable, FromXContentBuild return Builder.fromXContent(parser); } }; + + /** + * Returns the number of shards that should be used for routing. This basically defines the hash space we use in + * {@link org.elasticsearch.cluster.routing.OperationRouting#generateShardId(IndexMetaData, String, String)} to route documents + * to shards based on their ID or their specific routing value. The default value is {@link #getNumberOfShards()}. This value only + * changes if an index is shrunk. + */ + public int getRoutingNumShards() { + return routingNumShards; + } + + /** + * Returns the routing factor for this index. The default is 1. + * + * @see #getRoutingFactor(IndexMetaData, int) for details + */ + public int getRoutingFactor() { + return routingFactor; + } + + /** + * Returns the source shard ids to shrink into the given shard id.
+ * @param shardId the id of the target shard to shrink to + * @param sourceIndexMetadata the source index metadata + * @param numTargetShards the total number of shards in the target index + * @return a set of shard IDs to shrink into the given shard ID. + */ + public static Set<ShardId> selectShrinkShards(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) { + if (shardId >= numTargetShards) { + throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be greater than the shard id: " + + shardId); + } + int routingFactor = getRoutingFactor(sourceIndexMetadata, numTargetShards); + Set<ShardId> shards = new HashSet<>(routingFactor); + for (int i = shardId * routingFactor; i < routingFactor * shardId + routingFactor; i++) { + shards.add(new ShardId(sourceIndexMetadata.getIndex(), i)); + } + return shards; + } + + /** + * Returns the routing factor for a shrunk index with the given number of target shards. + * This factor is used in the hash function in + * {@link org.elasticsearch.cluster.routing.OperationRouting#generateShardId(IndexMetaData, String, String)} to guarantee consistent + * hashing / routing of documents even if the number of shards has changed (i.e. a shrunk index). + * + * @param sourceIndexMetadata the metadata of the source index + * @param targetNumberOfShards the total number of shards in the target index + * @return the routing factor for a shrunk index with the given number of target shards. + * @throws IllegalArgumentException if the number of source shards is less than the number of target shards or if the source shards + * are not divisible by the number of target shards. + */ + public static int getRoutingFactor(IndexMetaData sourceIndexMetadata, int targetNumberOfShards) { + int sourceNumberOfShards = sourceIndexMetadata.getNumberOfShards(); + if (sourceNumberOfShards < targetNumberOfShards) { + throw new IllegalArgumentException("the number of target shards must be less than the number of source shards"); + } + int factor = sourceNumberOfShards / targetNumberOfShards; + if (factor * targetNumberOfShards != sourceNumberOfShards || factor <= 1) { + throw new IllegalArgumentException("the number of source shards [" + sourceNumberOfShards + "] must be a multiple of [" + + targetNumberOfShards + "]"); + } + return factor; + } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2abbea04d51..df53395fe27 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.regex.Regex; @@ -57,7 +56,6 @@ public class IndexNameExpressionResolver extends AbstractComponent { private final List<ExpressionResolver> expressionResolvers; private final DateMathExpressionResolver dateMathExpressionResolver; - @Inject public IndexNameExpressionResolver(Settings settings) { super(settings); expressionResolvers = Arrays.asList( @@ -490,7 +488,7 @@ public class IndexNameExpressionResolver extends
AbstractComponent { return false; } - final static class Context { + static final class Context { private final ClusterState state; private final IndicesOptions options; @@ -553,7 +551,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { /** * Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases */ - final static class WildcardExpressionResolver implements ExpressionResolver { + static final class WildcardExpressionResolver implements ExpressionResolver { @Override public List resolve(Context context, List expressions) { @@ -740,7 +738,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { } } - final static class DateMathExpressionResolver implements ExpressionResolver { + static final class DateMathExpressionResolver implements ExpressionResolver { private static final String EXPRESSION_LEFT_BOUND = "<"; private static final String EXPRESSION_RIGHT_BOUND = ">"; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java index 95accc4db80..0ec3c5f863b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateFilter.java @@ -32,7 +32,7 @@ public interface IndexTemplateFilter { */ boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template); - static class Compound implements IndexTemplateFilter { + class Compound implements IndexTemplateFilter { private IndexTemplateFilter[] filters; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index 981e6dc1914..78206cd1182 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,11 +19,8 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -34,11 +31,9 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; -import java.util.Arrays; import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; @@ -82,8 +77,6 @@ public class MappingMetaData extends AbstractDiffable { public static class Timestamp { - private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis"); - public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException { try { return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java 
b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index a8a9e1e1251..6fdecf542f5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -1187,7 +1187,7 @@ public class MetaData implements Iterable, Diffable, Fr } } - private final static ToXContent.Params FORMAT_PARAMS; + private static final ToXContent.Params FORMAT_PARAMS; static { Map params = new HashMap<>(2); params.put("binary", "true"); @@ -1198,7 +1198,7 @@ public class MetaData implements Iterable, Diffable, Fr /** * State format for {@link MetaData} to write to and load from disk */ - public final static MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) { + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) { @Override public void toXContent(XContentBuilder builder, MetaData state) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index c8c352fc46d..4f7aa68d7de 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -31,11 +31,15 @@ import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData.Custom; import org.elasticsearch.cluster.metadata.IndexMetaData.State; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; @@ -50,11 +54,10 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; @@ -65,7 +68,6 @@ import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndexCreationException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexNameException; -import org.elasticsearch.script.ScriptService; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -80,6 +82,8 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; 
+import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; @@ -93,13 +97,12 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C */ public class MetaDataCreateIndexService extends AbstractComponent { - public final static int MAX_INDEX_NAME_BYTES = 255; + public static final int MAX_INDEX_NAME_BYTES = 255; private static final DefaultIndexTemplateFilter DEFAULT_INDEX_TEMPLATE_FILTER = new DefaultIndexTemplateFilter(); private final ClusterService clusterService; private final IndicesService indicesService; private final AllocationService allocationService; - private final Version version; private final AliasValidator aliasValidator; private final IndexTemplateFilter indexTemplateFilter; private final Environment env; @@ -110,13 +113,12 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Inject public MetaDataCreateIndexService(Settings settings, ClusterService clusterService, IndicesService indicesService, AllocationService allocationService, - Version version, AliasValidator aliasValidator, + AliasValidator aliasValidator, Set indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; this.allocationService = allocationService; - this.version = version; this.aliasValidator = aliasValidator; this.env = env; this.nodeServicesProvider = nodeServicesProvider; @@ -212,7 +214,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { List templateNames = new ArrayList<>(); for (Map.Entry entry : request.mappings().entrySet()) { - mappings.put(entry.getKey(), parseMapping(entry.getValue())); + mappings.put(entry.getKey(), MapperService.parseMapping(entry.getValue())); } for (Map.Entry entry : request.customs().entrySet()) { @@ -224,9 +226,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { templateNames.add(template.getName()); for (ObjectObjectCursor cursor : template.mappings()) { if (mappings.containsKey(cursor.key)) { - XContentHelper.mergeDefaults(mappings.get(cursor.key), parseMapping(cursor.value.string())); + XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(cursor.value.string())); } else { - mappings.put(cursor.key, parseMapping(cursor.value.string())); + mappings.put(cursor.key, MapperService.parseMapping(cursor.value.string())); } } // handle custom @@ -264,7 +266,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { templatesAliases.put(aliasMetaData.alias(), aliasMetaData); } } - Settings.Builder indexSettingsBuilder = Settings.builder(); // apply templates, here, in reverse order, since first ones are better matching for (int i = templates.size() - 1; i >= 0; i--) { @@ -284,7 +285,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { DiscoveryNodes nodes = currentState.nodes(); - final Version createdVersion = Version.smallest(version, nodes.getSmallestNonClientNodeVersion()); + final Version createdVersion = Version.smallest(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); } @@ -293,36 +294,30 @@ public class 
MetaDataCreateIndexService extends AbstractComponent { } indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + final Index shrinkFromIndex = request.shrinkFrom(); + int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build()); + if (shrinkFromIndex != null) { + prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, + request.index()); + IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); + routingNumShards = sourceMetaData.getRoutingNumShards(); + } Settings actualIndexSettings = indexSettingsBuilder.build(); - + IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()) + .setRoutingNumShards(routingNumShards); // Set up everything, now locally create the index to see that things are ok, and apply - final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build(); + final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build(); // create the index here (on the master) to validate it can be created, as well as adding the mapping final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); createdIndex = indexService.index(); // now add the mappings MapperService mapperService = indexService.mapperService(); - // first, add the default mapping - if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) { - try { - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(XContentFactory.jsonBuilder().map(mappings.get(MapperService.DEFAULT_MAPPING)).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes()); - } catch (Exception e) { - removalReason = "failed on parsing default mapping on index creation"; - throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, MapperService.DEFAULT_MAPPING, e.getMessage()); - } - } - for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) { - if (entry.getKey().equals(MapperService.DEFAULT_MAPPING)) { - continue; - } - try { - // apply the default here, it's the first time we parse it - mapperService.merge(entry.getKey(), new CompressedXContent(XContentFactory.jsonBuilder().map(entry.getValue()).string()), MapperService.MergeReason.MAPPING_UPDATE, request.updateAllTypes()); - } catch (Exception e) { - removalReason = "failed on parsing mappings on index creation"; - throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); - } + try { + mapperService.merge(mappings, request.updateAllTypes()); + } catch (MapperParsingException mpe) { + removalReason = "failed on parsing default mapping/mappings on index creation"; + throw mpe; } final QueryShardContext queryShardContext = indexService.newQueryShardContext(); @@ -344,7 +339,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { mappingsMetaData.put(mapper.type(), mappingMd); } - final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings); + final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()) + .settings(actualIndexSettings) + .setRoutingNumShards(routingNumShards); for (MappingMetaData mappingMd : mappingsMetaData.values()) { indexMetaDataBuilder.putMapping(mappingMd); } @@ -414,12 +411,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { }); } - private Map<String, Object> parseMapping(String mappingSource) throws Exception { - try
(XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource)) { - return parser.map(); - } - } - private List findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state, IndexTemplateFilter indexTemplateFilter) throws IOException { List templates = new ArrayList<>(); for (ObjectCursor cursor : state.metaData().templates().values()) { @@ -481,4 +472,82 @@ public class MetaDataCreateIndexService extends AbstractComponent { return Regex.simpleMatch(template.template(), request.index()); } } + + /** + * Validates the settings and mappings for shrinking an index. + * @return the list of node ids on which a copy of every source index shard is allocated + */ + static List validateShrinkIndex(ClusterState state, String sourceIndex, + Set targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { + if (state.metaData().hasIndex(targetIndexName)) { + throw new IndexAlreadyExistsException(state.metaData().index(targetIndexName).getIndex()); + } + final IndexMetaData sourceMetaData = state.metaData().index(sourceIndex); + if (sourceMetaData == null) { + throw new IndexNotFoundException(sourceIndex); + } + // ensure index is read-only + if (state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { + throw new IllegalStateException("index " + sourceIndex + " must be read-only to shrink index. use \"index.blocks.write=true\""); + } + + if (sourceMetaData.getNumberOfShards() == 1) { + throw new IllegalArgumentException("can't shrink an index with only one shard"); + } + + + if ((targetIndexMappingsTypes.size() > 1 || + (targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) { + throw new IllegalArgumentException("mappings are not allowed when shrinking indices" + + ", all mappings are copied from the source index"); + } + if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { + // this method applies all necessary checks, i.e. if the target shards are less than the source shards + // or if the source shards are divisible by the number of target shards + IndexMetaData.getRoutingFactor(sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); + } + + // now check that index is all on one node + final IndexRoutingTable table = state.routingTable().index(sourceIndex); + Map nodesToNumRouting = new HashMap<>(); + int numShards = sourceMetaData.getNumberOfShards(); + for (ShardRouting routing : table.shardsWithState(ShardRoutingState.STARTED)) { + nodesToNumRouting.computeIfAbsent(routing.currentNodeId(), (s) -> new AtomicInteger(0)).incrementAndGet(); + } + List nodesToAllocateOn = new ArrayList<>(); + for (Map.Entry entries : nodesToNumRouting.entrySet()) { + int numAllocations = entries.getValue().get(); + assert numAllocations <= numShards : "found " + numAllocations + " allocations, which is more than the number of shards " + numShards;
+ if (numAllocations == numShards) { + nodesToAllocateOn.add(entries.getKey()); + } + } + if (nodesToAllocateOn.isEmpty()) { + throw new IllegalStateException("index " + sourceIndex + + " must have all shards allocated on the same node to shrink index"); + } + return nodesToAllocateOn; + } + + static void prepareShrinkIndexSettings(ClusterState currentState, Set mappingKeys, Settings.Builder indexSettingsBuilder, Index shrinkFromIndex, String shrinkIntoName) { + final IndexMetaData sourceMetaData = currentState.metaData().index(shrinkFromIndex.getName()); + final List nodesToAllocateOn = validateShrinkIndex(currentState, shrinkFromIndex.getName(), + mappingKeys, shrinkIntoName, indexSettingsBuilder.build()); + final Predicate analysisSimilarityPredicate = (s) -> s.startsWith("index.similarity.") + || s.startsWith("index.analysis."); + indexSettingsBuilder + // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away + // once we are allocated. + .put("index.routing.allocation.initial_recovery._id", + Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) + // we only try once and then give up with a shrink index + .put("index.allocation.max_retries", 1) + // now copy all similarity / analysis settings - this overrides all settings from the user unless they + // want to add extra settings + .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate)) + .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) + .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); + } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 9f373fcda97..7e97d792430 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -19,10 +19,11 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -32,16 +33,11 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.index.Index; import org.elasticsearch.snapshots.SnapshotsService; -import org.elasticsearch.threadpool.ThreadPool; +import java.util.Arrays; import java.util.Set; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; 
/** @@ -49,46 +45,37 @@ import java.util.stream.Collectors; */ public class MetaDataDeleteIndexService extends AbstractComponent { - private final ThreadPool threadPool; - private final ClusterService clusterService; private final AllocationService allocationService; - private final NodeIndexDeletedAction nodeIndexDeletedAction; - @Inject - public MetaDataDeleteIndexService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService, - NodeIndexDeletedAction nodeIndexDeletedAction) { + public MetaDataDeleteIndexService(Settings settings, ClusterService clusterService, AllocationService allocationService) { super(settings); - this.threadPool = threadPool; this.clusterService = clusterService; this.allocationService = allocationService; - this.nodeIndexDeletedAction = nodeIndexDeletedAction; } - public void deleteIndices(final Request request, final Listener userListener) { - final DeleteIndexListener listener = new DeleteIndexListener(userListener); + public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request, final ActionListener listener) { + if (request.indices() == null || request.indices().length == 0) { + throw new IllegalArgumentException("Index name is required"); + } - clusterService.submitStateUpdateTask("delete-index " + request.indices, new ClusterStateUpdateTask(Priority.URGENT) { + clusterService.submitStateUpdateTask("delete-index " + request.indices(), + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override - public TimeValue timeout() { - return request.masterTimeout; - } - - @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new ClusterStateUpdateResponse(acknowledged); } @Override public ClusterState execute(final ClusterState currentState) { final MetaData meta = currentState.metaData(); - final Set metaDatas = request.indices.stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet()); + final Index[] indices = request.indices(); + final Set metaDatas = Arrays.asList(indices).stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet()); // Check if index deletion conflicts with any running snapshots SnapshotsService.checkIndexDeletion(currentState, metaDatas); - final Set indices = request.indices; RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); MetaData.Builder metaDataBuilder = MetaData.builder(meta); ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); @@ -108,40 +95,6 @@ public class MetaDataDeleteIndexService extends AbstractComponent { logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.", graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size()); - // wait for events from all nodes that it has been removed from their respective metadata... - int count = currentState.nodes().getSize(); - // add the notifications that the store was deleted from *data* nodes - count += currentState.nodes().getDataNodes().size(); - final AtomicInteger counter = new AtomicInteger(count * indices.size()); - - // this listener will be notified once we get back a notification based on the cluster state change below. 
- final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() { - @Override - public void onNodeIndexDeleted(Index deleted, String nodeId) { - if (indices.contains(deleted)) { - if (counter.decrementAndGet() == 0) { - listener.onResponse(new Response(true)); - nodeIndexDeletedAction.remove(this); - } - } - } - - @Override - public void onNodeIndexStoreDeleted(Index deleted, String nodeId) { - if (indices.contains(deleted)) { - if (counter.decrementAndGet() == 0) { - listener.onResponse(new Response(true)); - nodeIndexDeletedAction.remove(this); - } - } - } - }; - nodeIndexDeletedAction.add(nodeIndexDeleteListener); - listener.future = threadPool.schedule(request.timeout, ThreadPool.Names.SAME, () -> { - listener.onResponse(new Response(false)); - nodeIndexDeletedAction.remove(nodeIndexDeleteListener); - }); - MetaData newMetaData = metaDataBuilder.build(); ClusterBlocks blocks = clusterBlocksBuilder.build(); RoutingAllocation.Result routingResult = allocationService.reroute( @@ -149,78 +102,6 @@ public class MetaDataDeleteIndexService extends AbstractComponent { "deleted indices [" + indices + "]"); return ClusterState.builder(currentState).routingResult(routingResult).metaData(newMetaData).blocks(blocks).build(); } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - } }); } - - class DeleteIndexListener implements Listener { - - private final AtomicBoolean notified = new AtomicBoolean(); - private final Listener listener; - volatile ScheduledFuture future; - - private DeleteIndexListener(Listener listener) { - this.listener = listener; - } - - @Override - public void onResponse(final Response response) { - if (notified.compareAndSet(false, true)) { - FutureUtils.cancel(future); - listener.onResponse(response); - } - } - - @Override - public void onFailure(Throwable t) { - if (notified.compareAndSet(false, true)) { - FutureUtils.cancel(future); - listener.onFailure(t); - } - } - } - - public interface Listener { - - void onResponse(Response response); - - void onFailure(Throwable t); - } - - public static class Request { - - final Set indices; - - TimeValue timeout = TimeValue.timeValueSeconds(10); - TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; - - public Request(Set indices) { - this.indices = indices; - } - - public Request timeout(TimeValue timeout) { - this.timeout = timeout; - return this; - } - - public Request masterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; - return this; - } - } - - public static class Response { - private final boolean acknowledged; - - public Response(boolean acknowledged) { - this.acknowledged = acknowledged; - } - - public boolean acknowledged() { - return acknowledged; - } - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index e6e7084e4d9..53a0ede809a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -172,7 +172,7 @@ public class MetaDataIndexStateService extends AbstractComponent { // We need to check that this index can be upgraded to the current version indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); try { - indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData); + 
indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData, indexMetaData); } catch (Exception e) { throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 0b3652b3672..101f59f3aec 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; @@ -26,17 +27,25 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.NodeServicesProvider; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; import org.elasticsearch.indices.IndexTemplateMissingException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -51,14 +60,18 @@ public class MetaDataIndexTemplateService extends AbstractComponent { private final ClusterService clusterService; private final AliasValidator aliasValidator; + private final IndicesService indicesService; private final MetaDataCreateIndexService metaDataCreateIndexService; + private final NodeServicesProvider nodeServicesProvider; @Inject - public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator) { + public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { super(settings); this.clusterService = clusterService; this.aliasValidator = aliasValidator; + this.indicesService = indicesService; this.metaDataCreateIndexService = metaDataCreateIndexService; + this.nodeServicesProvider = nodeServicesProvider; } public void removeTemplates(final RemoveRequest request, final RemoveListener listener) { @@ -70,8 +83,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); + public void onFailure(String source, Exception e) { + listener.onFailure(e); } @Override @@ -121,33 
+134,12 @@ public class MetaDataIndexTemplateService extends AbstractComponent { try { validate(request); - } catch (Throwable e) { + } catch (Exception e) { listener.onFailure(e); return; } - IndexTemplateMetaData.Builder templateBuilder; - try { - templateBuilder = IndexTemplateMetaData.builder(request.name); - templateBuilder.order(request.order); - templateBuilder.template(request.template); - templateBuilder.settings(request.settings); - for (Map.Entry entry : request.mappings.entrySet()) { - templateBuilder.putMapping(entry.getKey(), entry.getValue()); - } - for (Alias alias : request.aliases) { - AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) - .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); - templateBuilder.putAlias(aliasMetaData); - } - for (Map.Entry entry : request.customs.entrySet()) { - templateBuilder.putCustom(entry.getKey(), entry.getValue()); - } - } catch (Throwable e) { - listener.onFailure(e); - return; - } - final IndexTemplateMetaData template = templateBuilder.build(); + final IndexTemplateMetaData.Builder templateBuilder = IndexTemplateMetaData.builder(request.name); clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", new ClusterStateUpdateTask(Priority.URGENT) { @@ -158,15 +150,28 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); + public void onFailure(String source, Exception e) { + listener.onFailure(e); } @Override - public ClusterState execute(ClusterState currentState) { + public ClusterState execute(ClusterState currentState) throws Exception { if (request.create && currentState.metaData().templates().containsKey(request.name)) { throw new IndexTemplateAlreadyExistsException(request.name); } + + validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider); + + for (Alias alias : request.aliases) { + AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) + .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); + templateBuilder.putAlias(aliasMetaData); + } + for (Map.Entry entry : request.customs.entrySet()) { + templateBuilder.putCustom(entry.getKey(), entry.getValue()); + } + IndexTemplateMetaData template = templateBuilder.build(); + MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template); return ClusterState.builder(currentState).metaData(builder).build(); @@ -174,11 +179,53 @@ public class MetaDataIndexTemplateService extends AbstractComponent { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(new PutResponse(true, template)); + listener.onResponse(new PutResponse(true, templateBuilder.build())); } }); } + private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService, + NodeServicesProvider nodeServicesProvider) throws Exception { + Index createdIndex = null; + final String temporaryIndexName = UUIDs.randomBase64UUID(); + try { + + //create index service for parsing and validating "mappings" + Settings dummySettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(request.settings) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + 
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + + final IndexMetaData tmpIndexMetadata = IndexMetaData.builder(temporaryIndexName).settings(dummySettings).build(); + IndexService dummyIndexService = indicesService.createIndex(nodeServicesProvider, tmpIndexMetadata, Collections.emptyList()); + createdIndex = dummyIndexService.index(); + + templateBuilder.order(request.order); + templateBuilder.template(request.template); + templateBuilder.settings(request.settings); + + Map> mappingsForValidation = new HashMap<>(); + for (Map.Entry entry : request.mappings.entrySet()) { + try { + templateBuilder.putMapping(entry.getKey(), entry.getValue()); + } catch (Exception e) { + throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); + } + mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(entry.getValue())); + } + + dummyIndexService.mapperService().merge(mappingsForValidation, false); + + } finally { + if (createdIndex != null) { + indicesService.removeIndex(createdIndex, " created for parsing template mapping"); + } + } + } + private void validate(PutRequest request) { List validationErrors = new ArrayList<>(); if (request.name.contains(" ")) { @@ -209,7 +256,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { validationErrors.add("template must not start with '_'"); } if (!Strings.validFileNameExcludingAstrix(request.template)) { - validationErrors.add("template must not container the following characters " + Strings.INVALID_FILENAME_CHARS); + validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS); } List indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings); @@ -229,11 +276,11 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } } - public static interface PutListener { + public interface PutListener { void onResponse(PutResponse response); - void onFailure(Throwable t); + void onFailure(Exception e); } public static class PutRequest { @@ -344,10 +391,10 @@ public class MetaDataIndexTemplateService extends AbstractComponent { } } - public static interface RemoveListener { + public interface RemoveListener { void onResponse(RemoveResponse response); - void onFailure(Throwable t); + void onFailure(Exception e); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index cdfb5487019..d1141aeb9f4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -34,8 +34,6 @@ import org.elasticsearch.indices.mapper.MapperRegistry; import java.util.Collections; -import static org.elasticsearch.common.util.set.Sets.newHashSet; - /** * This service is responsible for upgrading legacy index metadata to the current version *

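The validateAndAddTemplate method introduced in the MetaDataIndexTemplateService hunks above validates a template by dry-running it: it creates a throwaway index from dummy settings, merges the template's mappings into that index's MapperService so parse errors surface when the template is stored rather than when the first matching index is created, and removes the index again in a finally block. Stripped of the Elasticsearch types, the idiom reduces to the following generic helper (an illustration, not code from this commit):

```java
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;

// Generic form of the try/finally cleanup idiom used by validateAndAddTemplate:
// create a temporary resource, validate against it, and always clean it up.
final class DryRun {
    static <T, R> R withTemporary(Supplier<T> create, Function<T, R> validate, Consumer<T> cleanup) {
        T resource = null;
        try {
            resource = create.get();          // e.g. indicesService.createIndex(...)
            return validate.apply(resource);  // e.g. mapperService().merge(mappings, false)
        } finally {
            if (resource != null) {
                cleanup.accept(resource);     // e.g. indicesService.removeIndex(...)
            }
        }
    }
}
```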
    @@ -88,15 +86,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { } /** - * Elasticsearch 3.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices - * that were created before Elasticsearch v2.0.0.beta1 should be upgraded using upgrade API before they can - * be open by this version of elasticsearch. - */ + * Elasticsearch 5.0 no longer supports indices with pre Lucene v5.0 (Elasticsearch v2.0.0.beta1) segments. All indices + * that were created before Elasticsearch v2.0.0.beta1 should be reindexed in Elasticsearch 2.x + * before they can be opened by this version of elasticsearch. */ private void checkSupportedVersion(IndexMetaData indexMetaData) { if (indexMetaData.getState() == IndexMetaData.State.OPEN && isSupportedVersion(indexMetaData) == false) { - throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1 and wasn't upgraded." - + " This index should be open using a version before " + Version.CURRENT.minimumCompatibilityVersion() - + " and upgraded using the upgrade API."); + throw new IllegalStateException("The index [" + indexMetaData.getIndex() + "] was created before v2.0.0.beta1." + + " It should be reindexed in Elasticsearch 2.x before upgrading to " + Version.CURRENT + "."); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 311ffae71fd..c44fee0fb2b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; @@ -83,6 +82,11 @@ public class MetaDataMappingService extends AbstractComponent { this.index = index; this.indexUUID = indexUUID; } + + @Override + public String toString() { + return "[" + index + "][" + indexUUID + "]"; + } } class RefreshTaskExecutor implements ClusterStateTaskExecutor { @@ -188,8 +192,8 @@ public class MetaDataMappingService extends AbstractComponent { builder.putMapping(new MappingMetaData(mapper)); } } - } catch (Throwable t) { - logger.warn("[{}] failed to refresh-mapping in cluster state", t, index); + } catch (Exception e) { + logger.warn("[{}] failed to refresh-mapping in cluster state", e, index); } return dirty; } @@ -199,11 +203,11 @@ public class MetaDataMappingService extends AbstractComponent { */ public void refreshMapping(final String index, final String indexUUID) { final RefreshTask refreshTask = new RefreshTask(index, indexUUID); - clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]", + clusterService.submitStateUpdateTask("refresh-mapping", refreshTask, ClusterStateTaskConfig.build(Priority.HIGH), refreshExecutor, - (source, t) -> logger.warn("failure during [{}]", t, source) + (source, e) -> logger.warn("failure during [{}]", e, source) ); } @@ -234,8 +238,8 @@ public class MetaDataMappingService extends AbstractComponent { } currentState = applyRequest(currentState, request); builder.success(request); - } catch (Throwable t) { - 
builder.failure(request, t); + } catch (Exception e) { + builder.failure(request, e); } } return builder.build(currentState); @@ -281,8 +285,11 @@ public class MetaDataMappingService extends AbstractComponent { // Also the order of the mappings may be backwards. if (newMapper.parentFieldMapper().active()) { for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { - throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); + String parentType = newMapper.parentFieldMapper().type(); + if (parentType.equals(mapping.value.type()) && + indexService.mapperService().getParentTypes().contains(parentType) == false) { + throw new IllegalArgumentException("can't add a _parent field that points to an " + + "already existing type, that isn't already a parent"); } } } @@ -297,7 +304,7 @@ public class MetaDataMappingService extends AbstractComponent { assert mappingType != null; if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && mappingType.charAt(0) == '_') { - throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); + throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]"); } MetaData.Builder builder = MetaData.builder(metaData); for (Tuple toUpdate : updateList) { @@ -345,18 +352,23 @@ public class MetaDataMappingService extends AbstractComponent { return ClusterState.builder(currentState).metaData(builder).build(); } + + @Override + public String describeTasks(List tasks) { + return tasks.stream().map(PutMappingClusterStateUpdateRequest::type).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); + } } public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener listener) { - clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", + clusterService.submitStateUpdateTask("put-mapping", request, ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()), putMappingExecutor, new AckedClusterStateTaskListener() { @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); + public void onFailure(String source, Exception e) { + listener.onFailure(e); } @Override @@ -365,7 +377,7 @@ public class MetaDataMappingService extends AbstractComponent { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { listener.onResponse(new ClusterStateUpdateResponse(true)); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 3daf6c4dd3a..9db777a4794 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; @@ -43,7 +44,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.NodeServicesProvider; +import 
org.elasticsearch.indices.IndicesService; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -61,17 +65,20 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements private final AllocationService allocationService; - private final IndexNameExpressionResolver indexNameExpressionResolver; private final IndexScopedSettings indexScopedSettings; + private final IndicesService indicesService; + private final NodeServicesProvider nodeServiceProvider; @Inject - public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndexNameExpressionResolver indexNameExpressionResolver) { + public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterService, AllocationService allocationService, + IndexScopedSettings indexScopedSettings, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { super(settings); this.clusterService = clusterService; - this.indexNameExpressionResolver = indexNameExpressionResolver; this.clusterService.add(this); this.allocationService = allocationService; this.indexScopedSettings = indexScopedSettings; + this.indicesService = indicesService; + this.nodeServiceProvider = nodeServicesProvider; } @Override @@ -139,7 +146,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception t) { for (Index index : indices) { logger.warn("{} fail to auto expand replicas to [{}]", index, fNumberOfReplicas); } @@ -266,11 +273,19 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // now, reroute in case things change that require it (like number of replicas) RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update"); updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); - for (Index index : openIndices) { - indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); - } - for (Index index : closeIndices) { - indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); + try { + for (Index index : openIndices) { + final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); + final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); + indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); + } + for (Index index : closeIndices) { + final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index); + final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index); + indicesService.verifyIndexMetadata(nodeServiceProvider, currentMetaData, updatedMetaData); + } + } catch (IOException ex) { + throw ExceptionsHelper.convertToElastic(ex); } return updatedState; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java b/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java deleted file mode 100644 index 88c60f1f07e..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; - -import java.io.IOException; - -/** - * Snapshot ID - repository name + snapshot name - */ -public class SnapshotId implements Streamable { - - private String repository; - - private String snapshot; - - // Caching hash code - private int hashCode; - - private SnapshotId() { - } - - /** - * Constructs new snapshot id - * - * @param repository repository name - * @param snapshot snapshot name - */ - public SnapshotId(String repository, String snapshot) { - this.repository = repository; - this.snapshot = snapshot; - this.hashCode = computeHashCode(); - } - - /** - * Returns repository name - * - * @return repository name - */ - public String getRepository() { - return repository; - } - - /** - * Returns snapshot name - * - * @return snapshot name - */ - public String getSnapshot() { - return snapshot; - } - - @Override - public String toString() { - return repository + ":" + snapshot; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null) return false; - SnapshotId snapshotId = (SnapshotId) o; - return snapshot.equals(snapshotId.snapshot) && repository.equals(snapshotId.repository); - } - - @Override - public int hashCode() { - return hashCode; - } - - private int computeHashCode() { - int result = repository != null ? 
repository.hashCode() : 0; - result = 31 * result + snapshot.hashCode(); - return result; - } - - /** - * Reads snapshot id from stream input - * - * @param in stream input - * @return snapshot id - */ - public static SnapshotId readSnapshotId(StreamInput in) throws IOException { - SnapshotId snapshot = new SnapshotId(); - snapshot.readFrom(in); - return snapshot; - } - - /** - * {@inheritDoc} - */ - @Override - public void readFrom(StreamInput in) throws IOException { - repository = in.readString(); - snapshot = in.readString(); - hashCode = computeHashCode(); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(repository); - out.writeString(snapshot); - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index ff37b6e6d4c..329405849e9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.node; import org.elasticsearch.Version; -import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -35,9 +35,11 @@ import java.io.IOException; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.Predicate; +import java.util.function.Supplier; import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream; @@ -64,7 +66,15 @@ public class DiscoveryNode implements Writeable, ToXContent { } public static boolean nodeRequiresLocalStorage(Settings settings) { - return Node.NODE_DATA_SETTING.get(settings) || Node.NODE_MASTER_SETTING.get(settings); + boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings); + if (localStorageEnable == false && + (Node.NODE_DATA_SETTING.get(settings) || + Node.NODE_MASTER_SETTING.get(settings)) + ) { + // TODO: make this a proper setting validation logic, requiring multi-settings validation + throw new IllegalArgumentException("storage can not be disabled for master and data nodes"); + } + return localStorageEnable; } public static boolean isMasterNode(Settings settings) { @@ -81,6 +91,7 @@ public class DiscoveryNode implements Writeable, ToXContent { private final String nodeName; private final String nodeId; + private final String ephemeralId; private final String hostName; private final String hostAddress; private final TransportAddress address; @@ -97,14 +108,15 @@ public class DiscoveryNode implements Writeable, ToXContent { * and updated. *

    * - * @param nodeId the nodes unique id. - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node. + * @param id the node's unique (persistent) node id. This constructor will auto-generate a random ephemeral id. + * @param address the node's transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node */ - public DiscoveryNode(String nodeId, TransportAddress address, Map attributes, Set roles, Version version) { - this("", nodeId, address.getHost(), address.getAddress(), address, attributes, roles, version); + public DiscoveryNode(String id, TransportAddress address, Map attributes, Set roles, + Version version) { + this("", id, address, attributes, roles, version); } /** @@ -116,16 +128,16 @@ public class DiscoveryNode implements Writeable, ToXContent { * and updated. *

    * - * @param nodeName the nodes name - * @param nodeId the nodes unique id. - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node. + * @param nodeName the node's name + * @param nodeId the node's unique persistent id. An ephemeral id will be auto-generated. + * @param address the node's transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node */ - public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, Map attributes, - Set roles, Version version) { - this(nodeName, nodeId, address.getHost(), address.getAddress(), address, attributes, roles, version); + public DiscoveryNode(String nodeName, String nodeId, TransportAddress address, + Map attributes, Set roles, Version version) { + this(nodeName, nodeId, UUIDs.randomBase64UUID(), address.getHost(), address.getAddress(), address, attributes, roles, version); } /** @@ -137,23 +149,24 @@ public class DiscoveryNode implements Writeable, ToXContent { * and updated. *

    * - * @param nodeName the nodes name - * @param nodeId the nodes unique id. - * @param hostName the nodes hostname - * @param hostAddress the nodes host address - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node. + * @param nodeName the node's name + * @param nodeId the node's unique persistent id + * @param ephemeralId the node's unique ephemeral id + * @param hostAddress the node's host address + * @param address the node's transport address + * @param attributes node attributes + * @param roles node roles + * @param version the version of the node */ - public DiscoveryNode(String nodeName, String nodeId, String hostName, String hostAddress, TransportAddress address, - Map attributes, Set roles, Version version) { + public DiscoveryNode(String nodeName, String nodeId, String ephemeralId, String hostName, String hostAddress, + TransportAddress address, Map attributes, Set roles, Version version) { if (nodeName != null) { this.nodeName = nodeName.intern(); } else { this.nodeName = ""; } this.nodeId = nodeId.intern(); + this.ephemeralId = ephemeralId.intern(); this.hostName = hostName.intern(); this.hostAddress = hostAddress.intern(); this.address = address; @@ -176,6 +189,24 @@ public class DiscoveryNode implements Writeable, ToXContent { this.roles = Collections.unmodifiableSet(rolesSet); } + /** Creates a DiscoveryNode representing the local node. */ + public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeIdSupplier) { + Map attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap()); + Set roles = new HashSet<>(); + if (Node.NODE_INGEST_SETTING.get(settings)) { + roles.add(DiscoveryNode.Role.INGEST); + } + if (Node.NODE_MASTER_SETTING.get(settings)) { + roles.add(DiscoveryNode.Role.MASTER); + } + if (Node.NODE_DATA_SETTING.get(settings)) { + roles.add(DiscoveryNode.Role.DATA); + } + + return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeIdSupplier, publishAddress, + attributes, roles, Version.CURRENT); + } + /** * Creates a new {@link DiscoveryNode} by reading from the stream provided as argument * @param in the stream @@ -184,6 +215,7 @@ public class DiscoveryNode implements Writeable, ToXContent { public DiscoveryNode(StreamInput in) throws IOException { this.nodeName = in.readString().intern(); this.nodeId = in.readString().intern(); + this.ephemeralId = in.readString().intern(); this.hostName = in.readString().intern(); this.hostAddress = in.readString().intern(); this.address = TransportAddressSerializers.addressFromStream(in); @@ -208,6 +240,7 @@ public class DiscoveryNode implements Writeable, ToXContent { public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeName); out.writeString(nodeId); + out.writeString(ephemeralId); out.writeString(hostName); out.writeString(hostAddress); addressToStream(out, address); @@ -237,6 +270,17 @@ public class DiscoveryNode implements Writeable, ToXContent { return nodeId; } + /** + * The unique ephemeral id of the node. Ephemeral ids are meant to be attached to the life span + * of a node process. Whenever a node is restarted, its ephemeral id is required to change (while its {@link #getId()} + * will be read from the data folder and will remain the same across restarts). 
Since all node attributes and addresses + * are maintained during the life span of a node process, we can (and do) use the ephemeralId in + * {@link DiscoveryNode#equals(Object)}. + */ + public String getEphemeralId() { + return ephemeralId; + } + /** * The name of the node. */ @@ -293,18 +337,25 @@ public class DiscoveryNode implements Writeable, ToXContent { } @Override - public boolean equals(Object obj) { - if (!(obj instanceof DiscoveryNode)) { + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { return false; } - DiscoveryNode other = (DiscoveryNode) obj; - return this.nodeId.equals(other.nodeId); + DiscoveryNode that = (DiscoveryNode) o; + + return ephemeralId.equals(that.ephemeralId); } @Override public int hashCode() { - return nodeId.hashCode(); + // we only need to hash the ephemeral id because it's highly unlikely that two nodes + // in our system will have the same id but be different + // This is done so that this class can be used efficiently as a key in a map + return ephemeralId.hashCode(); } @Override @@ -313,15 +364,10 @@ public class DiscoveryNode implements Writeable, ToXContent { if (nodeName.length() > 0) { sb.append('{').append(nodeName).append('}'); } - if (nodeId != null) { - sb.append('{').append(nodeId).append('}'); - } - if (Strings.hasLength(hostName)) { - sb.append('{').append(hostName).append('}'); - } - if (address != null) { - sb.append('{').append(address).append('}'); - } + sb.append('{').append(nodeId).append('}'); + sb.append('{').append(ephemeralId).append('}'); + sb.append('{').append(hostName).append('}'); + sb.append('{').append(address).append('}'); if (!attributes.isEmpty()) { sb.append(attributes); } @@ -332,6 +378,7 @@ public class DiscoveryNode implements Writeable, ToXContent { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(getId()); builder.field("name", getName()); + builder.field("ephemeral_id", getEphemeralId()); builder.field("transport_address", getAddress().toString()); builder.startObject("attributes"); diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java deleted file mode 100644 index 177c67f2986..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster.node; - -import org.elasticsearch.Version; -import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.node.Node; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; - -/** - */ -public class DiscoveryNodeService extends AbstractComponent { - - public static final Setting NODE_ID_SEED_SETTING = - // don't use node.id.seed so it won't be seen as an attribute - Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope); - private final List customAttributesProviders = new CopyOnWriteArrayList<>(); - private final Version version; - - @Inject - public DiscoveryNodeService(Settings settings, Version version) { - super(settings); - this.version = version; - } - - public static String generateNodeId(Settings settings) { - Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); - return UUIDs.randomBase64UUID(random); - } - - public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) { - customAttributesProviders.add(customAttributesProvider); - return this; - } - - public DiscoveryNode buildLocalNode(TransportAddress publishAddress) { - final String nodeId = generateNodeId(settings); - Map attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap()); - Set roles = new HashSet<>(); - if (Node.NODE_INGEST_SETTING.get(settings)) { - roles.add(DiscoveryNode.Role.INGEST); - } - if (Node.NODE_MASTER_SETTING.get(settings)) { - roles.add(DiscoveryNode.Role.MASTER); - } - if (Node.NODE_DATA_SETTING.get(settings)) { - roles.add(DiscoveryNode.Role.DATA); - } - - for (CustomAttributesProvider provider : customAttributesProviders) { - try { - Map customAttributes = provider.buildAttributes(); - if (customAttributes != null) { - for (Map.Entry entry : customAttributes.entrySet()) { - if (!attributes.containsKey(entry.getKey())) { - attributes.put(entry.getKey(), entry.getValue()); - } - } - } - } catch (Exception e) { - logger.warn("failed to build custom attributes from provider [{}]", e, provider); - } - } - return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, - roles, version); - } - - public interface CustomAttributesProvider { - - Map buildAttributes(); - } -} diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index ed4cfbd9134..68dedc433da 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -153,7 +153,7 @@ public class DiscoveryNodes extends AbstractDiffable implements } /** - * Determine if a given node exists + * Determine if a given node id exists * * @param nodeId id of the node which existence should be verified * @return true if the node exists. 
Otherwise false @@ -162,6 +162,18 @@ public class DiscoveryNodes extends AbstractDiffable implements return nodes.containsKey(nodeId); } + /** + * Determine if a given node exists + * + * @param node the node whose existence should be verified + * @return true if the node exists. Otherwise false + */ + public boolean nodeExists(DiscoveryNode node) { + DiscoveryNode existing = nodes.get(node.getId()); + return existing != null && existing.equals(node); + } + + /** + * Get the id of the master node + * @@ -225,7 +237,7 @@ public class DiscoveryNodes extends AbstractDiffable implements * @return the oldest version in the cluster */ public Version getSmallestVersion() { - return minNodeVersion; + return minNodeVersion; } /** @@ -245,9 +257,10 @@ public class DiscoveryNodes extends AbstractDiffable implements * @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved */ public DiscoveryNode resolveNode(String node) { - String[] resolvedNodeIds = resolveNodesIds(node); + String[] resolvedNodeIds = resolveNodes(node); if (resolvedNodeIds.length > 1) { - throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node"); + throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + + "] nodes, where expected to be resolved to a single node"); } if (resolvedNodeIds.length == 0) { throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes"); @@ -255,17 +268,25 @@ public class DiscoveryNodes extends AbstractDiffable implements return nodes.get(resolvedNodeIds[0]); } - public String[] resolveNodesIds(String... nodesIds) { - if (isAllNodes(nodesIds)) { + /** + * resolves a set of node "descriptions" to concrete and existing node ids. "descriptions" can be (resolved in this order): + * - "_local" or "_master" for the relevant nodes + * - a node id + * - a wildcard pattern that will be matched against node names + * - an "attr:value" pattern, where attr can be a node role (master, data, ingest etc.) in which case the value can be true or false + * or a generic node attribute name in which case value will be treated as a wildcard and matched against the node attribute values. + */ + public String[] resolveNodes(String... 
nodes) { + if (isAllNodes(nodes)) { int index = 0; - nodesIds = new String[nodes.size()]; + nodes = new String[this.nodes.size()]; for (DiscoveryNode node : this) { - nodesIds[index++] = node.getId(); + nodes[index++] = node.getId(); } - return nodesIds; + return nodes; } else { - ObjectHashSet resolvedNodesIds = new ObjectHashSet<>(nodesIds.length); - for (String nodeId : nodesIds) { + ObjectHashSet resolvedNodesIds = new ObjectHashSet<>(nodes.length); + for (String nodeId : nodes) { if (nodeId.equals("_local")) { String localNodeId = getLocalNodeId(); if (localNodeId != null) { @@ -353,12 +374,12 @@ public class DiscoveryNodes extends AbstractDiffable implements List removed = new ArrayList<>(); List added = new ArrayList<>(); for (DiscoveryNode node : other) { - if (!this.nodeExists(node.getId())) { + if (!this.nodeExists(node)) { removed.add(node); } } for (DiscoveryNode node : this) { - if (!other.nodeExists(node.getId())) { + if (!other.nodeExists(node)) { added.add(node); } } @@ -370,7 +391,8 @@ public class DiscoveryNodes extends AbstractDiffable implements newMasterNode = getMasterNode(); } } - return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added)); + return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), + Collections.unmodifiableList(added)); } @Override @@ -412,7 +434,8 @@ public class DiscoveryNodes extends AbstractDiffable implements this(null, null, localNodeId, removed, added); } - public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, List removed, List added) { + public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, + List removed, List added) { this.previousMasterNode = previousMasterNode; this.newMasterNode = newMasterNode; this.localNodeId = localNodeId; @@ -530,7 +553,10 @@ public class DiscoveryNodes extends AbstractDiffable implements // reuse the same instance of our address and local node id for faster equality node = localNode; } - builder.put(node); + // someone already built this and validated it's OK, skip the n^2 scans + assert builder.validatePut(node) == null : "building disco nodes from network doesn't pass preflight: " + + builder.validatePut(node); + builder.putUnsafe(node); } return builder.build(); } @@ -564,16 +590,36 @@ public class DiscoveryNodes extends AbstractDiffable implements this.nodes = ImmutableOpenMap.builder(nodes.getNodes()); } + /** + * adds a disco node to the builder. 
Will throw an {@link IllegalArgumentException} if + * the supplied node doesn't pass the pre-flight checks performed by {@link #validatePut(DiscoveryNode)} + */ public Builder put(DiscoveryNode node) { - nodes.put(node.getId(), node); + final String preflight = validatePut(node); + if (preflight != null) { + throw new IllegalArgumentException(preflight); + } + putUnsafe(node); return this; } + private void putUnsafe(DiscoveryNode node) { + nodes.put(node.getId(), node); + } + public Builder remove(String nodeId) { nodes.remove(nodeId); return this; } + public Builder remove(DiscoveryNode node) { + if (node.equals(nodes.get(node.getId()))) { + nodes.remove(node.getId()); + } + return this; + } + + public Builder masterNodeId(String masterNodeId) { this.masterNodeId = masterNodeId; return this; @@ -584,6 +630,30 @@ public class DiscoveryNodes extends AbstractDiffable implements return this; } + /** + * Checks that a node can be safely added to this node collection. + * + * @return null if all is OK or an error message explaining why a node can not be added. + * + * Note: if this method returns a non-null value, calling {@link #put(DiscoveryNode)} will fail with an + * exception + */ + private String validatePut(DiscoveryNode node) { + for (ObjectCursor cursor : nodes.values()) { + final DiscoveryNode existingNode = cursor.value; + if (node.getAddress().equals(existingNode.getAddress()) && + node.getId().equals(existingNode.getId()) == false) { + return "can't add node " + node + ", found existing node " + existingNode + " with same address"; + } + if (node.getId().equals(existingNode.getId()) && + node.getAddress().equals(existingNode.getAddress()) == false) { + return "can't add node " + node + ", found existing node " + existingNode + + " with the same id, but a different address"; + } + } + return null; + } + public DiscoveryNodes build() { ImmutableOpenMap.Builder dataNodesBuilder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder masterNodesBuilder = ImmutableOpenMap.builder(); @@ -614,5 +684,9 @@ public class DiscoveryNodes extends AbstractDiffable implements public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { return PROTO.readFrom(in, localNode); } + + public boolean isLocalNodeElectedMaster() { + return masterNodeId != null && masterNodeId.equals(localNodeId); + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java index 100752289f7..cb0fb487693 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,7 +44,7 @@ import java.util.Objects; * relocationId. Once relocation is done, the new allocation id is set to the relocationId. This is similar * behavior to how ShardRouting#currentNodeId is used. 
*/ -public class AllocationId implements ToXContent { +public class AllocationId implements ToXContent, Writeable { private static final String ID_KEY = "id"; private static final String RELOCATION_ID_KEY = "relocation_id"; @@ -81,6 +82,7 @@ public class AllocationId implements ToXContent { this.relocationId = in.readOptionalString(); } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.id); out.writeOptionalString(this.relocationId); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java new file mode 100644 index 00000000000..29d74dd8933 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +/** + * The {@link DelayedAllocationService} listens to cluster state changes and checks + * if there are unassigned shards with delayed allocation (unassigned shards that have + * the delay marker). These are shards that have become unassigned due to a node leaving + * and which were assigned the delay marker based on the index delay setting + * {@link UnassignedInfo#INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING} + * (see {@link AllocationService#deassociateDeadNodes(RoutingAllocation)}). + * This class is responsible for choosing the next (closest) delay expiration of a + * delayed shard to schedule a reroute to remove the delay marker. + * The actual removal of the delay marker happens in + * {@link AllocationService#removeDelayMarkers(RoutingAllocation)}, triggering yet + * another cluster change event. 
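// Illustrative aside, not part of the patch: the class below keeps at most one pending
// reroute task and replaces it only when an earlier delay expiration shows up. A
// simplified sketch of that policy, using a plain ScheduledExecutorService instead of the
// ThreadPool and assuming single-threaded callers (the real code below uses
// compare-and-set on an AtomicReference for safety):
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

final class DelayedRerouteSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private long pendingRunAtNanos = Long.MAX_VALUE;
    private ScheduledFuture<?> pending = null;

    void scheduleIfNeeded(long nextDelayNanos) {
        if (nextDelayNanos < 0) { // no delayed unassigned shards: nothing to schedule
            cancelPending();
            return;
        }
        long runAt = System.nanoTime() + nextDelayNanos;
        if (pending != null && runAt >= pendingRunAtNanos) {
            return; // currently scheduled reroute already runs early enough
        }
        cancelPending(); // need an earlier reroute: drop the old task and reschedule
        pendingRunAtNanos = runAt;
        pending = scheduler.schedule(this::reroute, nextDelayNanos, TimeUnit.NANOSECONDS);
    }

    private void cancelPending() {
        if (pending != null) {
            pending.cancel(false);
            pending = null;
            pendingRunAtNanos = Long.MAX_VALUE;
        }
    }

    private void reroute() {
        pending = null;
        pendingRunAtNanos = Long.MAX_VALUE;
        System.out.println("reroute: assign delayed unassigned shards");
    }
}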
+ */ +public class DelayedAllocationService extends AbstractLifecycleComponent implements ClusterStateListener { + + static final String CLUSTER_UPDATE_TASK_SOURCE = "delayed_allocation_reroute"; + + final ThreadPool threadPool; + private final ClusterService clusterService; + private final AllocationService allocationService; + + AtomicReference delayedRerouteTask = new AtomicReference<>(); // package private to access from tests + + /** + * represents a delayed scheduling of the reroute action that can be cancelled. + */ + class DelayedRerouteTask extends ClusterStateUpdateTask { + final TimeValue nextDelay; // delay until submitting the reroute command + final long baseTimestampNanos; // timestamp (in nanos) upon which delay was calculated + volatile ScheduledFuture future; + final AtomicBoolean cancelScheduling = new AtomicBoolean(); + + DelayedRerouteTask(TimeValue nextDelay, long baseTimestampNanos) { + this.nextDelay = nextDelay; + this.baseTimestampNanos = baseTimestampNanos; + } + + public long scheduledTimeToRunInNanos() { + return baseTimestampNanos + nextDelay.nanos(); + } + + public void cancelScheduling() { + cancelScheduling.set(true); + FutureUtils.cancel(future); + removeIfSameTask(this); + } + + public void schedule() { + future = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + if (cancelScheduling.get()) { + return; + } + clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, DelayedRerouteTask.this); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to submit schedule/execute reroute post unassigned shard", e); + removeIfSameTask(DelayedRerouteTask.this); + } + }); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + removeIfSameTask(this); + RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "assign delayed unassigned shards"); + if (routingResult.changed()) { + return ClusterState.builder(currentState).routingResult(routingResult).build(); + } else { + return currentState; + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (oldState == newState) { + // no state changed, check when we should remove the delay flag from the shards the next time. 
+ // if cluster state changed, we can leave the scheduling of the next delay up to the clusterChangedEvent + // this should not be needed, but we want to be extra safe here + scheduleIfNeeded(currentNanoTime(), newState); + } + } + + @Override + public void onFailure(String source, Exception e) { + removeIfSameTask(this); + logger.warn("failed to schedule/execute reroute post unassigned shard", e); + } + } + + @Inject + public DelayedAllocationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, + AllocationService allocationService) { + super(settings); + this.threadPool = threadPool; + this.clusterService = clusterService; + this.allocationService = allocationService; + clusterService.addFirst(this); + } + + @Override + protected void doStart() { + } + + @Override + protected void doStop() { + } + + @Override + protected void doClose() { + clusterService.remove(this); + removeTaskAndCancel(); + } + + /** override this to control time based decisions during delayed allocation */ + protected long currentNanoTime() { + return System.nanoTime(); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + long currentNanoTime = currentNanoTime(); + if (event.state().nodes().isLocalNodeElectedMaster()) { + scheduleIfNeeded(currentNanoTime, event.state()); + } + } + + private void removeTaskAndCancel() { + DelayedRerouteTask existingTask = delayedRerouteTask.getAndSet(null); + if (existingTask != null) { + logger.trace("cancelling existing delayed reroute task"); + existingTask.cancelScheduling(); + } + } + + private void removeIfSameTask(DelayedRerouteTask expectedTask) { + delayedRerouteTask.compareAndSet(expectedTask, null); + } + + /** + * Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule. 
+ */ + private void scheduleIfNeeded(long currentNanoTime, ClusterState state) { + assertClusterStateThread(); + long nextDelayNanos = UnassignedInfo.findNextDelayedAllocation(currentNanoTime, state); + if (nextDelayNanos < 0) { + logger.trace("no need to schedule reroute - no delayed unassigned shards"); + removeTaskAndCancel(); + } else { + TimeValue nextDelay = TimeValue.timeValueNanos(nextDelayNanos); + final boolean earlierRerouteNeeded; + DelayedRerouteTask existingTask = delayedRerouteTask.get(); + DelayedRerouteTask newTask = new DelayedRerouteTask(nextDelay, currentNanoTime); + if (existingTask == null) { + earlierRerouteNeeded = true; + } else if (newTask.scheduledTimeToRunInNanos() < existingTask.scheduledTimeToRunInNanos()) { + // we need an earlier delayed reroute + logger.trace("cancelling existing delayed reroute task as delayed reroute has to happen [{}] earlier", + TimeValue.timeValueNanos(existingTask.scheduledTimeToRunInNanos() - newTask.scheduledTimeToRunInNanos())); + existingTask.cancelScheduling(); + earlierRerouteNeeded = true; + } else { + earlierRerouteNeeded = false; + } + + if (earlierRerouteNeeded) { + logger.info("scheduling reroute for delayed shards in [{}] ({} delayed shards)", nextDelay, + UnassignedInfo.getNumberOfDelayedUnassigned(state)); + DelayedRerouteTask currentTask = delayedRerouteTask.getAndSet(newTask); + assert existingTask == currentTask || currentTask == null; + newTask.schedule(); + } else { + logger.trace("no need to reschedule delayed reroute - currently scheduled delayed reroute in [{}] is enough", nextDelay); + } + } + } + + // protected so that it can be overridden (and disabled) by unit tests + protected void assertClusterStateThread() { + ClusterService.assertClusterStateThread(); + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java b/core/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java index af1d4195075..67e7b8b016b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IllegalShardRoutingStateException.java @@ -42,7 +42,7 @@ public class IllegalShardRoutingStateException extends RoutingException { public IllegalShardRoutingStateException(StreamInput in) throws IOException { super(in); - shard = ShardRouting.readShardRoutingEntry(in); + shard = new ShardRouting(in); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 6111f317c76..0fe49369177 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -76,7 +76,6 @@ public class IndexRoutingTable extends AbstractDiffable imple List allActiveShards = new ArrayList<>(); for (IntObjectCursor cursor : shards) { for (ShardRouting shardRouting : cursor.value) { - shardRouting.freeze(); if (shardRouting.active()) { allActiveShards.add(shardRouting); } @@ -377,14 +376,20 @@ public class IndexRoutingTable extends AbstractDiffable imple * Initializes a new empty index, to be restored from a snapshot */ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) { - return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, new 
UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED, "restore_source[" + restoreSource.snapshotId().getRepository() + "/" + restoreSource.snapshotId().getSnapshot() + "]")); + final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED, + "restore_source[" + restoreSource.snapshot().getRepository() + "/" + + restoreSource.snapshot().getSnapshotId().getName() + "]"); + return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, unassignedInfo); } /** * Initializes an existing index, to be restored from a snapshot */ public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) { - return initializeAsRestore(indexMetaData, restoreSource, null, false, new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, "restore_source[" + restoreSource.snapshotId().getRepository() + "/" + restoreSource.snapshotId().getSnapshot() + "]")); + final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, + "restore_source[" + restoreSource.snapshot().getRepository() + "/" + + restoreSource.snapshot().getSnapshotId().getName() + "]"); + return initializeAsRestore(indexMetaData, restoreSource, null, false, unassignedInfo); } /** @@ -395,17 +400,18 @@ public class IndexRoutingTable extends AbstractDiffable imple if (!shards.isEmpty()) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } - for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { - IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId)); + for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) { - if (asNew && ignoreShards.contains(shardId)) { + if (asNew && ignoreShards.contains(shardNumber)) { // This shard wasn't completely snapshotted - restore it as new shard - indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo)); + indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo)); } else { - indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, i == 0 ?
restoreSource : null, i == 0, unassignedInfo)); } } - shards.put(shardId, indexShardRoutingBuilder.build()); + shards.put(shardNumber, indexShardRoutingBuilder.build()); } return this; } @@ -418,22 +424,24 @@ public class IndexRoutingTable extends AbstractDiffable imple if (!shards.isEmpty()) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } - for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { - IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId)); + for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) { - indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo)); + indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo)); } - shards.put(shardId, indexShardRoutingBuilder.build()); + shards.put(shardNumber, indexShardRoutingBuilder.build()); } return this; } public Builder addReplica() { for (IntCursor cursor : shards.keys()) { - int shardId = cursor.value; + int shardNumber = cursor.value; + ShardId shardId = new ShardId(index, shardNumber); // version 0, will get updated when reroute will happen - ShardRouting shard = ShardRouting.newUnassigned(index, shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); - shards.put(shardId, + ShardRouting shard = ShardRouting.newUnassigned(shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); + shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build() ); } @@ -451,7 +459,7 @@ public class IndexRoutingTable extends AbstractDiffable imple // re-add all the current ones IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(indexShard.shardId()); for (ShardRouting shardRouting : indexShard) { - builder.addShard(new ShardRouting(shardRouting)); + builder.addShard(shardRouting); } // first check if there is one that is not assigned to a node, and remove it boolean removed = false; @@ -484,12 +492,12 @@ public class IndexRoutingTable extends AbstractDiffable imple * Adds a new shard routing (makes a copy of it), with reference data used from the index shard routing table * if it needs to be created. 
*/ - public Builder addShard(IndexShardRoutingTable refData, ShardRouting shard) { + public Builder addShard(ShardRouting shard) { IndexShardRoutingTable indexShard = shards.get(shard.id()); if (indexShard == null) { - indexShard = new IndexShardRoutingTable.Builder(refData.shardId()).addShard(new ShardRouting(shard)).build(); + indexShard = new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard).build(); } else { - indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(new ShardRouting(shard)).build(); + indexShard = new IndexShardRoutingTable.Builder(indexShard).addShard(shard).build(); } shards.put(indexShard.shardId().id(), indexShard); return this; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index d504a74dc3c..cddf6f98a54 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -36,6 +36,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; @@ -59,7 +60,7 @@ public class IndexShardRoutingTable implements Iterable { final List shards; final List activeShards; final List assignedShards; - final static List NO_SHARDS = Collections.emptyList(); + static final List NO_SHARDS = Collections.emptyList(); final boolean allShardsStarted; private volatile Map activeShardsByAttributes = emptyMap(); @@ -331,15 +332,13 @@ public class IndexShardRoutingTable implements Iterable { public ShardIterator onlyNodeActiveInitializingShardsIt(String nodeId) { ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size()); - // fill it in a randomized fashion - for (int i = 0; i < activeShards.size(); i++) { - ShardRouting shardRouting = activeShards.get(i); + int seed = shuffler.nextSeed(); + for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) { if (nodeId.equals(shardRouting.currentNodeId())) { ordered.add(shardRouting); } } - for (int i = 0; i < allInitializingShards.size(); i++) { - ShardRouting shardRouting = allInitializingShards.get(i); + for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) { if (nodeId.equals(shardRouting.currentNodeId())) { ordered.add(shardRouting); } @@ -347,45 +346,56 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttributes, DiscoveryNodes discoveryNodes) { + return onlyNodeSelectorActiveInitializingShardsIt(new String[] {nodeAttributes}, discoveryNodes); + } + /** * Returns shards based on nodeAttributes given such as node name , node attribute, node IP * Supports node specifications in cluster API */ - public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String nodeAttribute, DiscoveryNodes discoveryNodes) { + public ShardIterator onlyNodeSelectorActiveInitializingShardsIt(String[] nodeAttributes, DiscoveryNodes discoveryNodes) { ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size()); - Set selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodesIds(nodeAttribute)); - - for (ShardRouting shardRouting : activeShards) { + Set selectedNodes = Sets.newHashSet(discoveryNodes.resolveNodes(nodeAttributes)); + int seed = 
shuffler.nextSeed(); + for (ShardRouting shardRouting : shuffler.shuffle(activeShards, seed)) { if (selectedNodes.contains(shardRouting.currentNodeId())) { ordered.add(shardRouting); } } - for (ShardRouting shardRouting : allInitializingShards) { + for (ShardRouting shardRouting : shuffler.shuffle(allInitializingShards, seed)) { if (selectedNodes.contains(shardRouting.currentNodeId())) { ordered.add(shardRouting); } } if (ordered.isEmpty()) { - throw new IllegalArgumentException("No data node with criteria [" + nodeAttribute + "] found"); + final String message = String.format( + Locale.ROOT, + "no data nodes with %s [%s] found for shard: %s", + nodeAttributes.length == 1 ? "criterion" : "criteria", + String.join(",", nodeAttributes), + shardId()); + throw new IllegalArgumentException(message); } return new PlainShardIterator(shardId, ordered); } - public ShardIterator preferNodeActiveInitializingShardsIt(String nodeId) { - ArrayList ordered = new ArrayList<>(activeShards.size() + allInitializingShards.size()); + public ShardIterator preferNodeActiveInitializingShardsIt(Set nodeIds) { + ArrayList preferred = new ArrayList<>(activeShards.size() + allInitializingShards.size()); + ArrayList notPreferred = new ArrayList<>(activeShards.size() + allInitializingShards.size()); // fill it in a randomized fashion for (ShardRouting shardRouting : shuffler.shuffle(activeShards)) { - ordered.add(shardRouting); - if (nodeId.equals(shardRouting.currentNodeId())) { - // switch, its the matching node id - ordered.set(ordered.size() - 1, ordered.get(0)); - ordered.set(0, shardRouting); + if (nodeIds.contains(shardRouting.currentNodeId())) { + preferred.add(shardRouting); + } else { + notPreferred.add(shardRouting); } } + preferred.addAll(notPreferred); if (!allInitializingShards.isEmpty()) { - ordered.addAll(allInitializingShards); + preferred.addAll(allInitializingShards); } - return new PlainShardIterator(shardId, ordered); + return new PlainShardIterator(shardId, preferred); } @Override @@ -590,11 +600,12 @@ public class IndexShardRoutingTable implements Iterable { public static IndexShardRoutingTable readFromThin(StreamInput in, Index index) throws IOException { int iShardId = in.readVInt(); - Builder builder = new Builder(new ShardId(index, iShardId)); + ShardId shardId = new ShardId(index, iShardId); + Builder builder = new Builder(shardId); int size = in.readVInt(); for (int i = 0; i < size; i++) { - ShardRouting shard = ShardRouting.readShardRoutingEntry(in, index, iShardId); + ShardRouting shard = new ShardRouting(shardId, in); builder.addShard(shard); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 70246026894..ef3fae48301 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -26,37 +26,40 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import java.util.ArrayList;
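// Illustrative aside, not part of the patch: in the IndexShardRoutingTable hunk above,
// active and initializing shards are now permuted with one shared shuffler seed per
// request, so each request spreads load across copies while the two lists stay
// consistently ordered relative to each other. The stock shuffler rotates the list by
// the seed; this sketch uses Collections.shuffle as a stand-in analogy:
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

final class SeededShuffleSketch {
    static <T> List<T> shuffle(List<T> in, int seed) {
        List<T> copy = new ArrayList<>(in);
        Collections.shuffle(copy, new Random(seed)); // same seed => same permutation
        return copy;
    }

    public static void main(String[] args) {
        int seed = new Random().nextInt(); // one seed per request
        List<String> active = Arrays.asList("active-0", "active-1", "active-2");
        List<String> initializing = Arrays.asList("init-0", "init-1", "init-2");
        System.out.println(shuffle(active, seed));
        System.out.println(shuffle(initializing, seed)); // permuted consistently with the line above
    }
}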
+import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; -/** - * - */ public class OperationRouting extends AbstractComponent { + private String[] awarenessAttributes; - private final AwarenessAllocationDecider awarenessAllocationDecider; - - @Inject - public OperationRouting(Settings settings, AwarenessAllocationDecider awarenessAllocationDecider) { + public OperationRouting(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.awarenessAllocationDecider = awarenessAllocationDecider; + this.awarenessAttributes = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + this::setAwarenessAttributes); } - public ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) { + private void setAwarenessAttributes(String[] awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + + public ShardIterator indexShards(ClusterState clusterState, String index, String id, @Nullable String routing) { return shards(clusterState, index, id, routing).shardsIt(); } - public ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) { + public ShardIterator getShards(ClusterState clusterState, String index, String id, @Nullable String routing, @Nullable String preference) { return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference); } @@ -113,7 +116,6 @@ public class OperationRouting extends AbstractComponent { private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable indexShard, String localNodeId, DiscoveryNodes nodes, @Nullable String preference) { if (preference == null || preference.isEmpty()) { - String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes(); if (awarenessAttributes.length == 0) { return indexShard.activeInitializingShardsRandomIt(); } else { @@ -145,7 +147,6 @@ public class OperationRouting extends AbstractComponent { } // no more preference if (index == -1 || index == preference.length() - 1) { - String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes(); if (awarenessAttributes.length == 0) { return indexShard.activeInitializingShardsRandomIt(); } else { @@ -158,10 +159,14 @@ public class OperationRouting extends AbstractComponent { } preferenceType = Preference.parse(preference); switch (preferenceType) { - case PREFER_NODE: - return indexShard.preferNodeActiveInitializingShardsIt(preference.substring(Preference.PREFER_NODE.type().length() + 1)); + case PREFER_NODES: + final Set nodesIds = + Arrays.stream( + preference.substring(Preference.PREFER_NODES.type().length() + 1).split(",") + ).collect(Collectors.toSet()); + return indexShard.preferNodeActiveInitializingShardsIt(nodesIds); case LOCAL: - return indexShard.preferNodeActiveInitializingShardsIt(localNodeId); + return indexShard.preferNodeActiveInitializingShardsIt(Collections.singleton(localNodeId)); case PRIMARY: return indexShard.primaryActiveInitializingShardIt(); case REPLICA: @@ -172,19 +177,14 @@ public class OperationRouting extends AbstractComponent { return indexShard.replicaFirstActiveInitializingShardsIt(); case 
ONLY_LOCAL: return indexShard.onlyNodeActiveInitializingShardsIt(localNodeId); - case ONLY_NODE: - String nodeId = preference.substring(Preference.ONLY_NODE.type().length() + 1); - ensureNodeIdExists(nodes, nodeId); - return indexShard.onlyNodeActiveInitializingShardsIt(nodeId); case ONLY_NODES: - String nodeAttribute = preference.substring(Preference.ONLY_NODES.type().length() + 1); - return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttribute, nodes); + String nodeAttributes = preference.substring(Preference.ONLY_NODES.type().length() + 1); + return indexShard.onlyNodeSelectorActiveInitializingShardsIt(nodeAttributes.split(","), nodes); default: throw new IllegalArgumentException("unknown preference [" + preferenceType + "]"); } } // if not, then use it as the index - String[] awarenessAttributes = awarenessAllocationDecider.awarenessAttributes(); if (awarenessAttributes.length == 0) { return indexShard.activeInitializingShardsIt(Murmur3HashFunction.hash(preference)); } else { @@ -218,20 +218,15 @@ public class OperationRouting extends AbstractComponent { return new ShardId(indexMetaData.getIndex(), generateShardId(indexMetaData, id, routing)); } - private int generateShardId(IndexMetaData indexMetaData, String id, @Nullable String routing) { + static int generateShardId(IndexMetaData indexMetaData, String id, @Nullable String routing) { final int hash; if (routing == null) { hash = Murmur3HashFunction.hash(id); } else { hash = Murmur3HashFunction.hash(routing); } - return Math.floorMod(hash, indexMetaData.getNumberOfShards()); + // we don't use IMD#getNumberOfShards since the index might have been shrunk such that we need to use the size + // of original index to hash documents + return Math.floorMod(hash, indexMetaData.getRoutingNumShards()) / indexMetaData.getRoutingFactor(); } - - private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) { - if (!nodes.getDataNodes().keys().contains(nodeId)) { - throw new IllegalArgumentException("No data node with id[" + nodeId + "] found"); - } - } - } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java b/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java index 6de251b9d52..d4685d7aead 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/Preference.java @@ -30,9 +30,9 @@ public enum Preference { SHARDS("_shards"), /** - * Route to preferred node, if possible + * Route to preferred nodes, if possible */ - PREFER_NODE("_prefer_node"), + PREFER_NODES("_prefer_nodes"), /** * Route to local node, if possible @@ -64,11 +64,6 @@ public enum Preference { */ ONLY_LOCAL("_only_local"), - /** - * Route to specific node only - */ - ONLY_NODE("_only_node"), - /** * Route to only node with attribute */ @@ -98,10 +93,8 @@ public enum Preference { switch (preferenceType) { case "_shards": return SHARDS; - case "_prefer_node": - return PREFER_NODE; - case "_only_node": - return ONLY_NODE; + case "_prefer_nodes": + return PREFER_NODES; case "_local": return LOCAL; case "_primary": @@ -123,6 +116,7 @@ public enum Preference { throw new IllegalArgumentException("no Preference for [" + preferenceType + "]"); } } + } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java index c091f71798b..f80e55ed8b3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -28,13 +28,14 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; /** * Represents snapshot and index from which a recovering index should be restored */ public class RestoreSource implements Streamable, ToXContent { - private SnapshotId snapshotId; + private Snapshot snapshot; private String index; @@ -43,14 +44,14 @@ public class RestoreSource implements Streamable, ToXContent { RestoreSource() { } - public RestoreSource(SnapshotId snapshotId, Version version, String index) { - this.snapshotId = snapshotId; - this.version = version; - this.index = index; + public RestoreSource(Snapshot snapshot, Version version, String index) { + this.snapshot = Objects.requireNonNull(snapshot); + this.version = Objects.requireNonNull(version); + this.index = Objects.requireNonNull(index); } - public SnapshotId snapshotId() { - return snapshotId; + public Snapshot snapshot() { + return snapshot; } public String index() { @@ -61,26 +62,20 @@ public class RestoreSource implements Streamable, ToXContent { return version; } - public static RestoreSource readRestoreSource(StreamInput in) throws IOException { - RestoreSource restoreSource = new RestoreSource(); - restoreSource.readFrom(in); - return restoreSource; - } - public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException { return in.readOptionalStreamable(RestoreSource::new); } @Override public void readFrom(StreamInput in) throws IOException { - snapshotId = SnapshotId.readSnapshotId(in); + snapshot = new Snapshot(in); version = Version.readVersion(in); index = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { - snapshotId.writeTo(out); + snapshot.writeTo(out); Version.writeVersion(version, out); out.writeString(index); } @@ -88,8 +83,8 @@ public class RestoreSource implements Streamable, ToXContent { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.startObject() - .field("repository", snapshotId.getRepository()) - .field("snapshot", snapshotId.getSnapshot()) + .field("repository", snapshot.getRepository()) + .field("snapshot", snapshot.getSnapshotId().getName()) .field("version", version.toString()) .field("index", index) .endObject(); @@ -97,26 +92,24 @@ public class RestoreSource implements Streamable, ToXContent { @Override public String toString() { - return snapshotId.toString(); + return snapshot.toString(); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } - RestoreSource that = (RestoreSource) o; - - if (!index.equals(that.index)) return false; - if (!snapshotId.equals(that.snapshotId)) return false; - - return true; + @SuppressWarnings("unchecked") RestoreSource that = (RestoreSource) o; + return snapshot.equals(that.snapshot) && index.equals(that.index); } @Override 
public int hashCode() { - int result = snapshotId.hashCode(); - result = 31 * result + index.hashCode(); - return result; + return Objects.hash(snapshot, index); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 77ae7b41d91..8403f45a550 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -19,13 +19,18 @@ package org.elasticsearch.cluster.routing; +import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; /** * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards @@ -37,25 +42,33 @@ public class RoutingNode implements Iterable { private final DiscoveryNode node; - private final List shards; + private final LinkedHashMap shards; // LinkedHashMap to preserve order - public RoutingNode(String nodeId, DiscoveryNode node) { - this(nodeId, node, new ArrayList()); + public RoutingNode(String nodeId, DiscoveryNode node, ShardRouting... shards) { + this(nodeId, node, buildShardRoutingMap(shards)); } - public RoutingNode(String nodeId, DiscoveryNode node, List shards) { + RoutingNode(String nodeId, DiscoveryNode node, LinkedHashMap shards) { this.nodeId = nodeId; this.node = node; this.shards = shards; } - @Override - public Iterator iterator() { - return Collections.unmodifiableCollection(shards).iterator(); + private static LinkedHashMap buildShardRoutingMap(ShardRouting... shardRoutings) { + final LinkedHashMap shards = new LinkedHashMap<>(); + for (ShardRouting shardRouting : shardRoutings) { + ShardRouting previousValue = shards.put(shardRouting.shardId(), shardRouting); + if (previousValue != null) { + throw new IllegalArgumentException("Cannot have two different shards with same shard id " + shardRouting.shardId() + + " on same node "); + } + } + return shards; } - Iterator mutableIterator() { - return shards.iterator(); + @Override + public Iterator iterator() { + return Collections.unmodifiableCollection(shards.values()).iterator(); } /** @@ -67,6 +80,11 @@ public class RoutingNode implements Iterable { return this.node; } + @Nullable + public ShardRouting getByShardId(ShardId id) { + return shards.get(id); + } + /** * Get the id of this node * @return id of the node @@ -84,13 +102,25 @@ public class RoutingNode implements Iterable { * @param shard Shard to create on this Node */ void add(ShardRouting shard) { - // TODO use Set with ShardIds for faster lookup.
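// Illustrative aside, not part of the patch: the hunk below answers the removed TODO
// above by replacing RoutingNode's shard list with a LinkedHashMap keyed by shard id, so
// lookups become O(1) while iteration keeps insertion order like the old list. A
// self-contained sketch of those semantics (plain Strings stand in for ShardId and
// ShardRouting; both are illustrative):
import java.util.LinkedHashMap;

final class ShardMapSketch {
    final LinkedHashMap<String, String> shards = new LinkedHashMap<>(); // preserves order

    void add(String shardId, String shard) {
        if (shards.containsKey(shardId)) {
            throw new IllegalStateException("Trying to add a shard " + shardId + " to a node where it already exists");
        }
        shards.put(shardId, shard);
    }

    void remove(String shardId) {
        String previous = shards.remove(shardId);
        assert previous != null : "expected an assigned shard for " + shardId;
    }

    public static void main(String[] args) {
        ShardMapSketch node = new ShardMapSketch();
        node.add("[twitter][0]", "primary STARTED");
        node.add("[twitter][1]", "replica INITIALIZING");
        System.out.println(node.shards.get("[twitter][0]")); // O(1) lookup by shard id
        System.out.println(node.shards.keySet());            // iteration in insertion order
    }
}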
- for (ShardRouting shardRouting : shards) { - if (shardRouting.isSameShard(shard)) { - throw new IllegalStateException("Trying to add a shard [" + shard.shardId().getIndex().getName() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists"); - } + if (shards.containsKey(shard.shardId())) { + throw new IllegalStateException("Trying to add a shard " + shard.shardId() + " to a node [" + nodeId + "] where it already exists"); } - shards.add(shard); + shards.put(shard.shardId(), shard); + } + + void update(ShardRouting oldShard, ShardRouting newShard) { + if (shards.containsKey(oldShard.shardId()) == false) { + // Shard was already removed by routing nodes iterator + // TODO: change caller logic in RoutingNodes so that this check can go away + return; + } + ShardRouting previousValue = shards.put(newShard.shardId(), newShard); + assert previousValue == oldShard : "expected shard " + previousValue + " but was " + oldShard; + } + + void remove(ShardRouting shard) { + ShardRouting previousValue = shards.remove(shard.shardId()); + assert previousValue == shard : "expected shard " + previousValue + " but was " + shard; } /** @@ -166,7 +196,7 @@ public class RoutingNode implements Iterable { public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("-----node_id[").append(nodeId).append("][" + (node == null ? "X" : "V") + "]\n"); - for (ShardRouting entry : shards) { + for (ShardRouting entry : shards.values()) { sb.append("--------").append(entry.shortSummary()).append('\n'); } return sb.toString(); @@ -188,12 +218,8 @@ public class RoutingNode implements Iterable { return sb.toString(); } - public ShardRouting get(int i) { - return shards.get(i) ; - } - - public Collection copyShards() { - return new ArrayList<>(shards); + public List copyShards() { + return new ArrayList<>(shards.values()); } public boolean isEmpty() { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 173f42ce241..a4e61eac739 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -23,22 +23,26 @@ import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; +import java.util.ListIterator; import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Queue; import java.util.Set; import java.util.function.Predicate; @@ -48,20 +52,12 @@ import java.util.function.Predicate; */ public class RoutingNodes implements Iterable { - private final MetaData metaData; - - private final ClusterBlocks blocks; - - private final RoutingTable routingTable; - private final Map 
nodesToShards = new HashMap<>(); private final UnassignedShards unassignedShards = new UnassignedShards(this); private final Map<ShardId, List<ShardRouting>> assignedShards = new HashMap<>(); - private final ImmutableOpenMap customs; - private final boolean readOnly; private int inactivePrimaryCount = 0; @@ -71,7 +67,7 @@ public class RoutingNodes implements Iterable { private int relocatingShards = 0; private final Map<String, Set<String>> nodesPerAttributeNames = new HashMap<>(); - private final Map recoveryiesPerNode = new HashMap<>(); + private final Map recoveriesPerNode = new HashMap<>(); public RoutingNodes(ClusterState clusterState) { this(clusterState, true); @@ -79,15 +75,12 @@ public class RoutingNodes implements Iterable { public RoutingNodes(ClusterState clusterState, boolean readOnly) { this.readOnly = readOnly; - this.metaData = clusterState.metaData(); - this.blocks = clusterState.blocks(); - this.routingTable = clusterState.routingTable(); - this.customs = clusterState.customs(); + final RoutingTable routingTable = clusterState.routingTable(); - Map<String, List<ShardRouting>> nodesToShards = new HashMap<>(); + Map<String, LinkedHashMap<ShardId, ShardRouting>> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes for (ObjectCursor cursor : clusterState.nodes().getDataNodes().values()) { - nodesToShards.put(cursor.value.getId(), new ArrayList<>()); + nodesToShards.put(cursor.value.getId(), new LinkedHashMap<>()); // LinkedHashMap to preserve order } // fill in the inverse of node -> shards allocated @@ -101,109 +94,110 @@ public class RoutingNodes implements Iterable { // by the ShardId, as this is common for primary and replicas. // A replica Set might have one (and not more) replicas with the state of RELOCATING. if (shard.assignedToNode()) { - List entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>()); - final ShardRouting sr = getRouting(shard, readOnly); - entries.add(sr); - assignedShardsAdd(sr); + Map entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), + k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order + ShardRouting previousValue = entries.put(shard.shardId(), shard); + if (previousValue != null) { + throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node"); + } + assignedShardsAdd(shard); if (shard.relocating()) { relocatingShards++; - entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>()); + entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), + k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from.
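// Illustrative aside, not part of the patch: the recovery bookkeeping rewritten below
// (updateRecoveryCounts) keeps, per node, how many recoveries are incoming and outgoing.
// A peer recovery of an initializing shard counts once as incoming on the target node
// and once as outgoing on the node holding the primary. Sketch under assumed names; the
// real Recoveries helper with addIncoming/addOutgoing/getOrAdd lives in RoutingNodes:
import java.util.HashMap;
import java.util.Map;

final class RecoveriesSketch {
    static final class Recoveries {
        private int incoming;
        private int outgoing;
        void addIncoming(int howMany) { incoming += howMany; assert incoming >= 0; }
        void addOutgoing(int howMany) { outgoing += howMany; assert outgoing >= 0; }
        int getIncoming() { return incoming; }
        int getOutgoing() { return outgoing; }
    }

    private final Map<String, Recoveries> recoveriesPerNode = new HashMap<>();

    private Recoveries getOrAdd(String nodeId) {
        return recoveriesPerNode.computeIfAbsent(nodeId, k -> new Recoveries());
    }

    void peerRecoveryStarted(String targetNodeId, String primaryNodeId) {
        getOrAdd(targetNodeId).addIncoming(1);
        getOrAdd(primaryNodeId).addOutgoing(1);
    }

    void peerRecoveryFinished(String targetNodeId, String primaryNodeId) {
        getOrAdd(targetNodeId).addIncoming(-1);
        getOrAdd(primaryNodeId).addOutgoing(-1);
    }

    public static void main(String[] args) {
        RecoveriesSketch sketch = new RecoveriesSketch();
        sketch.peerRecoveryStarted("node-2", "node-1"); // replica on node-2 recovers from primary on node-1
        System.out.println(sketch.getOrAdd("node-1").getOutgoing()); // 1
        System.out.println(sketch.getOrAdd("node-2").getIncoming()); // 1
    }
}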
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); - addInitialRecovery(targetShardRouting); - if (readOnly) { - targetShardRouting.freeze(); + addInitialRecovery(targetShardRouting, indexShard.primary); + previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting); + if (previousValue != null) { + throw new IllegalArgumentException("Cannot have two different shards with same shard id on same node"); } - entries.add(targetShardRouting); assignedShardsAdd(targetShardRouting); - } else if (shard.active() == false) { // shards that are initializing without being relocated + } else if (shard.initializing()) { if (shard.primary()) { inactivePrimaryCount++; } inactiveShardCount++; - addInitialRecovery(shard); + addInitialRecovery(shard, indexShard.primary); } } else { - final ShardRouting sr = getRouting(shard, readOnly); - assignedShardsAdd(sr); - unassignedShards.add(sr); + unassignedShards.add(shard); } } } } - for (Map.Entry<String, List<ShardRouting>> entry : nodesToShards.entrySet()) { + for (Map.Entry<String, LinkedHashMap<ShardId, ShardRouting>> entry : nodesToShards.entrySet()) { String nodeId = entry.getKey(); this.nodesToShards.put(nodeId, new RoutingNode(nodeId, clusterState.nodes().get(nodeId), entry.getValue())); } } private void addRecovery(ShardRouting routing) { - addRecovery(routing, true, false); + updateRecoveryCounts(routing, true, findAssignedPrimaryIfPeerRecovery(routing)); } private void removeRecovery(ShardRouting routing) { - addRecovery(routing, false, false); + updateRecoveryCounts(routing, false, findAssignedPrimaryIfPeerRecovery(routing)); } - public void addInitialRecovery(ShardRouting routing) { - addRecovery(routing,true, true); + private void addInitialRecovery(ShardRouting routing, ShardRouting initialPrimaryShard) { + updateRecoveryCounts(routing, true, initialPrimaryShard); } - private void addRecovery(final ShardRouting routing, final boolean increment, final boolean initializing) { + private void updateRecoveryCounts(final ShardRouting routing, final boolean increment, @Nullable final ShardRouting primary) { final int howMany = increment ?
1 : -1; assert routing.initializing() : "routing must be initializing: " + routing; - Recoveries.getOrAdd(recoveryiesPerNode, routing.currentNodeId()).addIncoming(howMany); - final String sourceNodeId; - if (routing.relocatingNodeId() != null) { // this is a relocation-target - sourceNodeId = routing.relocatingNodeId(); - if (routing.primary() && increment == false) { // primary is done relocating + // TODO: check primary == null || primary.active() after all tests properly add ReplicaAfterPrimaryActiveAllocationDecider + assert primary == null || primary.assignedToNode() : + "shard is initializing but its primary is not assigned to a node"; + + Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany); + + if (routing.isPeerRecovery()) { + // add/remove corresponding outgoing recovery on node with primary shard + if (primary == null) { + throw new IllegalStateException("shard is peer recovering but primary is unassigned"); + } + Recoveries.getOrAdd(recoveriesPerNode, primary.currentNodeId()).addOutgoing(howMany); + + if (increment == false && routing.primary() && routing.relocatingNodeId() != null) { + // primary is done relocating, move non-primary recoveries from old primary to new primary int numRecoveringReplicas = 0; - for (ShardRouting assigned : assignedShards(routing)) { - if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) { + for (ShardRouting assigned : assignedShards(routing.shardId())) { + if (assigned.primary() == false && assigned.isPeerRecovery()) { numRecoveringReplicas++; } } - // we transfer the recoveries to the relocated primary - recoveryiesPerNode.get(sourceNodeId).addOutgoing(-numRecoveringReplicas); - recoveryiesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas); + recoveriesPerNode.get(routing.relocatingNodeId()).addOutgoing(-numRecoveringReplicas); + recoveriesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas); } - } else if (routing.primary() == false) { // primary without relocationID is initial recovery - ShardRouting primary = findPrimary(routing); - if (primary == null && initializing) { - primary = routingTable.index(routing.index().getName()).shard(routing.shardId().id()).primary; - } else if (primary == null) { - throw new IllegalStateException("replica is initializing but primary is unassigned"); - } - sourceNodeId = primary.currentNodeId(); - } else { - sourceNodeId = null; - } - if (sourceNodeId != null) { - Recoveries.getOrAdd(recoveryiesPerNode, sourceNodeId).addOutgoing(howMany); } } public int getIncomingRecoveries(String nodeId) { - return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming(); + return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming(); } public int getOutgoingRecoveries(String nodeId) { - return recoveryiesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing(); + return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing(); } - private ShardRouting findPrimary(ShardRouting routing) { - List shardRoutings = assignedShards.get(routing.shardId()); + @Nullable + private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) { ShardRouting primary = null; - if (shardRoutings != null) { - for (ShardRouting shardRouting : shardRoutings) { - if (shardRouting.primary()) { - if (shardRouting.active()) { - return shardRouting; - } else if (primary == null) { - primary = shardRouting; - } else if (primary.relocatingNodeId() != null) { - primary = 
shardRouting; + } } } } @@ -211,58 +205,19 @@ public class RoutingNodes implements Iterable { return primary; } - private static ShardRouting getRouting(ShardRouting src, boolean readOnly) { - if (readOnly) { - src.freeze(); // we just freeze and reuse this instance if we are read only - } else { - src = new ShardRouting(src); - } - return src; - } @Override public Iterator iterator() { return Collections.unmodifiableCollection(nodesToShards.values()).iterator(); } - public RoutingTable routingTable() { - return routingTable; + public Iterator mutableIterator() { + return nodesToShards.values().iterator(); } - public RoutingTable getRoutingTable() { - return routingTable(); - } - - public MetaData metaData() { - return this.metaData; - } - - public MetaData getMetaData() { - return metaData(); - } - - public ClusterBlocks blocks() { - return this.blocks; - } - - public ClusterBlocks getBlocks() { - return this.blocks; - } - - public ImmutableOpenMap customs() { - return this.customs; - } - - public <T extends ClusterState.Custom> T custom(String type) { return (T) customs.get(type); } - public UnassignedShards unassigned() { return this.unassignedShards; } - public RoutingNodesIterator nodes() { - return new RoutingNodesIterator(nodesToShards.values().iterator()); - } - public RoutingNode node(String nodeId) { return nodesToShards.get(nodeId); } @@ -312,11 +267,20 @@ public class RoutingNodes implements Iterable { } /** - * Returns the active primary shard for the given ShardRouting or null if + * Returns all shards that are not in the state UNASSIGNED with the same shard + * ID as the given shard.
- */ - public Iterable assignedShards(ShardRouting shard) { - return assignedShards(shard.shardId()); - } - /** * Returns true iff all replicas are active for the given shard routing. Otherwise false */ - public boolean allReplicasActive(ShardRouting shardRouting) { - final List shards = assignedShards(shardRouting.shardId()); - if (shards.isEmpty() || shards.size() < this.routingTable.index(shardRouting.index().getName()).shard(shardRouting.id()).size()) { + public boolean allReplicasActive(ShardId shardId, MetaData metaData) { + final List shards = assignedShards(shardId); + if (shards.isEmpty() || shards.size() < metaData.getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1) { return false; // if we are empty nothing is active; if we have less than the total, at least one is unassigned } for (ShardRouting shard : shards) { @@ -423,42 +379,48 @@ * Moves a shard from unassigned to initialize state * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. + * @return the initialized shard */ - public void initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) { + public ShardRouting initialize(ShardRouting shard, String nodeId, @Nullable String existingAllocationId, long expectedSize) { ensureMutable(); - assert shard.unassigned() : shard; - shard.initialize(nodeId, existingAllocationId, expectedSize); - node(nodeId).add(shard); + assert shard.unassigned() : "expected an unassigned shard " + shard; + ShardRouting initializedShard = shard.initialize(nodeId, existingAllocationId, expectedSize); + node(nodeId).add(initializedShard); inactiveShardCount++; - if (shard.primary()) { + if (initializedShard.primary()) { inactivePrimaryCount++; } - addRecovery(shard); - assignedShardsAdd(shard); + addRecovery(initializedShard); + assignedShardsAdd(initializedShard); + return initializedShard; } /** * Relocate a shard to another node, adding the target initializing - * shard as well as assigning it. And returning the target initializing - * shard. + * shard as well as assigning it. + * + * @return pair of source relocating and target initializing shards. */ - public ShardRouting relocate(ShardRouting shard, String nodeId, long expectedShardSize) { + public Tuple relocate(ShardRouting shard, String nodeId, long expectedShardSize) { ensureMutable(); relocatingShards++; - shard.relocate(nodeId, expectedShardSize); - ShardRouting target = shard.buildTargetRelocatingShard(); + ShardRouting source = shard.relocate(nodeId, expectedShardSize); + ShardRouting target = source.buildTargetRelocatingShard(); + updateAssigned(shard, source); node(target.currentNodeId()).add(target); assignedShardsAdd(target); addRecovery(target); - return target; + return Tuple.tuple(source, target); } /** * Marks a shard as started and adjusts internal statistics.
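// Illustrative aside, not part of the patch: across this change ShardRouting becomes
// immutable, so transitions such as initialize(), relocate(), moveToStarted() and
// cancelRelocation() return a fresh instance and the caller must swap the old routing
// for the new one (the updateAssigned helper further down). A sketch of the pattern with
// a hypothetical two-state routing:
import java.util.HashMap;
import java.util.Map;

final class ImmutableRoutingSketch {
    enum State { INITIALIZING, STARTED }

    static final class Routing {
        final String shardId;
        final State state;
        Routing(String shardId, State state) { this.shardId = shardId; this.state = state; }
        Routing moveToStarted() {
            assert state == State.INITIALIZING : "expected an initializing shard " + shardId;
            return new Routing(shardId, State.STARTED); // new instance, nothing is mutated
        }
    }

    private final Map<String, Routing> assigned = new HashMap<>();

    void add(Routing shard) {
        assigned.put(shard.shardId, shard);
    }

    Routing started(Routing shard) {
        Routing startedShard = shard.moveToStarted();
        // the bookkeeping step the old mutable code never needed: replace old with new
        Routing previous = assigned.put(shard.shardId, startedShard);
        assert previous == shard : "caller must hold the latest routing instance";
        return startedShard;
    }

    public static void main(String[] args) {
        ImmutableRoutingSketch nodes = new ImmutableRoutingSketch();
        Routing shard = new Routing("[twitter][0]", State.INITIALIZING);
        nodes.add(shard);
        shard = nodes.started(shard); // the old reference is stale; keep the returned one
        System.out.println(shard.state); // STARTED
    }
}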
+ * + * @return the started shard */ - public void started(ShardRouting shard) { + public ShardRouting started(ShardRouting shard) { ensureMutable(); - assert !shard.active() : "expected an initializing shard " + shard; + assert shard.initializing() : "expected an initializing shard " + shard; if (shard.relocatingNodeId() == null) { // if this is not a target shard for relocation, we need to update statistics inactiveShardCount--; @@ -467,63 +429,59 @@ } } removeRecovery(shard); - shard.moveToStarted(); + ShardRouting startedShard = shard.moveToStarted(); + updateAssigned(shard, startedShard); + return startedShard; } /** * Cancels the relocation of a shard; the shard must be relocating. + * + * @return the shard after cancelling relocation */ - public void cancelRelocation(ShardRouting shard) { + public ShardRouting cancelRelocation(ShardRouting shard) { ensureMutable(); relocatingShards--; - shard.cancelRelocation(); + ShardRouting cancelledShard = shard.cancelRelocation(); + updateAssigned(shard, cancelledShard); + return cancelledShard; } /** - * swaps the status of a shard, making replicas primary and vice versa. + * moves the assigned replica shard to primary. * - * @param shards the shard to have its primary status swapped. + * @param replicaShard the replica shard to be promoted to primary + * @return the resulting primary shard */ - public void swapPrimaryFlag(ShardRouting... shards) { + public ShardRouting promoteAssignedReplicaShardToPrimary(ShardRouting replicaShard) { ensureMutable(); - for (ShardRouting shard : shards) { - if (shard.primary()) { - shard.moveFromPrimary(); - if (shard.unassigned()) { - unassignedShards.primaries--; - } - } else { - shard.moveToPrimary(); - if (shard.unassigned()) { - unassignedShards.primaries++; - } - } - } + assert replicaShard.unassigned() == false : "unassigned shard cannot be promoted to primary: " + replicaShard; + assert replicaShard.primary() == false : "primary shard cannot be promoted to primary: " + replicaShard; + ShardRouting primaryShard = replicaShard.moveToPrimary(); + updateAssigned(replicaShard, primaryShard); + return primaryShard; } private static final List EMPTY = Collections.emptyList(); - private List assignedShards(ShardId shardId) { - final List replicaSet = assignedShards.get(shardId); - return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet); - } - /** * Removes the given shard from the RoutingNodes internal statistics and cancels * the relocation if the shard is relocating. */ - private void remove(ShardRouting shard) { + public void remove(ShardRouting shard) { ensureMutable(); - if (!shard.active() && shard.relocatingNodeId() == null) { + assert shard.unassigned() == false : "only assigned shards can be removed here (" + shard + ")"; + node(shard.currentNodeId()).remove(shard); + if (shard.initializing() && shard.relocatingNodeId() == null) { inactiveShardCount--; assert inactiveShardCount >= 0; if (shard.primary()) { inactivePrimaryCount--; } } else if (shard.relocating()) { - cancelRelocation(shard); + shard = cancelRelocation(shard); } assignedShardsRemove(shard); if (shard.initializing()) { @@ -531,13 +489,23 @@ } } + /** + * Removes relocation source of an initializing non-primary shard. This allows the replica shard to continue recovery from + * the primary even though its non-primary relocation source has failed.
+ */ + public ShardRouting removeRelocationSource(ShardRouting shard) { + assert shard.isRelocationTarget() : "only relocation target shards can have their relocation source removed (" + shard + ")"; + ensureMutable(); + ShardRouting relocationMarkerRemoved = shard.removeRelocationSource(); + updateAssigned(shard, relocationMarkerRemoved); + inactiveShardCount++; // relocation targets are not counted as inactive shards whereas initializing shards are + return relocationMarkerRemoved; + } + private void assignedShardsAdd(ShardRouting shard) { - if (shard.unassigned()) { - // no unassigned - return; - } + assert shard.unassigned() == false : "unassigned shard " + shard + " cannot be added to list of assigned shards"; List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>()); - assert assertInstanceNotInList(shard, shards); + assert assertInstanceNotInList(shard, shards) : "shard " + shard + " cannot appear twice in list of assigned shards"; shards.add(shard); } @@ -560,41 +528,43 @@ public class RoutingNodes implements Iterable<RoutingNode> { return; } } - assert false : "Illegal state"; } + assert false : "No shard found to remove"; } - public boolean isKnown(DiscoveryNode node) { - return nodesToShards.containsKey(node.getId()); - } - - public void addNode(DiscoveryNode node) { - ensureMutable(); - RoutingNode routingNode = new RoutingNode(node.getId(), node); - nodesToShards.put(routingNode.nodeId(), routingNode); - } - - public RoutingNodeIterator routingNodeIter(String nodeId) { - final RoutingNode routingNode = nodesToShards.get(nodeId); - if (routingNode == null) { - return null; - } - return new RoutingNodeIterator(routingNode); - } - - public RoutingNode[] toArray() { - return nodesToShards.values().toArray(new RoutingNode[nodesToShards.size()]); - } - - public void reinitShadowPrimary(ShardRouting candidate) { + public ShardRouting reinitShadowPrimary(ShardRouting candidate) { ensureMutable(); if (candidate.relocating()) { cancelRelocation(candidate); } - candidate.reinitializeShard(); + ShardRouting reinitializedShard = candidate.reinitializeShard(); + updateAssigned(candidate, reinitializedShard); inactivePrimaryCount++; inactiveShardCount++; + return reinitializedShard; + } + private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) { + assert oldShard.shardId().equals(newShard.shardId()) : + "can only update " + oldShard + " by shard with same shard id but was " + newShard; + assert oldShard.unassigned() == false && newShard.unassigned() == false : + "only assigned shards can be updated in list of assigned shards (prev: " + oldShard + ", new: " + newShard + ")"; + assert oldShard.currentNodeId().equals(newShard.currentNodeId()) : + "can only update " + oldShard + " by shard assigned to the same node but was " + newShard; + node(oldShard.currentNodeId()).update(oldShard, newShard); + List<ShardRouting> shardsWithMatchingShardId = assignedShards.computeIfAbsent(oldShard.shardId(), k -> new ArrayList<>()); + int previousShardIndex = shardsWithMatchingShardId.indexOf(oldShard); + assert previousShardIndex >= 0 : "shard to update " + oldShard + " does not exist in list of assigned shards"; + shardsWithMatchingShardId.set(previousShardIndex, newShard); + } + + public ShardRouting moveToUnassigned(ShardRouting shard, UnassignedInfo unassignedInfo) { + ensureMutable(); + assert shard.unassigned() == false : "only assigned shards can be moved to unassigned (" + shard + ")"; + remove(shard); + ShardRouting unassigned = 
shard.moveToUnassigned(unassignedInfo); + unassignedShards.add(unassigned); + return unassigned; } /** @@ -683,11 +653,11 @@ public class RoutingNodes implements Iterable<RoutingNode> { public class UnassignedIterator implements Iterator<ShardRouting> { - private final Iterator<ShardRouting> iterator; + private final ListIterator<ShardRouting> iterator; private ShardRouting current; public UnassignedIterator() { - this.iterator = unassigned.iterator(); + this.iterator = unassigned.listIterator(); } @Override @@ -705,9 +675,9 @@ public class RoutingNodes implements Iterable<RoutingNode> { * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. */ - public void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { + public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { innerRemove(); - nodes.initialize(new ShardRouting(current), nodeId, existingAllocationId, expectedShardSize); + return nodes.initialize(current, nodeId, existingAllocationId, expectedShardSize); } /** @@ -721,6 +691,35 @@ public class RoutingNodes implements Iterable<RoutingNode> { ignoreShard(current); } + private void updateShardRouting(ShardRouting shardRouting) { + current = shardRouting; + iterator.set(shardRouting); + } + + /** + * Updates the unassigned info on the current unassigned shard + * + * @param unassignedInfo the new unassigned info to use + * @return the shard with unassigned info updated + */ + public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) { + ShardRouting updatedShardRouting = current.updateUnassignedInfo(unassignedInfo); + updateShardRouting(updatedShardRouting); + return updatedShardRouting; + } + + /** + * Marks the current primary shard as a replica + * + * @return the shard with primary status swapped + */ + public ShardRouting demotePrimaryToReplicaShard() { + assert current.primary() : "non-primary shard " + current + " cannot be demoted"; + updateShardRouting(current.moveFromPrimary()); + primaries--; + return current; + } + /** * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or * {@link #initialize(String, String, long)}. 
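The hunks above finish converting RoutingNodes.UnassignedIterator to the immutable ShardRouting model: every state change now returns a fresh ShardRouting, and updateShardRouting() keeps both the iterator's current field and the backing list consistent via ListIterator.set(). A minimal caller-side sketch of the new contract follows (the node id is a hypothetical placeholder, allocation deciders are elided, and it assumes the UNAVAILABLE_EXPECTED_SHARD_SIZE constant is accessible; this is illustrative, not code from this change):

    import org.elasticsearch.cluster.routing.RoutingNodes;
    import org.elasticsearch.cluster.routing.ShardRouting;

    final class UnassignedIteratorSketch {
        // Assigns every unassigned shard to the given node, illustrating that
        // initialize() removes the shard from the unassigned list and returns
        // the new INITIALIZING instance instead of mutating the old one.
        static void assignAll(RoutingNodes routingNodes, String nodeId) {
            RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator();
            while (it.hasNext()) {
                it.next(); // the shard returned here is superseded by the result of initialize()
                ShardRouting initialized = it.initialize(nodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
                assert initialized.initializing();
            }
        }
    }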
@@ -794,12 +793,10 @@ public class RoutingNodes implements Iterable { Map indicesAndShards = new HashMap<>(); for (RoutingNode node : routingNodes) { for (ShardRouting shard : node) { - if (!shard.active() && shard.relocatingNodeId() == null) { - if (!shard.relocating()) { - inactiveShardCount++; - if (shard.primary()) { - inactivePrimaryCount++; - } + if (shard.initializing() && shard.relocatingNodeId() == null) { + inactiveShardCount++; + if (shard.primary()) { + inactivePrimaryCount++; } } if (shard.relocating()) { @@ -847,7 +844,7 @@ public class RoutingNodes implements Iterable { } } - for (Map.Entry recoveries : routingNodes.recoveryiesPerNode.entrySet()) { + for (Map.Entry recoveries : routingNodes.recoveriesPerNode.entrySet()) { String node = recoveries.getKey(); final Recoveries value = recoveries.getValue(); int incoming = 0; @@ -857,20 +854,17 @@ public class RoutingNodes implements Iterable { for (ShardRouting routing : routingNode) { if (routing.initializing()) { incoming++; - } else if (routing.relocating()) { - outgoing++; } - if (routing.primary() && (routing.initializing() && routing.relocatingNodeId() != null) == false) { // we don't count the initialization end of the primary relocation - List shardRoutings = routingNodes.assignedShards.get(routing.shardId()); - for (ShardRouting assigned : shardRoutings) { - if (assigned.primary() == false && assigned.initializing() && assigned.relocatingNodeId() == null) { + if (routing.primary() && routing.isPeerRecovery() == false) { + for (ShardRouting assigned : routingNodes.assignedShards.get(routing.shardId())) { + if (assigned.isPeerRecovery()) { outgoing++; } } } } } - assert incoming == value.incoming : incoming + " != " + value.incoming; + assert incoming == value.incoming : incoming + " != " + value.incoming + " node: " + routingNode; assert outgoing == value.outgoing : outgoing + " != " + value.outgoing + " node: " + routingNode; } @@ -887,102 +881,50 @@ public class RoutingNodes implements Iterable { return true; } - - public class RoutingNodesIterator implements Iterator, Iterable { - private RoutingNode current; - private final Iterator delegate; - - public RoutingNodesIterator(Iterator iterator) { - delegate = iterator; - } - - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public RoutingNode next() { - return current = delegate.next(); - } - - public RoutingNodeIterator nodeShards() { - return new RoutingNodeIterator(current); - } - - @Override - public void remove() { - delegate.remove(); - } - - @Override - public Iterator iterator() { - return nodeShards(); - } - } - - public final class RoutingNodeIterator implements Iterator, Iterable { - private final RoutingNode iterable; - private ShardRouting shard; - private final Iterator delegate; - private boolean removed = false; - - public RoutingNodeIterator(RoutingNode iterable) { - this.delegate = iterable.mutableIterator(); - this.iterable = iterable; - } - - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public ShardRouting next() { - removed = false; - return shard = delegate.next(); - } - - @Override - public void remove() { - ensureMutable(); - delegate.remove(); - RoutingNodes.this.remove(shard); - removed = true; - } - - - /** returns true if {@link #remove()} or {@link #moveToUnassigned(UnassignedInfo)} were called on the current shard */ - public boolean isRemoved() { - return removed; - } - - @Override - public Iterator iterator() { - return iterable.iterator(); - } - - public 
void moveToUnassigned(UnassignedInfo unassignedInfo) { - ensureMutable(); - if (isRemoved() == false) { - remove(); - } - ShardRouting unassigned = new ShardRouting(shard); // protective copy of the mutable shard - unassigned.moveToUnassigned(unassignedInfo); - unassigned().add(unassigned); - } - - public ShardRouting current() { - return shard; - } - } - private void ensureMutable() { if (readOnly) { throw new IllegalStateException("can't modify RoutingNodes - readonly"); } } + /** + * Creates an iterator over shards interleaving between nodes: The iterator returns the first shard from + * the first node, then the first shard of the second node, etc. until one shard from each node has been returned. + * The iterator then resumes on the first node by returning the second shard and continues until all shards from + * all the nodes have been returned. + */ + public Iterator<ShardRouting> nodeInterleavedShardIterator() { + final Queue<Iterator<ShardRouting>> queue = new ArrayDeque<>(); + for (Map.Entry<String, RoutingNode> entry : nodesToShards.entrySet()) { + queue.add(entry.getValue().copyShards().iterator()); + } + return new Iterator<ShardRouting>() { + public boolean hasNext() { + while (!queue.isEmpty()) { + if (queue.peek().hasNext()) { + return true; + } + queue.poll(); + } + return false; + } + + public ShardRouting next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + Iterator<ShardRouting> iter = queue.poll(); + ShardRouting result = iter.next(); + queue.offer(iter); + return result; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + private static final class Recoveries { private static final Recoveries EMPTY = new Recoveries(); private int incoming = 0; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 1ebd4699d1a..cfe48dd711e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -30,12 +29,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.threadpool.ThreadPool; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -50,27 +44,20 @@ import java.util.concurrent.atomic.AtomicBoolean; * actions. *

    */ -public class RoutingService extends AbstractLifecycleComponent implements ClusterStateListener { +public class RoutingService extends AbstractLifecycleComponent { private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute"; - final ThreadPool threadPool; private final ClusterService clusterService; private final AllocationService allocationService; private AtomicBoolean rerouting = new AtomicBoolean(); - private volatile long minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE; - private volatile ScheduledFuture registeredNextDelayFuture; @Inject - public RoutingService(Settings settings, ThreadPool threadPool, ClusterService clusterService, AllocationService allocationService) { + public RoutingService(Settings settings, ClusterService clusterService, AllocationService allocationService) { super(settings); - this.threadPool = threadPool; this.clusterService = clusterService; this.allocationService = allocationService; - if (clusterService != null) { - clusterService.addFirst(this); - } } @Override @@ -83,12 +70,6 @@ public class RoutingService extends AbstractLifecycleComponent i @Override protected void doClose() { - FutureUtils.cancel(registeredNextDelayFuture); - clusterService.remove(this); - } - - public AllocationService getAllocationService() { - return this.allocationService; } /** @@ -98,48 +79,6 @@ public class RoutingService extends AbstractLifecycleComponent i performReroute(reason); } - @Override - public void clusterChanged(ClusterChangedEvent event) { - if (event.state().nodes().isLocalNodeElectedMaster()) { - // Figure out if an existing scheduled reroute is good enough or whether we need to cancel and reschedule. - // If the minimum of the currently relevant delay settings is larger than something we scheduled in the past, - // we are guaranteed that the planned schedule will happen before any of the current shard delays are expired. - long minDelaySetting = UnassignedInfo.findSmallestDelayedAllocationSettingNanos(settings, event.state()); - if (minDelaySetting <= 0) { - logger.trace("no need to schedule reroute - no delayed unassigned shards, minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos); - minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE; - FutureUtils.cancel(registeredNextDelayFuture); - } else if (minDelaySetting < minDelaySettingAtLastSchedulingNanos) { - FutureUtils.cancel(registeredNextDelayFuture); - minDelaySettingAtLastSchedulingNanos = minDelaySetting; - TimeValue nextDelay = TimeValue.timeValueNanos(UnassignedInfo.findNextDelayedAllocationIn(event.state())); - assert nextDelay.nanos() > 0 : "next delay must be non 0 as minDelaySetting is [" + minDelaySetting + "]"; - logger.info("delaying allocation for [{}] unassigned shards, next check in [{}]", - UnassignedInfo.getNumberOfDelayedUnassigned(event.state()), nextDelay); - registeredNextDelayFuture = threadPool.schedule(nextDelay, ThreadPool.Names.SAME, new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE; - reroute("assign delayed unassigned shards"); - } - - @Override - public void onFailure(Throwable t) { - logger.warn("failed to schedule/execute reroute post unassigned shard", t); - minDelaySettingAtLastSchedulingNanos = Long.MAX_VALUE; - } - }); - } else { - logger.trace("no need to schedule reroute - current schedule reroute is enough. 
minDelaySetting [{}], scheduled [{}]", minDelaySetting, minDelaySettingAtLastSchedulingNanos); - } - } - } - - // visible for testing - long getMinDelaySettingAtLastSchedulingNanos() { - return this.minDelaySettingAtLastSchedulingNanos; - } - // visible for testing protected void performReroute(String reason) { try { @@ -170,17 +109,17 @@ public class RoutingService extends AbstractLifecycleComponent i } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); if (logger.isTraceEnabled()) { - logger.error("unexpected failure during [{}], current state:\n{}", t, source, state.prettyPrint()); + logger.error("unexpected failure during [{}], current state:\n{}", e, source, state.prettyPrint()); } else { - logger.error("unexpected failure during [{}], current state version [{}]", t, source, state.version()); + logger.error("unexpected failure during [{}], current state version [{}]", e, source, state.version()); } } }); - } catch (Throwable e) { + } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); logger.warn("failed to reroute routing table, current state:\n{}", e, state.prettyPrint()); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 86973b8c83f..f43517ec559 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -404,9 +404,9 @@ public class RoutingTable implements Iterable, Diffable indexRoutingTableBuilders = new HashMap<>(); for (RoutingNode routingNode : routingNodes) { @@ -422,8 +422,7 @@ public class RoutingTable implements Iterable, Diffable, Diffable asList; - private transient ShardId shardIdentifier; - private boolean frozen = false; - private long expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; - - private ShardRouting() { - this.asList = Collections.singletonList(this); - } - - public ShardRouting(ShardRouting copy) { - this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize()); - } + private final long expectedShardSize; /** * A constructor to internally create shard routing instances, note, the internal flag should only be set to true * by either this class or tests. Visible for testing. 
*/ - ShardRouting(Index index, int shardId, String currentNodeId, + ShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, - UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) { - this.index = index; + UnassignedInfo unassignedInfo, AllocationId allocationId, long expectedShardSize) { this.shardId = shardId; this.currentNodeId = currentNodeId; this.relocatingNodeId = relocatingNodeId; @@ -88,38 +77,31 @@ public final class ShardRouting implements Streamable, ToXContent { assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; - if (!internal) { - assert state == ShardRoutingState.UNASSIGNED; - assert currentNodeId == null; - assert relocatingNodeId == null; - assert allocationId == null; - } - } /** * Creates a new unassigned shard. */ - public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) { - return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE); + public static ShardRouting newUnassigned(ShardId shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) { + return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); } public Index index() { - return this.index; + return shardId.getIndex(); } /** * The index name. */ public String getIndexName() { - return index().getName(); + return shardId.getIndexName(); } /** * The shard id. */ public int id() { - return this.shardId; + return shardId.id(); } /** @@ -201,8 +183,8 @@ public final class ShardRouting implements Streamable, ToXContent { */ public ShardRouting buildTargetRelocatingShard() { assert relocating(); - return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo, - AllocationId.newTargetRelocation(allocationId), true, expectedShardSize); + return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo, + AllocationId.newTargetRelocation(allocationId), expectedShardSize); } /** @@ -247,11 +229,7 @@ public final class ShardRouting implements Streamable, ToXContent { * The shard id. 
*/ public ShardId shardId() { - if (shardIdentifier != null) { - return shardIdentifier; - } - shardIdentifier = new ShardId(index, shardId); - return shardIdentifier; + return shardId; } public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) { @@ -274,61 +252,41 @@ public final class ShardRouting implements Streamable, ToXContent { return true; } + /** + * returns true for initializing shards that recover their data from another shard copy + */ + public boolean isPeerRecovery() { + return state == ShardRoutingState.INITIALIZING && (primary() == false || relocatingNodeId != null); + } + /** * A shard iterator with just this shard in it. */ public ShardIterator shardsIt() { - return new PlainShardIterator(shardId(), asList); + return new PlainShardIterator(shardId, asList); } - public static ShardRouting readShardRoutingEntry(StreamInput in) throws IOException { - ShardRouting entry = new ShardRouting(); - entry.readFrom(in); - return entry; - } - - public static ShardRouting readShardRoutingEntry(StreamInput in, Index index, int shardId) throws IOException { - ShardRouting entry = new ShardRouting(); - entry.readFrom(in, index, shardId); - return entry; - } - - public void readFrom(StreamInput in, Index index, int shardId) throws IOException { - this.index = index; + public ShardRouting(ShardId shardId, StreamInput in) throws IOException { this.shardId = shardId; - readFromThin(in); - } - - public void readFromThin(StreamInput in) throws IOException { - if (in.readBoolean()) { - currentNodeId = in.readString(); - } - - if (in.readBoolean()) { - relocatingNodeId = in.readString(); - } - + currentNodeId = in.readOptionalString(); + relocatingNodeId = in.readOptionalString(); primary = in.readBoolean(); state = ShardRoutingState.fromValue(in.readByte()); - restoreSource = RestoreSource.readOptionalRestoreSource(in); - if (in.readBoolean()) { - unassignedInfo = new UnassignedInfo(in); - } - if (in.readBoolean()) { - allocationId = new AllocationId(in); - } - if (relocating() || initializing()) { - expectedShardSize = in.readLong(); + unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new); + allocationId = in.readOptionalWriteable(AllocationId::new); + final long shardSize; + if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) { + shardSize = in.readLong(); } else { - expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; + shardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; } - freeze(); + expectedShardSize = shardSize; + asList = Collections.singletonList(this); } - @Override - public void readFrom(StreamInput in) throws IOException { - readFrom(in, new Index(in), in.readVInt()); + public ShardRouting(StreamInput in) throws IOException { + this(ShardId.readShardId(in), in); } /** @@ -338,74 +296,38 @@ public final class ShardRouting implements Streamable, ToXContent { * @throws IOException if something happens during write */ public void writeToThin(StreamOutput out) throws IOException { - if (currentNodeId != null) { - out.writeBoolean(true); - out.writeString(currentNodeId); - } else { - out.writeBoolean(false); - } - - if (relocatingNodeId != null) { - out.writeBoolean(true); - out.writeString(relocatingNodeId); - } else { - out.writeBoolean(false); - } - + out.writeOptionalString(currentNodeId); + out.writeOptionalString(relocatingNodeId); out.writeBoolean(primary); out.writeByte(state.value()); - - if (restoreSource != null) { - out.writeBoolean(true); - restoreSource.writeTo(out); - } else { - out.writeBoolean(false); - } - if 
(unassignedInfo != null) { - out.writeBoolean(true); - unassignedInfo.writeTo(out); - } else { - out.writeBoolean(false); - } - if (allocationId != null) { - out.writeBoolean(true); - allocationId.writeTo(out); - } else { - out.writeBoolean(false); - } - if (relocating() || initializing()) { + out.writeOptionalStreamable(restoreSource); + out.writeOptionalWriteable(unassignedInfo); + out.writeOptionalWriteable(allocationId); + if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) { out.writeLong(expectedShardSize); } - } @Override public void writeTo(StreamOutput out) throws IOException { - index.writeTo(out); - out.writeVInt(shardId); + shardId.writeTo(out); writeToThin(out); } - public void updateUnassignedInfo(UnassignedInfo unassignedInfo) { - ensureNotFrozen(); + public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) { assert this.unassignedInfo != null : "can only update unassign info if they are already set"; - this.unassignedInfo = unassignedInfo; + assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed"; + return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, + unassignedInfo, allocationId, expectedShardSize); } - // package private mutators start here - /** * Moves the shard to unassigned state. */ - void moveToUnassigned(UnassignedInfo unassignedInfo) { - ensureNotFrozen(); + public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) { assert state != ShardRoutingState.UNASSIGNED : this; - state = ShardRoutingState.UNASSIGNED; - currentNodeId = null; - relocatingNodeId = null; - this.unassignedInfo = unassignedInfo; - allocationId = null; - expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; + return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, + unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); } /** @@ -413,18 +335,17 @@ public final class ShardRouting implements Streamable, ToXContent { * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. 
*/ - void initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { - ensureNotFrozen(); + public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize) { assert state == ShardRoutingState.UNASSIGNED : this; assert relocatingNodeId == null : this; - state = ShardRoutingState.INITIALIZING; - currentNodeId = nodeId; + final AllocationId allocationId; if (existingAllocationId == null) { allocationId = AllocationId.newInitializing(); } else { allocationId = AllocationId.newInitializing(existingAllocationId); } - this.expectedShardSize = expectedShardSize; + return new ShardRouting(shardId, nodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING, + unassignedInfo, allocationId, expectedShardSize); } /** @@ -432,39 +353,45 @@ public final class ShardRouting implements Streamable, ToXContent { * * @param relocatingNodeId id of the node to relocate the shard */ - void relocate(String relocatingNodeId, long expectedShardSize) { - ensureNotFrozen(); + public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) { assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this; - state = ShardRoutingState.RELOCATING; - this.relocatingNodeId = relocatingNodeId; - this.allocationId = AllocationId.newRelocation(allocationId); - this.expectedShardSize = expectedShardSize; + return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, ShardRoutingState.RELOCATING, + null, AllocationId.newRelocation(allocationId), expectedShardSize); } /** * Cancel relocation of a shard. The shards state must be set * to RELOCATING. */ - void cancelRelocation() { - ensureNotFrozen(); + public ShardRouting cancelRelocation() { assert state == ShardRoutingState.RELOCATING : this; assert assignedToNode() : this; assert relocatingNodeId != null : this; - expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; - state = ShardRoutingState.STARTED; - relocatingNodeId = null; - allocationId = AllocationId.cancelRelocation(allocationId); + return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED, + null, AllocationId.cancelRelocation(allocationId), UNAVAILABLE_EXPECTED_SHARD_SIZE); + } + + /** + * Removes relocation source of a non-primary shard. The shard state must be INITIALIZING. + * This allows the non-primary shard to continue recovery from the primary even though its non-primary + * relocation source has failed. 
+ */ + public ShardRouting removeRelocationSource() { + assert primary == false : this; + assert state == ShardRoutingState.INITIALIZING : this; + assert assignedToNode() : this; + assert relocatingNodeId != null : this; + return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, state, unassignedInfo, + AllocationId.finishRelocation(allocationId), expectedShardSize); } /** * Moves the shard from started to initializing */ - void reinitializeShard() { - ensureNotFrozen(); + public ShardRouting reinitializeShard() { assert state == ShardRoutingState.STARTED; - state = ShardRoutingState.INITIALIZING; - allocationId = AllocationId.newInitializing(); - this.unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null); + return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING, + new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE); } /** @@ -472,46 +399,46 @@ public final class ShardRouting implements Streamable, ToXContent { * INITIALIZING or RELOCATING. Any relocation will be * canceled. */ - void moveToStarted() { - ensureNotFrozen(); + public ShardRouting moveToStarted() { assert state == ShardRoutingState.INITIALIZING : "expected an initializing shard " + this; - relocatingNodeId = null; - restoreSource = null; - unassignedInfo = null; // we keep the unassigned data until the shard is started + AllocationId allocationId = this.allocationId; if (allocationId.getRelocationId() != null) { // relocation target allocationId = AllocationId.finishRelocation(allocationId); } - expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; - state = ShardRoutingState.STARTED; + return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED, null, allocationId, + UNAVAILABLE_EXPECTED_SHARD_SIZE); } /** * Make the shard primary unless it's not Primary - * //TODO: doc exception + * + * @throws IllegalShardRoutingStateException if shard is already a primary */ - void moveToPrimary() { - ensureNotFrozen(); + public ShardRouting moveToPrimary() { if (primary) { throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary"); } - primary = true; + return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, true, state, unassignedInfo, allocationId, + expectedShardSize); } /** * Set the primary shard to non-primary + * + * @throws IllegalShardRoutingStateException if shard is already a replica */ - void moveFromPrimary() { - ensureNotFrozen(); + public ShardRouting moveFromPrimary() { if (!primary) { throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica"); } - primary = false; + return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, false, state, unassignedInfo, allocationId, + expectedShardSize); } /** returns true if this routing has the same shardId as another */ public boolean isSameShard(ShardRouting other) { - return index.equals(other.index) && shardId == other.shardId; + return getIndexName().equals(other.getIndexName()) && id() == other.id(); } /** @@ -592,15 +519,12 @@ public final class ShardRouting implements Streamable, ToXContent { if (primary != other.primary) { return false; } - if (shardId != other.shardId) { + if (shardId != null ? !shardId.equals(other.shardId) : other.shardId != null) { return false; } if (currentNodeId != null ? 
!currentNodeId.equals(other.currentNodeId) : other.currentNodeId != null) { return false; } - if (index != null ? !index.equals(other.index) : other.index != null) { - return false; - } if (relocatingNodeId != null ? !relocatingNodeId.equals(other.relocatingNodeId) : other.relocatingNodeId != null) { return false; } @@ -631,27 +555,27 @@ public final class ShardRouting implements Streamable, ToXContent { return equalsIgnoringMetaData(that); } - private boolean usePreComputedHashCode = false; - private int hashCode = 0; + /** + * Cache hash code in same same way as {@link String#hashCode()}) using racy single-check idiom + * as it is mainly used in single-threaded code ({@link BalancedShardsAllocator}). + */ + private int hashCode; // default to 0 @Override public int hashCode() { - if (frozen && usePreComputedHashCode) { - return hashCode; + int h = hashCode; + if (h == 0) { + h = shardId.hashCode(); + h = 31 * h + (currentNodeId != null ? currentNodeId.hashCode() : 0); + h = 31 * h + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0); + h = 31 * h + (primary ? 1 : 0); + h = 31 * h + (state != null ? state.hashCode() : 0); + h = 31 * h + (restoreSource != null ? restoreSource.hashCode() : 0); + h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0); + h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0); + hashCode = h; } - int result = index != null ? index.hashCode() : 0; - result = 31 * result + shardId; - result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0); - result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0); - result = 31 * result + (primary ? 1 : 0); - result = 31 * result + (state != null ? state.hashCode() : 0); - result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0); - result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); - result = 31 * result + (unassignedInfo != null ? 
unassignedInfo.hashCode() : 0); - if (frozen) { - usePreComputedHashCode = true; - } - return hashCode = result; + return h; } @Override @@ -664,7 +588,7 @@ public final class ShardRouting implements Streamable, ToXContent { */ public String shortSummary() { StringBuilder sb = new StringBuilder(); - sb.append('[').append(index).append(']').append('[').append(shardId).append(']'); + sb.append('[').append(shardId.getIndexName()).append(']').append('[').append(shardId.getId()).append(']'); sb.append(", node[").append(currentNodeId).append("], "); if (relocatingNodeId != null) { sb.append("relocating [").append(relocatingNodeId).append("], "); @@ -697,8 +621,8 @@ public final class ShardRouting implements Streamable, ToXContent { .field("primary", primary()) .field("node", currentNodeId()) .field("relocating_node", relocatingNodeId()) - .field("shard", shardId().id()) - .field("index", shardId().getIndex().getName()); + .field("shard", id()) + .field("index", getIndexName()); if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) { builder.field("expected_shard_size_in_bytes", expectedShardSize); } @@ -716,20 +640,6 @@ public final class ShardRouting implements Streamable, ToXContent { return builder.endObject(); } - private void ensureNotFrozen() { - if (frozen) { - throw new IllegalStateException("ShardRouting can't be modified anymore - already frozen"); - } - } - - void freeze() { - frozen = true; - } - - boolean isFrozen() { - return frozen; - } - /** * Returns the expected shard size for {@link ShardRoutingState#RELOCATING} and {@link ShardRoutingState#INITIALIZING} * shards. If it's size is not available {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} will be returned. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 92f42e2bc4f..c66c4e0bd3d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -40,15 +40,13 @@ import java.io.IOException; /** * Holds additional information as to why the shard is in unassigned state. */ -public class UnassignedInfo implements ToXContent, Writeable { +public final class UnassignedInfo implements ToXContent, Writeable { public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime"); - private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1); public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = - Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic, + Setting.timeSetting("index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), Property.Dynamic, Property.IndexScope); - /** * Reason why the shard is in unassigned state. *

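As orientation for the UnassignedInfo hunks that follow: delayed allocation is reworked around an explicit "delayed" flag plus the per-index index.unassigned.node_left.delayed_timeout setting, with the remaining delay recomputed on demand from the unassigned timestamp. A small standalone sketch of that computation (class and method names here are illustrative; it mirrors the new getRemainingDelay() shown below rather than adding anything to it):

    import org.elasticsearch.cluster.routing.UnassignedInfo;
    import org.elasticsearch.common.settings.Settings;

    final class RemainingDelaySketch {
        // Remaining delay = configured timeout minus the time already spent unassigned, floored at 0.
        static long remainingDelayNanos(long nowNanos, long unassignedTimeNanos, Settings indexSettings) {
            long timeoutNanos = UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos();
            return Math.max(0L, timeoutNanos - (nowNanos - unassignedTimeNanos));
        }
    }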
    @@ -103,24 +101,29 @@ public class UnassignedInfo implements ToXContent, Writeable { /** * A better replica location is identified and causes the existing replica allocation to be cancelled. */ - REALLOCATED_REPLICA; + REALLOCATED_REPLICA, + /** + * Unassigned as a result of a failed primary while the replica was initializing. + */ + PRIMARY_FAILED; } private final Reason reason; private final long unassignedTimeMillis; // used for display and log messages, in milliseconds private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation - private volatile long lastComputedLeftDelayNanos = 0L; // how long to delay shard allocation, not serialized (always positive, 0 means no delay) + private final boolean delayed; // if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING private final String message; - private final Throwable failure; + private final Exception failure; + private final int failedAllocations; /** - * creates an UnassingedInfo object based **current** time + * creates an UnassignedInfo object based on **current** time * * @param reason the cause for making this shard unassigned. See {@link Reason} for more information. * @param message more information about cause. **/ public UnassignedInfo(Reason reason, String message) { - this(reason, message, null, System.nanoTime(), System.currentTimeMillis()); + this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false); } /** @@ -129,38 +132,63 @@ public class UnassignedInfo implements ToXContent, Writeable { * @param failure the shard level failure that caused this shard to be unassigned, if exists. * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation * @param unassignedTimeMillis the time of unassignment used to display to in our reporting. + * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING. */ - public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Throwable failure, long unassignedTimeNanos, long unassignedTimeMillis) { + public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Exception failure, int failedAllocations, + long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed) { this.reason = reason; this.unassignedTimeMillis = unassignedTimeMillis; this.unassignedTimeNanos = unassignedTimeNanos; + this.delayed = delayed; this.message = message; this.failure = failure; + this.failedAllocations = failedAllocations; + assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) : + "failedAllocations: " + failedAllocations + " for reason " + reason; assert !(message == null && failure != null) : "provide a message if a failure exception is provided"; + assert !(delayed && reason != Reason.NODE_LEFT) : "shard can only be delayed if it is unassigned due to a node leaving"; } public UnassignedInfo(StreamInput in) throws IOException { this.reason = Reason.values()[(int) in.readByte()]; this.unassignedTimeMillis = in.readLong(); // As System.nanoTime() cannot be compared across different JVMs, reset it to now. - // This means that in master failover situations, elapsed delay time is forgotten. + // This means that in master fail-over situations, elapsed delay time is forgotten. 
this.unassignedTimeNanos = System.nanoTime(); + this.delayed = in.readBoolean(); this.message = in.readOptionalString(); - this.failure = in.readThrowable(); + this.failure = in.readException(); + this.failedAllocations = in.readVInt(); } public void writeTo(StreamOutput out) throws IOException { out.writeByte((byte) reason.ordinal()); out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs + out.writeBoolean(delayed); out.writeOptionalString(message); - out.writeThrowable(failure); + out.writeException(failure); + out.writeVInt(failedAllocations); } public UnassignedInfo readFrom(StreamInput in) throws IOException { return new UnassignedInfo(in); } + /** + * Returns the number of previously failed allocations of this shard. + */ + public int getNumFailedAllocations() { + return failedAllocations; + } + + /** + * Returns true if allocation of this shard is delayed due to {@link #INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING} + */ + public boolean isDelayed() { + return delayed; + } + /** * The reason why the shard is unassigned. */ @@ -197,7 +225,7 @@ public class UnassignedInfo implements ToXContent, Writeable { * Returns additional failure exception details if exists. */ @Nullable - public Throwable getFailure() { + public Exception getFailure() { return failure; } @@ -213,48 +241,16 @@ public class UnassignedInfo implements ToXContent, Writeable { } /** - * The allocation delay value in nano seconds associated with the index (defaulting to node settings if not set). - */ - public long getAllocationDelayTimeoutSettingNanos(Settings settings, Settings indexSettings) { - if (reason != Reason.NODE_LEFT) { - return 0; - } - TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings); - return Math.max(0L, delayTimeout.nanos()); - } - - /** - * The delay in nanoseconds until this unassigned shard can be reassigned. This value is cached and might be slightly out-of-date. - * See also the {@link #updateDelay(long, Settings, Settings)} method. - */ - public long getLastComputedLeftDelayNanos() { - return lastComputedLeftDelayNanos; - } - - /** - * Calculates the delay left based on current time (in nanoseconds) and index/node settings. + * Calculates the delay left based on current time (in nanoseconds) and the delay defined by the index settings. + * Only relevant if shard is effectively delayed (see {@link #isDelayed()}) + * Returns 0 if delay is negative * * @return calculated delay in nanoseconds */ - public long getRemainingDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) { - final long delayTimeoutNanos = getAllocationDelayTimeoutSettingNanos(settings, indexSettings); - if (delayTimeoutNanos == 0L) { - return 0L; - } else { - assert nanoTimeNow >= unassignedTimeNanos; - return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos)); - } - } - - /** - * Updates delay left based on current time (in nanoseconds) and index/node settings. 
- * - * @return updated delay in nanoseconds - */ - public long updateDelay(final long nanoTimeNow, final Settings settings, final Settings indexSettings) { - final long newComputedLeftDelayNanos = getRemainingDelay(nanoTimeNow, settings, indexSettings); - lastComputedLeftDelayNanos = newComputedLeftDelayNanos; - return newComputedLeftDelayNanos; + public long getRemainingDelay(final long nanoTimeNow, final Settings indexSettings) { + long delayTimeoutNanos = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings).nanos(); + assert nanoTimeNow >= unassignedTimeNanos; + return Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos)); } /** @@ -263,56 +259,46 @@ public class UnassignedInfo implements ToXContent, Writeable { public static int getNumberOfDelayedUnassigned(ClusterState state) { int count = 0; for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) { - if (shard.primary() == false) { - long delay = shard.unassignedInfo().getLastComputedLeftDelayNanos(); - if (delay > 0) { - count++; - } + if (shard.unassignedInfo().isDelayed()) { + count++; } } return count; } /** - * Finds the smallest delay expiration setting in nanos of all unassigned shards that are still delayed. Returns 0 if there are none. + * Finds the next (closest) delay expiration of a delayed shard in nanoseconds based on current time. + * Returns 0 if the delay has already elapsed. + * Returns -1 if no delayed shard is found. */ - public static long findSmallestDelayedAllocationSettingNanos(Settings settings, ClusterState state) { - long minDelaySetting = Long.MAX_VALUE; - for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) { - if (shard.primary() == false) { - IndexMetaData indexMetaData = state.metaData().index(shard.getIndexName()); - boolean delayed = shard.unassignedInfo().getLastComputedLeftDelayNanos() > 0; - long delayTimeoutSetting = shard.unassignedInfo().getAllocationDelayTimeoutSettingNanos(settings, indexMetaData.getSettings()); - if (delayed && delayTimeoutSetting > 0 && delayTimeoutSetting < minDelaySetting) { - minDelaySetting = delayTimeoutSetting; + public static long findNextDelayedAllocation(long currentNanoTime, ClusterState state) { + MetaData metaData = state.metaData(); + RoutingTable routingTable = state.routingTable(); + long nextDelayNanos = Long.MAX_VALUE; + for (ShardRouting shard : routingTable.shardsWithState(ShardRoutingState.UNASSIGNED)) { + UnassignedInfo unassignedInfo = shard.unassignedInfo(); + if (unassignedInfo.isDelayed()) { + Settings indexSettings = metaData.index(shard.index()).getSettings(); + // calculate next time to schedule + final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(currentNanoTime, indexSettings); + if (newComputedLeftDelayNanos < nextDelayNanos) { + nextDelayNanos = newComputedLeftDelayNanos; } } } - return minDelaySetting == Long.MAX_VALUE ? 0L : minDelaySetting; - } - - - /** - * Finds the next (closest) delay expiration of an unassigned shard in nanoseconds. Returns 0 if there are none. - */ - public static long findNextDelayedAllocationIn(ClusterState state) { - long nextDelay = Long.MAX_VALUE; - for (ShardRouting shard : state.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED)) { - if (shard.primary() == false) { - long nextShardDelay = shard.unassignedInfo().getLastComputedLeftDelayNanos(); - if (nextShardDelay > 0 && nextShardDelay < nextDelay) { - nextDelay = nextShardDelay; - } - } - } - return nextDelay == Long.MAX_VALUE ? 
0L : nextDelay; + return nextDelayNanos == Long.MAX_VALUE ? -1L : nextDelayNanos; } public String shortSummary() { StringBuilder sb = new StringBuilder(); sb.append("[reason=").append(reason).append("]"); sb.append(", at[").append(DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)).append("]"); + if (failedAllocations > 0) { + sb.append(", failed_attempts[").append(failedAllocations).append("]"); + } + sb.append(", delayed=").append(delayed); String details = getDetails(); + if (details != null) { sb.append(", details[").append(details).append("]"); } @@ -329,6 +315,10 @@ public class UnassignedInfo implements ToXContent, Writeable { builder.startObject("unassigned_info"); builder.field("reason", reason); builder.field("at", DATE_TIME_FORMATTER.printer().print(unassignedTimeMillis)); + if (failedAllocations > 0) { + builder.field("failed_attempts", failedAllocations); + } + builder.field("delayed", delayed); String details = getDetails(); if (details != null) { builder.field("details", details); @@ -351,6 +341,12 @@ public class UnassignedInfo implements ToXContent, Writeable { if (unassignedTimeMillis != that.unassignedTimeMillis) { return false; } + if (delayed != that.delayed) { + return false; + } + if (failedAllocations != that.failedAllocations) { + return false; + } if (reason != that.reason) { return false; } @@ -358,12 +354,13 @@ public class UnassignedInfo implements ToXContent, Writeable { return false; } return !(failure != null ? !failure.equals(that.failure) : that.failure != null); - } @Override public int hashCode() { int result = reason != null ? reason.hashCode() : 0; + result = 31 * result + Boolean.hashCode(delayed); + result = 31 * result + Integer.hashCode(failedAllocations); result = 31 * result + Long.hashCode(unassignedTimeMillis); result = 31 * result + (message != null ? message.hashCode() : 0); result = 31 * result + (failure != null ? 
failure.hashCode() : 0); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index ed6de16c11f..7a91c694399 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -19,14 +19,13 @@ package org.elasticsearch.cluster.routing.allocation; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -40,6 +39,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.shard.ShardId; @@ -47,12 +47,15 @@ import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; + /** * This service manages the node allocation of a cluster. 
For this reason the @@ -66,6 +69,7 @@ public class AllocationService extends AbstractComponent { private final GatewayAllocator gatewayAllocator; private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; + private final ClusterName clusterName; @Inject public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, @@ -75,6 +79,7 @@ public class AllocationService extends AbstractComponent { this.gatewayAllocator = gatewayAllocator; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; + clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); } /** @@ -90,8 +95,8 @@ public class AllocationService extends AbstractComponent { RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); - StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo()); - boolean changed = applyStartedShards(routingNodes, startedShards); + StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState, startedShards, clusterInfoService.getClusterInfo(), currentNanoTime()); + boolean changed = applyStartedShards(allocation, startedShards); if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } @@ -99,28 +104,29 @@ public class AllocationService extends AbstractComponent { if (withReroute) { reroute(allocation); } - final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes); - String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString()); - logClusterHealthStateChange( - new ClusterStateHealth(clusterState), - new ClusterStateHealth(clusterState.metaData(), result.routingTable()), - "shards started [" + startedShardsAsString + "] ..." 
- ); - return result; + return buildResultAndLogHealthChange(allocation, "shards started [" + startedShardsAsString + "] ..."); + } + + protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason) { + return buildResultAndLogHealthChange(allocation, reason, new RoutingExplanations()); } - protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) { - return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations()); - - } - - protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes, - RoutingExplanations explanations) { - final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build(); + protected RoutingAllocation.Result buildResultAndLogHealthChange(RoutingAllocation allocation, String reason, RoutingExplanations explanations) { + MetaData oldMetaData = allocation.metaData(); + RoutingTable oldRoutingTable = allocation.routingTable(); + RoutingNodes newRoutingNodes = allocation.routingNodes(); + final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build(); MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable); assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata + logClusterHealthStateChange( + new ClusterStateHealth(ClusterState.builder(clusterName). + metaData(allocation.metaData()).routingTable(allocation.routingTable()).build()), + new ClusterStateHealth(ClusterState.builder(clusterName). + metaData(newMetaData).routingTable(newRoutingTable).build()), + reason + ); return new RoutingAllocation.Result(true, newRoutingTable, newMetaData, explanations); } @@ -216,28 +222,48 @@ public class AllocationService extends AbstractComponent { RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); // shuffle the unassigned nodes, just so we won't have things like poison failed shards routingNodes.unassigned().shuffle(); - FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), failedShards, clusterInfoService.getClusterInfo()); + long currentNanoTime = currentNanoTime(); + FailedRerouteAllocation allocation = new FailedRerouteAllocation(allocationDeciders, routingNodes, clusterState, failedShards, clusterInfoService.getClusterInfo(), currentNanoTime); boolean changed = false; // as failing primaries also fail associated replicas, we fail replicas first here so that their nodes are added to ignore list List orderedFailedShards = new ArrayList<>(failedShards); orderedFailedShards.sort(Comparator.comparing(failedShard -> failedShard.shard.primary())); for (FailedRerouteAllocation.FailedShard failedShard : orderedFailedShards) { + UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo(); + final int failedAllocations = unassignedInfo != null ? 
+            final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
             changed |= applyFailedShard(allocation, failedShard.shard, true,
                     new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
-                            System.nanoTime(), System.currentTimeMillis()));
+                            failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false));
         }
         if (!changed) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
         gatewayAllocator.applyFailedShards(allocation);
         reroute(allocation);
-        final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
         String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
-        logClusterHealthStateChange(
-                new ClusterStateHealth(clusterState),
-                new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
-                "shards failed [" + failedShardsAsString + "] ..."
-        );
-        return result;
+        return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
+    }
+
+    /**
+     * Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed.
+     */
+    private boolean removeDelayMarkers(RoutingAllocation allocation) {
+        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
+        final MetaData metaData = allocation.metaData();
+        boolean changed = false;
+        while (unassignedIterator.hasNext()) {
+            ShardRouting shardRouting = unassignedIterator.next();
+            UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+            if (unassignedInfo.isDelayed()) {
+                final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
+                    metaData.getIndexSafe(shardRouting.index()).getSettings());
+                if (newComputedLeftDelayNanos == 0) {
+                    changed = true;
+                    unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
+                        unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false));
+                }
+            }
+        }
+        return changed;
     }

     /**
@@ -257,16 +283,13 @@ public class AllocationService extends AbstractComponent {
                 .collect(Collectors.joining(", "));
     }

-    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
-        return reroute(clusterState, commands, false);
-    }
-
-    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) {
+    public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain, boolean retryFailed) {
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // we don't shuffle the unassigned shards here, to try and get as close as possible to
         // a consistent result of the effect the commands have on the routing
         // this allows systems to dry run the commands, see the resulting cluster state, and act on it
-        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
+        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
+            clusterInfoService.getClusterInfo(), currentNanoTime(), retryFailed);
         // don't short circuit deciders, we want a full explanation
         allocation.debugDecision(true);
         // we ignore disable allocation, because commands are explicit
@@ -277,13 +300,7 @@ public class AllocationService extends AbstractComponent {
         // the assumption is that commands will move / act on shards (or fail through exceptions)
         // so, there will always be shard "movements", so no need to check on reroute
         reroute(allocation);
-        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations);
-        logClusterHealthStateChange(
-            new ClusterStateHealth(clusterState),
-            new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
-            "reroute commands"
-        );
-        return result;
+        return buildResultAndLogHealthChange(allocation, "reroute commands", explanations);
     }

@@ -305,18 +322,13 @@ public class AllocationService extends AbstractComponent {
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // shuffle the unassigned nodes, just so we won't have things like poison failed shards
         routingNodes.unassigned().shuffle();
-        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
+        RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
+            clusterInfoService.getClusterInfo(), currentNanoTime(), false);
         allocation.debugDecision(debug);
         if (!reroute(allocation)) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
         }
-        RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
-        logClusterHealthStateChange(
-            new ClusterStateHealth(clusterState),
-            new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
-            reason
-        );
-        return result;
+        return buildResultAndLogHealthChange(allocation, reason);
     }

     private void logClusterHealthStateChange(ClusterStateHealth previousStateHealth, ClusterStateHealth newStateHealth, String reason) {
@@ -332,17 +344,13 @@ public class AllocationService extends AbstractComponent {
         // first, clear from the shards any node id they used to belong to that is now dead
         changed |= deassociateDeadNodes(allocation);

-        // create a sorted list of from nodes with least number of shards to the maximum ones
-        applyNewNodes(allocation);
-
         // elect primaries *before* allocating unassigned, so backups of primaries that failed
         // will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
         changed |= electPrimariesAndUnassignedDanglingReplicas(allocation);

         // now allocate all the unassigned to available nodes
         if (allocation.routingNodes().unassigned().size() > 0) {
-            updateLeftDelayOfUnassignedShards(allocation, settings);
-
+            changed |= removeDelayMarkers(allocation);
             changed |= gatewayAllocator.allocateUnassigned(allocation);
         }

@@ -351,15 +359,6 @@ public class AllocationService extends AbstractComponent {
         return changed;
     }

-    // public for testing
-    public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
-        for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) {
-            final MetaData metaData = allocation.metaData();
-            final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
-            shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings());
-        }
-    }
-
     private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
         boolean changed = false;
         final RoutingNodes routingNodes = allocation.routingNodes();
@@ -369,29 +368,32 @@ public class AllocationService extends AbstractComponent {
         }
         // now, go over and elect a new primary if possible, not, from this code block on, if one is elected,
         // routingNodes.hasUnassignedPrimaries() will potentially be false
-        for (ShardRouting shardEntry : routingNodes.unassigned()) {
+        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+        while (unassignedIterator.hasNext()) {
+            ShardRouting shardEntry = unassignedIterator.next();
             if (shardEntry.primary()) {
                 // remove dangling replicas that are initializing for primary shards
                 changed |= failReplicasForUnassignedPrimary(allocation, shardEntry);
-                ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
+                ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry.shardId());
                 if (candidate != null) {
-                    routingNodes.swapPrimaryFlag(shardEntry, candidate);
-                    if (candidate.relocatingNodeId() != null) {
+                    shardEntry = unassignedIterator.demotePrimaryToReplicaShard();
+                    ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate);
+                    if (primarySwappedCandidate.relocatingNodeId() != null) {
                         changed = true;
                         // its also relocating, make sure to move the other routing to primary
-                        RoutingNode node = routingNodes.node(candidate.relocatingNodeId());
+                        RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId());
                         if (node != null) {
                             for (ShardRouting shardRouting : node) {
-                                if (shardRouting.shardId().equals(candidate.shardId()) && !shardRouting.primary()) {
-                                    routingNodes.swapPrimaryFlag(shardRouting);
+                                if (shardRouting.shardId().equals(primarySwappedCandidate.shardId()) && !shardRouting.primary()) {
+                                    routingNodes.promoteAssignedReplicaShardToPrimary(shardRouting);
                                     break;
                                 }
                             }
                         }
                     }
-                    IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index());
+                    IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index());
                     if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
-                        routingNodes.reinitShadowPrimary(candidate);
+                        routingNodes.reinitShadowPrimary(primarySwappedCandidate);
                         changed = true;
                     }
                 }
@@ -401,23 +403,9 @@ public class AllocationService extends AbstractComponent {
         return changed;
     }

-    /**
-     * Applies the new nodes to the routing nodes and returns them (just the
-     * new nodes);
-     */
-    private void applyNewNodes(RoutingAllocation allocation) {
-        final RoutingNodes routingNodes = allocation.routingNodes();
-        for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
-            DiscoveryNode node = cursor.value;
-            if (!routingNodes.isKnown(node)) {
-                routingNodes.addNode(node);
-            }
-        }
-    }
-
     private boolean deassociateDeadNodes(RoutingAllocation allocation) {
         boolean changed = false;
-        for (RoutingNodes.RoutingNodesIterator it = allocation.routingNodes().nodes(); it.hasNext(); ) {
+        for (Iterator<RoutingNode> it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) {
             RoutingNode node = it.next();
             if (allocation.nodes().getDataNodes().containsKey(node.nodeId())) {
                 // its a live node, continue
@@ -426,8 +414,10 @@ public class AllocationService extends AbstractComponent {
             changed = true;
             // now, go over all the shards routing on the node, and fail them
             for (ShardRouting shardRouting : node.copyShards()) {
-                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
-                        allocation.getCurrentNanoTime(), System.currentTimeMillis());
+                final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
+                boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
+                UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
+                        null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed);
                 applyFailedShard(allocation, shardRouting, false, unassignedInfo);
             }
             // its a dead node, remove it, note, its important to remove it *after* we apply failed shard
@@ -439,7 +429,7 @@ public class AllocationService extends AbstractComponent {

     private boolean failReplicasForUnassignedPrimary(RoutingAllocation allocation, ShardRouting primary) {
         List<ShardRouting> replicas = new ArrayList<>();
-        for (ShardRouting routing : allocation.routingNodes().assignedShards(primary)) {
+        for (ShardRouting routing : allocation.routingNodes().assignedShards(primary.shardId())) {
             if (!routing.primary() && routing.initializing()) {
                 replicas.add(routing);
             }
@@ -447,61 +437,52 @@ public class AllocationService extends AbstractComponent {
         boolean changed = false;
         for (ShardRouting routing : replicas) {
             changed |= applyFailedShard(allocation, routing, false,
-                    new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
-                            null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
+                    new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
+                            null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
         }
         return changed;
     }

-    private boolean applyStartedShards(RoutingNodes routingNodes, Iterable<? extends ShardRouting> startedShardEntries) {
+    private boolean applyStartedShards(RoutingAllocation routingAllocation, Iterable<? extends ShardRouting> startedShardEntries) {
         boolean dirty = false;
         // apply shards might be called several times with the same shard, ignore it
+        RoutingNodes routingNodes = routingAllocation.routingNodes();
         for (ShardRouting startedShard : startedShardEntries) {
             assert startedShard.initializing();

             // validate index still exists. strictly speaking this is not needed but it gives clearer logs
-            if (routingNodes.routingTable().index(startedShard.index()) == null) {
+            if (routingAllocation.metaData().index(startedShard.index()) == null) {
                 logger.debug("{} ignoring shard started, unknown index (routing: {})", startedShard.shardId(), startedShard);
                 continue;
             }
-
-            RoutingNodes.RoutingNodeIterator currentRoutingNode = routingNodes.routingNodeIter(startedShard.currentNodeId());
+            RoutingNode currentRoutingNode = routingNodes.node(startedShard.currentNodeId());
             if (currentRoutingNode == null) {
                 logger.debug("{} failed to find shard in order to start it [failed to find node], ignoring (routing: {})", startedShard.shardId(), startedShard);
                 continue;
             }

-            for (ShardRouting shard : currentRoutingNode) {
-                if (shard.isSameAllocation(startedShard)) {
-                    if (shard.active()) {
-                        logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard);
-                    } else {
-                        dirty = true;
-                        // override started shard with the latest copy. Capture it now , before starting the shard destroys it...
-                        startedShard = new ShardRouting(shard);
-                        routingNodes.started(shard);
-                        logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);
-                    }
-                    break;
-                }
-            }
+            ShardRouting matchingShard = currentRoutingNode.getByShardId(startedShard.shardId());
+            if (matchingShard == null) {
+                logger.debug("{} failed to find shard in order to start it [failed to find shard], ignoring (routing: {})", startedShard.shardId(), startedShard);
+            } else if (matchingShard.isSameAllocation(startedShard) == false) {
+                logger.debug("{} failed to find shard with matching allocation id in order to start it [failed to find matching shard], ignoring (routing: {}, matched shard routing: {})", startedShard.shardId(), startedShard, matchingShard);
+            } else {
+                startedShard = matchingShard;
+                if (startedShard.active()) {
+                    logger.trace("{} shard is already started, ignoring (routing: {})", startedShard.shardId(), startedShard);
+                } else {
+                    assert startedShard.initializing();
+                    dirty = true;
+                    routingNodes.started(startedShard);
+                    logger.trace("{} marked shard as started (routing: {})", startedShard.shardId(), startedShard);

-            // startedShard is the current state of the shard (post relocation for example)
-            // this means that after relocation, the state will be started and the currentNodeId will be
-            // the node we relocated to
-            if (startedShard.relocatingNodeId() == null) {
-                continue;
-            }
-
-            RoutingNodes.RoutingNodeIterator sourceRoutingNode = routingNodes.routingNodeIter(startedShard.relocatingNodeId());
-            if (sourceRoutingNode != null) {
-                while (sourceRoutingNode.hasNext()) {
-                    ShardRouting shard = sourceRoutingNode.next();
-                    if (shard.isRelocationSourceOf(startedShard)) {
-                        dirty = true;
-                        sourceRoutingNode.remove();
-                        break;
+                    if (startedShard.relocatingNodeId() != null) {
+                        // relocation target has been started, remove relocation source
+                        RoutingNode relocationSourceNode = routingNodes.node(startedShard.relocatingNodeId());
+                        ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(startedShard.shardId());
+                        assert relocationSourceShard.isRelocationSourceOf(startedShard);
+                        routingNodes.remove(relocationSourceShard);
+                    }
                 }
             }
@@ -521,84 +502,75 @@ public class AllocationService extends AbstractComponent {
         }

         RoutingNodes routingNodes = allocation.routingNodes();
-        RoutingNodes.RoutingNodeIterator matchedNode = routingNodes.routingNodeIter(failedShard.currentNodeId());
+        RoutingNode matchedNode = routingNodes.node(failedShard.currentNodeId());
         if (matchedNode == null) {
             logger.debug("{} ignoring shard failure, unknown node in {} ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
             return false;
         }

-        boolean matchedShard = false;
-        while (matchedNode.hasNext()) {
-            ShardRouting routing = matchedNode.next();
-            if (routing.isSameAllocation(failedShard)) {
-                matchedShard = true;
-                logger.debug("{} failed shard {} found in routingNodes, failing it ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
-                break;
-            }
-        }
-
-        if (matchedShard == false) {
+        ShardRouting matchedShard = matchedNode.getByShardId(failedShard.shardId());
+        if (matchedShard != null && matchedShard.isSameAllocation(failedShard)) {
+            logger.debug("{} failed shard {} found in routingNodes, failing it ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
+            // replace incoming instance to make sure we work on the latest one
+            failedShard = matchedShard;
+        } else {
             logger.debug("{} ignoring shard failure, unknown allocation id in {} ({})",
                     failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
             return false;
         }
+
         if (failedShard.primary()) {
             // fail replicas first otherwise we move RoutingNodes into an inconsistent state
             failReplicasForUnassignedPrimary(allocation, failedShard);
         }
-        // replace incoming instance to make sure we work on the latest one. Copy it to maintain information during modifications.
-        failedShard = new ShardRouting(matchedNode.current());
-
-        // remove the current copy of the shard
-        matchedNode.remove();

         if (addToIgnoreList) {
             // make sure we ignore this shard on the relevant node
             allocation.addIgnoreShardForNode(failedShard.shardId(), failedShard.currentNodeId());
         }

-        if (failedShard.relocatingNodeId() != null && failedShard.initializing()) {
-            // The shard is a target of a relocating shard. In that case we only
-            // need to remove the target shard and cancel the source relocation.
-            // No shard is left unassigned
-            logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", failedShard, unassignedInfo.shortSummary());
-            RoutingNode relocatingFromNode = routingNodes.node(failedShard.relocatingNodeId());
-            if (relocatingFromNode != null) {
-                for (ShardRouting shardRouting : relocatingFromNode) {
-                    if (shardRouting.isRelocationSourceOf(failedShard)) {
-                        logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", failedShard.shardId(), shardRouting, unassignedInfo.shortSummary());
-                        routingNodes.cancelRelocation(shardRouting);
-                        break;
-                    }
-                }
-            }
-        } else {
-            // The fail shard is the main copy of the current shard routing. Any
-            // relocation will be cancelled (and the target shard removed as well)
-            // and the shard copy needs to be marked as unassigned
-
-            if (failedShard.relocatingNodeId() != null) {
-                // handle relocation source shards.  we need to find the target initializing shard that is recovering, and remove it...
-                assert failedShard.initializing() == false; // should have been dealt with and returned
-                assert failedShard.relocating();
-
-                RoutingNodes.RoutingNodeIterator initializingNode = routingNodes.routingNodeIter(failedShard.relocatingNodeId());
-                if (initializingNode != null) {
-                    while (initializingNode.hasNext()) {
-                        ShardRouting shardRouting = initializingNode.next();
-                        if (shardRouting.isRelocationTargetOf(failedShard)) {
-                            logger.trace("{} is removed due to the failure of the source shard", shardRouting);
-                            initializingNode.remove();
-                        }
-                    }
-                }
-            }
-
-            matchedNode.moveToUnassigned(unassignedInfo);
-        }
-        assert matchedNode.isRemoved() : "failedShard " + failedShard + " was matched but wasn't removed";
+        cancelShard(logger, failedShard, unassignedInfo, routingNodes);
+        assert matchedNode.getByShardId(failedShard.shardId()) == null : "failedShard " + failedShard + " was matched but wasn't removed";
         return true;
     }

+    public static void cancelShard(ESLogger logger, ShardRouting cancelledShard, UnassignedInfo unassignedInfo, RoutingNodes routingNodes) {
+        if (cancelledShard.relocatingNodeId() == null) {
+            routingNodes.moveToUnassigned(cancelledShard, unassignedInfo);
+        } else {
+            if (cancelledShard.initializing()) {
+                // The shard is a target of a relocating shard. In that case we only
+                // need to remove the target shard and cancel the source relocation.
+                // No shard is left unassigned
+                logger.trace("{} is a relocation target, resolving source to cancel relocation ({})", cancelledShard, unassignedInfo.shortSummary());
+                RoutingNode sourceNode = routingNodes.node(cancelledShard.relocatingNodeId());
+                ShardRouting sourceShard = sourceNode.getByShardId(cancelledShard.shardId());
+                assert sourceShard.isRelocationSourceOf(cancelledShard);
+                logger.trace("{}, resolved source to [{}]. canceling relocation ... ({})", cancelledShard.shardId(), sourceShard, unassignedInfo.shortSummary());
+                routingNodes.cancelRelocation(sourceShard);
+                routingNodes.remove(cancelledShard);
+            } else {
+                assert cancelledShard.relocating();
+                // The cancelled shard is the main copy of the current shard routing.
+                // now, find the shard that is initializing on the target node
+                RoutingNode targetNode = routingNodes.node(cancelledShard.relocatingNodeId());
+                ShardRouting targetShard = targetNode.getByShardId(cancelledShard.shardId());
+                assert targetShard.isRelocationTargetOf(cancelledShard);
+                if (cancelledShard.primary()) {
+                    logger.trace("{} is removed due to the failure/cancellation of the source shard", targetShard);
+                    // cancel and remove target shard
+                    routingNodes.remove(targetShard);
+                    routingNodes.moveToUnassigned(cancelledShard, unassignedInfo);
+                } else {
+                    logger.trace("{}, relocation source failed / cancelled, mark as initializing without relocation source", targetShard);
+                    // promote to initializing shard without relocation source and ensure that removed relocation source
+                    // is not added back as unassigned shard
+                    routingNodes.removeRelocationSource(targetShard);
+                    routingNodes.remove(cancelledShard);
+                }
+            }
+        }
+    }
+
     private RoutingNodes getMutableRoutingNodes(ClusterState clusterState) {
         RoutingNodes routingNodes = new RoutingNodes(clusterState, false); // this is a costly operation - only call this once!
         return routingNodes;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
index 7232e15f033..c3a397a785b 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/FailedRerouteAllocation.java
@@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -41,9 +41,9 @@ public class FailedRerouteAllocation extends RoutingAllocation {
     public static class FailedShard {
         public final ShardRouting shard;
         public final String message;
-        public final Throwable failure;
+        public final Exception failure;

-        public FailedShard(ShardRouting shard, String message, Throwable failure) {
+        public FailedShard(ShardRouting shard, String message, Exception failure) {
             this.shard = shard;
             this.message = message;
             this.failure = failure;
@@ -57,8 +57,8 @@ public class FailedRerouteAllocation extends RoutingAllocation {

     private final List<FailedShard> failedShards;

-    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<FailedShard> failedShards, ClusterInfo clusterInfo) {
-        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
+    public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<FailedShard> failedShards, ClusterInfo clusterInfo, long currentNanoTime) {
+        super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
         this.failedShards = failedShards;
     }
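Both reroute-allocation subclasses above now receive the whole ClusterState plus a caller-captured currentNanoTime instead of calling System.nanoTime() themselves, so every delay computation inside one reroute round sees the same clock. A minimal, self-contained sketch of that pattern follows; this is an illustration only, not Elasticsearch code, and every name in it is invented:

    import java.util.concurrent.TimeUnit;

    // Sketch: capture the clock once per round and pass it down, so all
    // delay math within the round is aligned (the pattern the diff adopts).
    class RerouteRound {
        private final long currentNanoTime; // captured once per round

        RerouteRound(long currentNanoTime) {
            this.currentNanoTime = currentNanoTime;
        }

        // remaining delay for a shard that became unassigned at unassignedTimeNanos
        long remainingDelayNanos(long unassignedTimeNanos, long configuredDelayNanos) {
            long elapsed = currentNanoTime - unassignedTimeNanos;
            return Math.max(0L, configuredDelayNanos - elapsed);
        }

        public static void main(String[] args) {
            RerouteRound round = new RerouteRound(System.nanoTime());
            long unassignedAt = System.nanoTime() - TimeUnit.SECONDS.toNanos(10);
            // prints roughly 50 seconds' worth of nanos
            System.out.println(round.remainingDelayNanos(unassignedAt, TimeUnit.SECONDS.toNanos(60)));
        }
    }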
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index 536806c0830..f58ff54fc14 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -20,12 +20,14 @@ package org.elasticsearch.cluster.routing.allocation;
 import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.index.shard.ShardId;

 import java.util.HashMap;
@@ -54,7 +56,7 @@ public class RoutingAllocation {

         private final MetaData metaData;

-        private RoutingExplanations explanations = new RoutingExplanations();
+        private final RoutingExplanations explanations;

         /**
          * Creates a new {@link RoutingAllocation.Result}
@@ -63,9 +65,7 @@ public class RoutingAllocation {
          * @param metaData the {@link MetaData} this Result references
          */
         public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
-            this.changed = changed;
-            this.routingTable = routingTable;
-            this.metaData = metaData;
+            this(changed, routingTable, metaData, new RoutingExplanations());
         }

         /**
@@ -118,8 +118,14 @@ public class RoutingAllocation {

     private final RoutingNodes routingNodes;

+    private final MetaData metaData;
+
+    private final RoutingTable routingTable;
+
     private final DiscoveryNodes nodes;

+    private final ImmutableOpenMap<String, ClusterState.Custom> customs;
+
     private final AllocationExplanation explanation = new AllocationExplanation();

     private final ClusterInfo clusterInfo;
@@ -128,6 +134,8 @@ public class RoutingAllocation {

     private boolean ignoreDisable = false;

+    private final boolean retryFailed;
+
     private boolean debugDecision = false;

     private boolean hasPendingAsyncFetch = false;
@@ -139,15 +147,19 @@ public class RoutingAllocation {
      * Creates a new {@link RoutingAllocation}
      * @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
      * @param routingNodes Routing nodes in the current cluster
-     * @param nodes TODO: Documentation
+     * @param clusterState cluster state before rerouting
      * @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
      */
-    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
+    public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, ClusterInfo clusterInfo, long currentNanoTime, boolean retryFailed) {
         this.deciders = deciders;
         this.routingNodes = routingNodes;
-        this.nodes = nodes;
+        this.metaData = clusterState.metaData();
+        this.routingTable = clusterState.routingTable();
+        this.nodes = clusterState.nodes();
+        this.customs = clusterState.customs();
         this.clusterInfo = clusterInfo;
         this.currentNanoTime = currentNanoTime;
+        this.retryFailed = retryFailed;
     }

     /** returns the nano time captured at the beginning of the allocation. used to make sure all time based decisions are aligned */
@@ -168,7 +180,7 @@ public class RoutingAllocation {
      * @return current routing table
      */
     public RoutingTable routingTable() {
-        return routingNodes.routingTable();
+        return routingTable;
     }

     /**
@@ -184,7 +196,7 @@ public class RoutingAllocation {
      * @return Metadata of routing nodes
      */
     public MetaData metaData() {
-        return routingNodes.metaData();
+        return metaData;
     }

     /**
@@ -199,6 +211,10 @@ public class RoutingAllocation {
         return clusterInfo;
     }

+    public <T extends ClusterState.Custom> T custom(String key) {
+        return (T) customs.get(key);
+    }
+
     /**
      * Get explanations of current routing
      * @return explanation of routing
@@ -285,4 +301,8 @@ public class RoutingAllocation {
     public void setHasPendingAsyncFetch() {
         this.hasPendingAsyncFetch = true;
     }
+
+    public boolean isRetryFailed() {
+        return retryFailed;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
index 00f3944ae03..4d1ac1408a2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/StartedRerouteAllocation.java
@@ -20,7 +20,7 @@ package org.elasticsearch.cluster.routing.allocation;
 import org.elasticsearch.cluster.ClusterInfo;
-import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -35,8 +35,8 @@ public class StartedRerouteAllocation extends RoutingAllocation {

     private final List<? extends ShardRouting> startedShards;

-    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo) {
-        super(deciders, routingNodes, nodes, clusterInfo, System.nanoTime());
+    public StartedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState, List<? extends ShardRouting> startedShards, ClusterInfo clusterInfo, long currentNanoTime) {
+        super(deciders, routingNodes, clusterState, clusterInfo, currentNanoTime, false);
         this.startedShards = startedShards;
     }

@@ -47,4 +47,4 @@ public class StartedRerouteAllocation extends RoutingAllocation {
     public List<? extends ShardRouting> startedShards() {
         return startedShards;
     }
-}
\ No newline at end of file
+}
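RoutingAllocation now snapshots metaData, routingTable, nodes and customs out of the ClusterState it is handed, instead of reaching back through RoutingNodes, and exposes the customs through a typed lookup with an unchecked cast. A self-contained sketch of that accessor pattern; illustration only, not Elasticsearch code, with invented names:

    import java.util.Collections;
    import java.util.Map;

    // Sketch: an allocation context copies what it needs up front and offers
    // typed access to named "custom" entries, as the hunk above does.
    class AllocationContext {
        interface Custom {}

        private final Map<String, Custom> customs;

        AllocationContext(Map<String, Custom> customs) {
            this.customs = customs;
        }

        @SuppressWarnings("unchecked")
        <T extends Custom> T custom(String key) {
            return (T) customs.get(key); // caller declares the expected type
        }

        public static void main(String[] args) {
            class Restores implements Custom {}
            AllocationContext ctx =
                    new AllocationContext(Collections.singletonMap("restores", new Restores()));
            Restores r = ctx.custom("restores");
            System.out.println(r != null); // true
        }
    }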
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 296e4929ec5..046947d64eb 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -32,6 +32,8 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
@@ -201,7 +203,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
             final float weightIndex = node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index);
             return theta0 * weightShard + theta1 * weightIndex;
         }
-
     }

     /**
@@ -224,7 +225,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
             this.weight = weight;
             this.threshold = threshold;
             this.routingNodes = allocation.routingNodes();
-            metaData = routingNodes.metaData();
+            this.metaData = allocation.metaData();
             avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
             buildModelFromAssigned();
         }
@@ -512,28 +513,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
             // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
             // offloading the shards.
             boolean changed = false;
-            int index = 0;
-            boolean found = true;
             final NodeSorter sorter = newNodeSorter();
-            while (found) {
-                found = false;
-                for (RoutingNode routingNode : allocation.routingNodes()) {
-                    if (index >= routingNode.size()) {
-                        continue;
-                    }
-                    found = true;
-                    ShardRouting shardRouting = routingNode.get(index);
-                    // we can only move started shards...
-                    if (shardRouting.started()) {
-                        final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
-                        assert sourceNode != null && sourceNode.containsShard(shardRouting);
-                        Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
-                        if (decision.type() == Decision.Type.NO) {
-                            changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
-                        }
+            for (Iterator<ShardRouting> it = allocation.routingNodes().nodeInterleavedShardIterator(); it.hasNext(); ) {
+                ShardRouting shardRouting = it.next();
+                // we can only move started shards...
+                if (shardRouting.started()) {
+                    final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
+                    assert sourceNode != null && sourceNode.containsShard(shardRouting);
+                    RoutingNode routingNode = sourceNode.getRoutingNode();
+                    Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
+                    if (decision.type() == Decision.Type.NO) {
+                        changed |= moveShard(sorter, shardRouting, sourceNode, routingNode);
                     }
                 }
-                index++;
             }

             return changed;
@@ -560,8 +552,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
             Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
             if (allocationDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
                 sourceNode.removeShard(shardRouting);
-                ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-                currentNode.addShard(targetRelocatingShard);
+                Tuple<ShardRouting, ShardRouting> relocatingShards = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+                currentNode.addShard(relocatingShards.v2());
                 if (logger.isTraceEnabled()) {
                     logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
                 }
@@ -729,15 +721,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
                 }
                 assert decision != null && minNode != null || decision == null && minNode == null;
                 if (minNode != null) {
-                    minNode.addShard(shard);
+                    final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
+                        ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
                     if (decision.type() == Type.YES) {
                         if (logger.isTraceEnabled()) {
                             logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
                         }
-                        routingNodes.initialize(shard, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+
+                        shard = routingNodes.initialize(shard, minNode.getNodeId(), null, shardSize);
+                        minNode.addShard(shard);
                         changed = true;
                         continue; // don't add to ignoreUnassigned
                     } else {
+                        minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
                         final RoutingNode node = minNode.getRoutingNode();
                         if (deciders.canAllocate(node, allocation).type() != Type.YES) {
                             if (logger.isTraceEnabled()) {
@@ -810,15 +806,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
             if (candidate != null) {
                 /* allocate on the model even if not throttled */
                 maxNode.removeShard(candidate);
-                minNode.addShard(candidate);
+                long shardSize = allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
+
                 if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */
                     if (logger.isTraceEnabled()) {
                         logger.trace("Relocate shard [{}] from node [{}] to node [{}]", candidate, maxNode.getNodeId(), minNode.getNodeId());
                     }
                     /* now allocate on the cluster */
-                    routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
+                    minNode.addShard(routingNodes.relocate(candidate, minNode.getNodeId(), shardSize).v1());
                     return true;
+                } else {
+                    assert decision.type() == Type.THROTTLE;
+                    minNode.addShard(candidate.relocate(minNode.getNodeId(), shardSize));
                 }
             }
         }
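The moveShards() rewrite above replaces the old index/found bookkeeping with routingNodes.nodeInterleavedShardIterator(), which hands out one shard per node per pass so no single node dominates the movement order. A self-contained sketch of such an interleaving iterator; illustration only, not the Elasticsearch implementation:

    import java.util.ArrayDeque;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Queue;

    // Sketch: round-robin over per-node shard lists, one element per node per pass.
    class InterleavedIteratorDemo {
        static <T> Iterator<T> nodeInterleavedIterator(List<List<T>> perNode) {
            Queue<Iterator<T>> queue = new ArrayDeque<>();
            for (List<T> node : perNode) {
                if (!node.isEmpty()) {
                    queue.add(node.iterator());
                }
            }
            return new Iterator<T>() {
                public boolean hasNext() {
                    return !queue.isEmpty();
                }
                public T next() {
                    Iterator<T> it = queue.poll();   // take the next node's iterator...
                    T value = it.next();
                    if (it.hasNext()) {
                        queue.add(it);               // ...and rotate it to the back
                    }
                    return value;
                }
            };
        }

        public static void main(String[] args) {
            List<List<String>> shards = Arrays.asList(
                    Arrays.asList("n1-s0", "n1-s1"),
                    Arrays.asList("n2-s0"),
                    Arrays.asList("n3-s0", "n3-s1"));
            // prints n1-s0, n2-s0, n3-s0, n1-s1, n3-s1
            nodeInterleavedIterator(shards).forEachRemaining(System.out::println);
        }
    }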
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
index 50e71c5094a..4ed8c487746 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@@ -37,7 +38,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Objects;
 import java.util.function.Consumer;
+import java.util.function.Function;

 /**
  * Abstract base class for allocating an unassigned shard to a node
@@ -59,7 +62,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCommand {
     /**
      * Works around ObjectParser not supporting constructor arguments.
     */
-    protected static abstract class Builder<T extends AbstractAllocateAllocationCommand> {
+    protected abstract static class Builder<T extends AbstractAllocateAllocationCommand> {
         protected String index;
         protected int shard = -1;
         protected String node;
@@ -196,17 +199,17 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCommand {
      * @param routingNodes the routing nodes
      * @param routingNode the node to initialize it to
      * @param shardRouting the shard routing that is to be matched in unassigned shards
-     * @param shardRoutingChanges changes to apply for shard routing in unassigned shards before initialization
+     * @param unassignedInfo unassigned info to override
     */
     protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
-                                             ShardRouting shardRouting, @Nullable Consumer<ShardRouting> shardRoutingChanges) {
+                                             ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo) {
         for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
             ShardRouting unassigned = it.next();

             if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
                 continue;
             }

-            if (shardRoutingChanges != null) {
-                shardRoutingChanges.accept(unassigned);
+            if (unassignedInfo != null) {
+                unassigned = it.updateUnassignedInfo(unassignedInfo);
             }

             it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
             return;
@@ -226,4 +229,22 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCommand {
     protected void extraXContent(XContentBuilder builder) throws IOException {
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        AbstractAllocateAllocationCommand other = (AbstractAllocateAllocationCommand) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(index, other.index) &&
+                Objects.equals(shardId, other.shardId) &&
+                Objects.equals(node, other.node);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(index, shardId, node);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
index 74e3a18efbc..08be17a8e98 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java
@@ -120,15 +120,16 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocationCommand {
                 "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
         }

-        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting,
-            shr -> {
-                if (shr.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
-                    // we need to move the unassigned info back to treat it as if it was index creation
-                    shr.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
-                        "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
-                        shardRouting.unassignedInfo().getFailure(), System.nanoTime(), System.currentTimeMillis()));
-                }
-            });
+        UnassignedInfo unassignedInfoToUpdate = null;
+        if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
+            // we need to move the unassigned info back to treat it as if it was index creation
+            unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
+                "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
+                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false);
+        }
+
+        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
+
         return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
index b651580ea74..8c47deee66f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java
@@ -136,6 +136,4 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocationCommand {
         initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
         return new RerouteExplanation(this, decision);
     }
-
-
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
index 736018531fa..92c1ffa9921 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java
@@ -22,13 +22,16 @@ package org.elasticsearch.cluster.routing.allocation.command;
 import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;

 /**
- * This interface defines the basic methods of commands for allocation
+ * A command to move shards in some way.
+ *
+ * Commands are registered in {@link NetworkModule}.
  */
 public interface AllocationCommand extends NamedWriteable, ToXContent {
     interface Parser<T extends AllocationCommand> {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
index ca0eab6e33b..10ba3f55944 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java
@@ -20,12 +20,12 @@ package org.elasticsearch.cluster.routing.allocation.command;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

@@ -33,12 +33,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Objects;

 /**
  * A simple {@link AllocationCommand} composite managing several
  * {@link AllocationCommand} implementations
 */
-public class AllocationCommands {
+public class AllocationCommands extends ToXContentToBytes {
     private final List<AllocationCommand> commands = new ArrayList<>();

     /**
@@ -171,21 +172,31 @@ public class AllocationCommands {
         return commands;
     }

-    /**
-     * Writes {@link AllocationCommands} to a {@link XContentBuilder}
-     *
-     * @param commands {@link AllocationCommands} to write
-     * @param builder {@link XContentBuilder} to use
-     * @param params Parameters to use for building
-     * @throws IOException if something bad happens while building the content
-     */
-    public static void toXContent(AllocationCommands commands, XContentBuilder builder, ToXContent.Params params) throws IOException {
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startArray("commands");
-        for (AllocationCommand command : commands.commands) {
+        for (AllocationCommand command : commands) {
             builder.startObject();
             builder.field(command.name(), command);
             builder.endObject();
         }
         builder.endArray();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        AllocationCommands other = (AllocationCommands) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(commands, other.commands);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hashCode(commands);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
index 0013061e8ea..f4dc4fba4b8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/BasePrimaryAllocationCommand.java
@@ -71,7 +71,7 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAllocationCommand {
         return acceptDataLoss;
     }

-    protected static abstract class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
+    protected abstract static class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
         protected boolean acceptDataLoss;

         public void setAcceptDataLoss(boolean acceptDataLoss) {
@@ -83,4 +83,18 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAllocationCommand {
     protected void extraXContent(XContentBuilder builder) throws IOException {
         builder.field(ACCEPT_DATA_LOSS_FIELD, acceptDataLoss);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (false == super.equals(obj)) {
+            return false;
+        }
+        BasePrimaryAllocationCommand other = (BasePrimaryAllocationCommand) obj;
+        return acceptDataLoss == other.acceptDataLoss;
+    }
+
+    @Override
+    public int hashCode() {
+        return 31 * super.hashCode() + Boolean.hashCode(acceptDataLoss);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
index 3b2f0fbd55b..230e7929cc8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java
@@ -20,23 +20,28 @@ package org.elasticsearch.cluster.routing.allocation.command;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
-
-import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
+import java.util.Locale;
+import java.util.Objects;

 /**
  * A command that cancels relocation, or recovery of a given shard on a node.
@@ -119,75 +124,38 @@ public class CancelAllocationCommand implements AllocationCommand {
     @Override
     public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
         DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
-        boolean found = false;
-        for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.getId()); it.hasNext(); ) {
-            ShardRouting shardRouting = it.next();
-            if (!shardRouting.shardId().getIndex().getName().equals(index)) {
-                continue;
-            }
-            if (shardRouting.shardId().id() != shardId) {
-                continue;
-            }
-            found = true;
-            if (shardRouting.relocatingNodeId() != null) {
-                if (shardRouting.initializing()) {
-                    // the shard is initializing and recovering from another node, simply cancel the recovery
-                    it.remove();
-                    // and cancel the relocating state from the shard its being relocated from
-                    RoutingNode relocatingFromNode = allocation.routingNodes().node(shardRouting.relocatingNodeId());
-                    if (relocatingFromNode != null) {
-                        for (ShardRouting fromShardRouting : relocatingFromNode) {
-                            if (fromShardRouting.isSameShard(shardRouting) && fromShardRouting.state() == RELOCATING) {
-                                allocation.routingNodes().cancelRelocation(fromShardRouting);
-                                break;
-                            }
-                        }
-                    }
-                } else if (shardRouting.relocating()) {
-
-                    // the shard is relocating to another node, cancel the recovery on the other node, and deallocate this one
-                    if (!allowPrimary && shardRouting.primary()) {
-                        // can't cancel a primary shard being initialized
-                        if (explain) {
-                            return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
-                                "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and initializing its state"));
-                        }
-                        throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
-                            discoNode + ", shard is primary and initializing its state");
-                    }
-                    it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null));
-                    // now, go and find the shard that is initializing on the target node, and cancel it as well...
-                    RoutingNodes.RoutingNodeIterator initializingNode = allocation.routingNodes().routingNodeIter(shardRouting.relocatingNodeId());
-                    if (initializingNode != null) {
-                        while (initializingNode.hasNext()) {
-                            ShardRouting initializingShardRouting = initializingNode.next();
-                            if (initializingShardRouting.isRelocationTargetOf(shardRouting)) {
-                                initializingNode.remove();
-                            }
-                        }
-                    }
-                }
-            } else {
-                // the shard is not relocating, its either started, or initializing, just cancel it and move on...
-                if (!allowPrimary && shardRouting.primary()) {
-                    // can't cancel a primary shard being initialized
-                    if (explain) {
-                        return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
-                            "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and started"));
-                    }
-                    throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
-                        discoNode + ", shard is primary and started");
-                }
-                it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null));
+        ShardRouting shardRouting = null;
+        RoutingNodes routingNodes = allocation.routingNodes();
+        RoutingNode routingNode = routingNodes.node(discoNode.getId());
+        if (routingNode != null) {
+            IndexMetaData indexMetaData = allocation.metaData().index(index());
+            if (indexMetaData == null) {
+                throw new IndexNotFoundException(index());
             }
+            ShardId shardId = new ShardId(indexMetaData.getIndex(), shardId());
+            shardRouting = routingNode.getByShardId(shardId);
         }
-        if (!found) {
+        if (shardRouting == null) {
             if (explain) {
                 return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
-                    "can't cancel " + shardId + ", failed to find it on node " + discoNode));
+                        "can't cancel " + shardId + ", failed to find it on node " + discoNode));
             }
             throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
         }
+        if (shardRouting.primary() && allowPrimary == false) {
+            if ((shardRouting.initializing() && shardRouting.relocatingNodeId() != null) == false) {
+                // only allow cancelling initializing shard of primary relocation without allowPrimary flag
+                if (explain) {
+                    return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
+                            "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and " +
+                                    shardRouting.state().name().toLowerCase(Locale.ROOT)));
+                }
+                throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
+                        discoNode + ", shard is primary and " + shardRouting.state().name().toLowerCase(Locale.ROOT));
+            }
+        }
+        AllocationService.cancelShard(Loggers.getLogger(CancelAllocationCommand.class), shardRouting,
+                new UnassignedInfo(UnassignedInfo.Reason.REROUTE_CANCELLED, null), routingNodes);
         return new RerouteExplanation(this, allocation.decision(Decision.YES, "cancel_allocation_command",
                 "shard " + shardId + " on node " + discoNode + " can be cancelled"));
     }
@@ -240,4 +208,23 @@ public class CancelAllocationCommand implements AllocationCommand {
         }
         return new CancelAllocationCommand(index, shardId, nodeId, allowPrimary);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        CancelAllocationCommand other = (CancelAllocationCommand) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(index, other.index) &&
+                Objects.equals(shardId, other.shardId) &&
+                Objects.equals(node, other.node) &&
+                Objects.equals(allowPrimary, other.allowPrimary);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(index, shardId, node, allowPrimary);
+    }
 }
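Like the other command classes in this change, CancelAllocationCommand gains equals/hashCode overrides that compare only the identifying fields; the diff's own comments note they exist for testing. The pattern in isolation, as a self-contained illustration (the class name is invented, not Elasticsearch code):

    import java.util.Objects;

    // Sketch: value equality over the fields that identify a command,
    // so tests can assert two command instances describe the same action.
    final class FakeCommand {
        private final String index;
        private final int shardId;
        private final String node;

        FakeCommand(String index, int shardId, String node) {
            this.index = index;
            this.shardId = shardId;
            this.node = node;
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            FakeCommand other = (FakeCommand) obj;
            return Objects.equals(index, other.index)
                    && shardId == other.shardId
                    && Objects.equals(node, other.node);
        }

        @Override
        public int hashCode() {
            return Objects.hash(index, shardId, node);
        }

        public static void main(String[] args) {
            // prints true: same identifying fields, equal commands
            System.out.println(new FakeCommand("idx", 0, "node1").equals(new FakeCommand("idx", 0, "node1")));
        }
    }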
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.util.Objects; /** * A command that moves a shard from a specific node to another node.
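The value-based equals/hashCode overrides added to both allocation commands exist, per their comments, for testing. A minimal sketch of the kind of assertion they enable (hypothetical JUnit-style fragment, not part of this patch; the constructor arguments mirror the parse methods in this diff):

// After an XContent or wire round-trip, a rebuilt command can be compared
// directly against the expected instance.
MoveAllocationCommand expected = new MoveAllocationCommand("test", 0, "node1", "node2");
MoveAllocationCommand rebuilt = new MoveAllocationCommand("test", 0, "node1", "node2");
assertEquals(expected, rebuilt);
assertEquals(expected.hashCode(), rebuilt.hashCode());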
@@ -195,4 +196,23 @@ public class MoveAllocationCommand implements AllocationCommand {
         }
         return new MoveAllocationCommand(index, shardId, fromNode, toNode);
     }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        MoveAllocationCommand other = (MoveAllocationCommand) obj;
+        // Override equals and hashCode for testing
+        return Objects.equals(index, other.index) &&
+                Objects.equals(shardId, other.shardId) &&
+                Objects.equals(fromNode, other.fromNode) &&
+                Objects.equals(toNode, other.toNode);
+    }
+
+    @Override
+    public int hashCode() {
+        // Override equals and hashCode for testing
+        return Objects.hash(index, shardId, fromNode, toNode);
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
index 8e6dbd989e2..32eaa8ddec6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -126,15 +126,6 @@ public class AwarenessAllocationDecider extends AllocationDecider {
         this.forcedAwarenessAttributes = forcedAwarenessAttributes;
     }
 
-    /**
-     * Get the attributes defined by this instance
-     *
-     * @return attributes defined by this instance
-     */
-    public String[] awarenessAttributes() {
-        return this.awarenessAttributes;
-    }
-
     private void setAwarenessAttributes(String[] awarenessAttributes) {
         this.awarenessAttributes = awarenessAttributes;
    }
@@ -167,7 +158,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
         // build the count of shards per attribute value
         ObjectIntHashMap<String> shardPerAttribute = new ObjectIntHashMap<>();
-        for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting)) {
+        for (ShardRouting assignedShard : allocation.routingNodes().assignedShards(shardRouting.shardId())) {
             if (assignedShard.started() || assignedShard.initializing()) {
                 // Note: this also counts relocation targets as that will be the new location of the shard.
// Relocation sources should not be counted as the shard is moving away diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 4d56711c097..edeb609a9c7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -44,6 +45,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import java.util.Set; @@ -303,26 +306,23 @@ public class DiskThresholdDecider extends AllocationDecider { * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size * of all shards */ - public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, + public static long sizeOfRelocatingShards(RoutingNode node, RoutingAllocation allocation, boolean subtractShardsMovingAway, String dataPath) { + ClusterInfo clusterInfo = allocation.clusterInfo(); long totalSize = 0; for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) { String actualPath = clusterInfo.getDataPath(routing); if (dataPath.equals(actualPath)) { if (routing.initializing() && routing.relocatingNodeId() != null) { - totalSize += getShardSize(routing, clusterInfo); + totalSize += getExpectedShardSize(routing, allocation, 0); } else if (subtractShardsMovingAway && routing.relocating()) { - totalSize -= getShardSize(routing, clusterInfo); + totalSize -= getExpectedShardSize(routing, allocation, 0); } } } return totalSize; } - static long getShardSize(ShardRouting routing, ClusterInfo clusterInfo) { - Long shardSize = clusterInfo.getShardSize(routing); - return shardSize == null ? 
0 : shardSize;
-    }
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
@@ -426,7 +426,7 @@ public class DiskThresholdDecider extends AllocationDecider {
         }
 
         // Secondly, check that allocating the shard to this node doesn't put it above the high watermark
-        final long shardSize = getShardSize(shardRouting, allocation.clusterInfo());
+        final long shardSize = getExpectedShardSize(shardRouting, allocation, 0);
         double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
         long freeBytesAfterShard = freeBytes - shardSize;
         if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
@@ -505,7 +505,6 @@
     }
 
     private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
-        ClusterInfo clusterInfo = allocation.clusterInfo();
         DiskUsage usage = usages.get(node.nodeId());
         if (usage == null) {
             // If there is no usage, and we have other nodes in the cluster,
@@ -518,7 +517,7 @@
         }
 
         if (includeRelocations) {
-            long relocatingShardsSize = sizeOfRelocatingShards(node, clusterInfo, true, usage.getPath());
+            long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, true, usage.getPath());
             DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(),
                     usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
             if (logger.isTraceEnabled()) {
@@ -643,4 +642,30 @@
         }
         return null;
     }
+
+    /**
+     * Returns the expected shard size for the given shard, or the default value provided if not enough information is available
+     * to estimate the shard's size.
+     */
+    public static final long getExpectedShardSize(ShardRouting shard, RoutingAllocation allocation, long defaultValue) {
+        final IndexMetaData metaData = allocation.metaData().getIndexSafe(shard.index());
+        final ClusterInfo info = allocation.clusterInfo();
+        if (metaData.getMergeSourceIndex() != null && shard.allocatedPostIndexCreate(metaData) == false) {
+            // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in
+            // the worst case
+            long targetShardSize = 0;
+            final Index mergeSourceIndex = metaData.getMergeSourceIndex();
+            final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(metaData.getMergeSourceIndex());
+            final Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards());
+            for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) {
+                if (shardIds.contains(shardRoutingTable.shardId())) {
+                    targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0);
+                }
+            }
+            return targetShardSize == 0 ?
defaultValue : targetShardSize;
+        } else {
+            return info.getShardSize(shard, defaultValue);
+        }
+
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 38a2a39fc7c..7dc8eff3ebd 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -137,7 +137,7 @@ public class EnableAllocationDecider extends AllocationDecider {
             return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation");
         }
 
-        Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
+        Settings indexSettings = allocation.metaData().getIndexSafe(shardRouting.index()).getSettings();
         final Rebalance enable;
         if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
             enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index eb59c261214..63bd588114a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -88,6 +88,19 @@ public class FilterAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+        if (shardRouting.unassigned()) {
+            // only for unassigned - we filter allocation right after index creation, i.e. for shard shrinking etc., to ensure
+            // that once it has been allocated post API the replicas can be allocated elsewhere without user interaction
+            // this is a setting that can only be set within the system!
+            IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
+            DiscoveryNodeFilters initialRecoveryFilters = indexMd.getInitialRecoveryFilters();
+            if (shardRouting.allocatedPostIndexCreate(indexMd) == false &&
+                initialRecoveryFilters != null &&
+                initialRecoveryFilters.match(node.node()) == false) {
+                return allocation.decision(Decision.NO, NAME, "node does not match index initial recovery filters [%s]",
+                    indexMd.includeFilters());
+            }
+        }
         return shouldFilter(shardRouting, node, allocation);
     }
 
@@ -105,7 +118,7 @@ public class FilterAllocationDecider extends AllocationDecider {
         Decision decision = shouldClusterFilter(node, allocation);
         if (decision != null) return decision;
 
-        decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
+        decision = shouldIndexFilter(allocation.metaData().getIndexSafe(shardRouting.index()), node, allocation);
         if (decision != null) return decision;
 
         return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
new file mode 100644
index 00000000000..6a8a0ccc5fa
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An allocation decider that prevents shards from being allocated on any node if the shard's allocation has been retried N times without
+ * success. This means if a shard has been INITIALIZING N times in a row without being moved to STARTED the shard will be ignored until
+ * the setting for index.allocation.max_retries is raised. The default value is 5.
+ * Note: This allocation decider also allows allocation of repeatedly failing shards when the /_cluster/reroute?retry_failed=true
+ * API is manually invoked. This allows single retries without raising the limits.
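Concretely, the retry budget is an ordinary dynamic index setting. A minimal sketch of resolving it the way the decider below does (the Settings instance here is hypothetical; the key index.allocation.max_retries, the default of 5, and SETTING_ALLOCATION_MAX_RETRY come from the new class):

// Raising the per-index budget lifts the NO decision without a manual reroute.
Settings indexSettings = Settings.builder()
        .put("index.allocation.max_retries", 10) // default is 5
        .build();
int maxRetry = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(indexSettings);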
+ *
+ * @see RoutingAllocation#isRetryFailed()
+ */
+public class MaxRetryAllocationDecider extends AllocationDecider {
+
+    public static final Setting<Integer> SETTING_ALLOCATION_MAX_RETRY = Setting.intSetting("index.allocation.max_retries", 5, 0,
+        Setting.Property.Dynamic, Setting.Property.IndexScope);
+
+    public static final String NAME = "max_retry";
+
+    /**
+     * Initializes a new {@link MaxRetryAllocationDecider}
+     *
+     * @param settings {@link Settings} used by this {@link AllocationDecider}
+     */
+    @Inject
+    public MaxRetryAllocationDecider(Settings settings) {
+        super(settings);
+    }
+
+    @Override
+    public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+        UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+        if (unassignedInfo != null && unassignedInfo.getNumFailedAllocations() > 0) {
+            final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
+            final int maxRetry = SETTING_ALLOCATION_MAX_RETRY.get(indexMetaData.getSettings());
+            if (allocation.isRetryFailed()) { // manual allocation - retry
+                // if we are called via the _reroute API we ignore the failure counter and try to allocate
+                // this improves the usability since people don't need to raise the limits to issue retries since a simple _reroute call is
+                // enough to manually retry.
+                return allocation.decision(Decision.YES, NAME, "shard has already failed allocating [" +
+                    unassignedInfo.getNumFailedAllocations() + "] times vs. [" + maxRetry + "] retries allowed " +
+                    unassignedInfo.toString() + " - retrying once on manual allocation");
+            } else if (unassignedInfo.getNumFailedAllocations() >= maxRetry) {
+                return allocation.decision(Decision.NO, NAME, "shard has already failed allocating [" +
+                    unassignedInfo.getNumFailedAllocations() + "] times vs.
[" + maxRetry + "] retries allowed " + + unassignedInfo.toString() + " - manually call [/_cluster/reroute?retry_failed=true] to retry"); + } + } + return allocation.decision(Decision.YES, NAME, "shard has no previous failures"); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return canAllocate(shardRouting, allocation); + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 17ac486a8ce..9d9f78c3c15 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -59,7 +59,7 @@ public class NodeVersionAllocationDecider extends AllocationDecider { return isVersionCompatible(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation); } } else { - final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting); + final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId()); // check that active primary has a newer version so that peer recovery works if (primary != null) { return isVersionCompatible(allocation.routingNodes(), primary.currentNodeId(), node, allocation); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java index 869c6313069..b6c675597c0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java @@ -38,9 +38,7 @@ public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { - // its ok to check for active here, since in relocation, a shard is split into two in routing - // nodes, once relocating, and one initializing - if (!allocation.routingNodes().allReplicasActive(shardRouting)) { + if (!allocation.routingNodes().allReplicasActive(shardRouting.shardId(), allocation.metaData())) { return allocation.decision(Decision.NO, NAME, "rebalancing can not occur if not all replicas are active in the cluster"); } return allocation.decision(Decision.YES, NAME, "all replicas are active in the cluster, rebalancing can occur"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java index 59ab67c309e..c23f0930924 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java @@ -47,7 +47,7 @@ public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecide if (shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "shard is primary and can be allocated"); } - ShardRouting primary = 
allocation.routingNodes().activePrimary(shardRouting);
+        ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId());
         if (primary == null) {
             return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active");
         }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
index f0b4fdf35c6..fca8a34936e 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -58,7 +58,7 @@ public class SameShardAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting);
+        Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting.shardId());
         for (ShardRouting assignedShard : assignedShards) {
             if (node.nodeId().equals(assignedShard.currentNodeId())) {
                 return allocation.decision(Decision.NO, NAME,
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
index eb256516353..e1741c1af7f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
@@ -86,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
+        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
@@ -125,7 +125,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
 
     @Override
     public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
+        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
index 54cfb6407da..e25a4e690da 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -98,7 +98,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
         if (!enableRelocation &&
shardRouting.primary()) { // Only primary shards are snapshotted - SnapshotsInProgress snapshotsInProgress = allocation.routingNodes().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress snapshotsInProgress = allocation.custom(SnapshotsInProgress.TYPE); if (snapshotsInProgress == null) { // Snapshots are not running return allocation.decision(Decision.YES, NAME, "no snapshots are currently running"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 45afd07e297..286b378debc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -28,6 +28,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.THROTTLE; +import static org.elasticsearch.cluster.routing.allocation.decider.Decision.YES; + /** * {@link ThrottlingAllocationDecider} controls the recovery process per node in * the cluster. It exposes two settings via the cluster update API that allow @@ -109,50 +112,83 @@ public class ThrottlingAllocationDecider extends AllocationDecider { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { - if (shardRouting.primary()) { - assert shardRouting.unassigned() || shardRouting.active(); - if (shardRouting.unassigned()) { - // primary is unassigned, means we are going to do recovery from gateway - // count *just the primary* currently doing recovery on the node and check against concurrent_recoveries - int primariesInRecovery = 0; - for (ShardRouting shard : node) { - // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node* - // we only count initial recoveries here, so we need to make sure that relocating node is null - if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) { - primariesInRecovery++; - } + if (shardRouting.primary() && shardRouting.unassigned()) { + assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery() == false; + // primary is unassigned, means we are going to do recovery from store, snapshot or local shards + // count *just the primaries* currently doing recovery on the node and check against primariesInitialRecoveries + + int primariesInRecovery = 0; + for (ShardRouting shard : node) { + // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node* + // we only count initial recoveries here, so we need to make sure that relocating node is null + if (shard.initializing() && shard.primary() && shard.relocatingNodeId() == null) { + primariesInRecovery++; } - if (primariesInRecovery >= primariesInitialRecoveries) { - return allocation.decision(Decision.THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]", - primariesInRecovery, primariesInitialRecoveries); + } + if (primariesInRecovery >= primariesInitialRecoveries) { + // TODO: Should index creation not be throttled for primary shards? 
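The three limits consulted in this rewritten method are all cluster settings. A short sketch (illustrative values only; the keys are the ones backing primariesInitialRecoveries, concurrentIncomingRecoveries and concurrentOutgoingRecoveries):

// Incoming recoveries are counted on the target node; outgoing recoveries are
// counted on the node holding the active primary (the recovery source).
Settings throttling = Settings.builder()
        .put("cluster.routing.allocation.node_initial_primaries_recoveries", 4)
        .put("cluster.routing.allocation.node_concurrent_incoming_recoveries", 2)
        .put("cluster.routing.allocation.node_concurrent_outgoing_recoveries", 2)
        .build();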
+                return allocation.decision(THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]",
+                    primariesInRecovery, primariesInitialRecoveries);
+            } else {
+                return allocation.decision(YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries);
+            }
+        } else {
+            // Peer recovery
+            assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery();
+
+            // Allocating a shard to this node will increase the incoming recoveries
+            int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
+            if (currentInRecoveries >= concurrentIncomingRecoveries) {
+                return allocation.decision(THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]",
+                    currentInRecoveries, concurrentIncomingRecoveries);
+            } else {
+                // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node
+                ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId());
+                if (primaryShard == null) {
+                    return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active");
+                }
+                int primaryNodeOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId());
+                if (primaryNodeOutRecoveries >= concurrentOutgoingRecoveries) {
+                    return allocation.decision(THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]",
+                        primaryNodeOutRecoveries, concurrentOutgoingRecoveries);
                 } else {
-                    return allocation.decision(Decision.YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries);
+                    return allocation.decision(YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]",
+                        primaryNodeOutRecoveries,
+                        concurrentOutgoingRecoveries,
+                        currentInRecoveries,
+                        concurrentIncomingRecoveries);
                 }
             }
         }
-        // TODO should we allow shards not allocated post API to always allocate?
-        // either primary or replica doing recovery (from peer shard)
-
-        // count the number of recoveries on the node, its for both target (INITIALIZING) and source (RELOCATING)
-        return canAllocate(node, allocation);
     }
 
-    @Override
-    public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
-        int currentOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(node.nodeId());
-        int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
-        if (currentOutRecoveries >= concurrentOutgoingRecoveries) {
-            return allocation.decision(Decision.THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]",
-                currentOutRecoveries, concurrentOutgoingRecoveries);
-        } else if (currentInRecoveries >= concurrentIncomingRecoveries) {
-            return allocation.decision(Decision.THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]",
-                currentInRecoveries, concurrentIncomingRecoveries);
-        } else {
-            return allocation.decision(Decision.YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]",
-                currentOutRecoveries,
-                concurrentOutgoingRecoveries,
-                currentInRecoveries,
-                concurrentIncomingRecoveries);
+    /**
+     * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this
+     * node but:
+     * - the unassigned shard routing if we want to assign an unassigned shard to this node.
+     * - the initializing shard routing if we want to assign the initializing shard to this node instead.
+     * - the started shard routing if we want to check whether we can relocate to this node.
+     * - the relocating shard routing if we want to relocate to this node now instead.
+     *
+     * This method returns the corresponding initializing shard that would be allocated to this node.
+     */
+    private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) {
+        final ShardRouting initializingShard;
+        if (shardRouting.unassigned()) {
+            initializingShard = shardRouting.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
+        } else if (shardRouting.initializing()) {
+            initializingShard = shardRouting.moveToUnassigned(shardRouting.unassignedInfo())
+                .initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
+        } else if (shardRouting.relocating()) {
+            initializingShard = shardRouting.cancelRelocation()
+                .relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)
+                .buildTargetRelocatingShard();
+        } else {
+            assert shardRouting.started();
+            initializingShard = shardRouting.relocate(currentNodeId, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)
+                .buildTargetRelocatingShard();
         }
+        assert initializingShard.initializing();
+        return initializingShard;
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index 15c2b5c3939..f8eee7071f6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -42,9 +42,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.ClusterSettings;
@@ -66,7 +64,9 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.IdentityHashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
@@ -87,7 +87,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
 /**
  *
  */
-public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
+public class ClusterService extends AbstractLifecycleComponent {
 
     public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =
             Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30),
@@ -95,6 +95,7 @@ public class ClusterService extends AbstractLifecycleComponent {
     public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
 
     private final ThreadPool threadPool;
+    private final ClusterName clusterName;
 
     private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;
@@ -128,14 +129,13 @@ public class ClusterService extends AbstractLifecycleComponent {
 
     private NodeConnectionsService nodeConnectionsService;
 
-    @Inject
-    public ClusterService(Settings settings, OperationRouting operationRouting,
-                          ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName) {
+    public
ClusterService(Settings settings, + ClusterSettings clusterSettings, ThreadPool threadPool) { super(settings); - this.operationRouting = operationRouting; + this.operationRouting = new OperationRouting(settings, clusterSettings); this.threadPool = threadPool; this.clusterSettings = clusterSettings; - + this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); // will be replaced on doStart. this.clusterState = ClusterState.builder(clusterName).build(); @@ -153,17 +153,17 @@ public class ClusterService extends AbstractLifecycleComponent { this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; } - synchronized public void setClusterStatePublisher(BiConsumer publisher) { + public synchronized void setClusterStatePublisher(BiConsumer publisher) { clusterStatePublisher = publisher; } - synchronized public void setLocalNode(DiscoveryNode localNode) { + public synchronized void setLocalNode(DiscoveryNode localNode) { assert clusterState.nodes().getLocalNodeId() == null : "local node is already set"; DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId()); this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); } - synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; this.nodeConnectionsService = nodeConnectionsService; } @@ -171,7 +171,7 @@ public class ClusterService extends AbstractLifecycleComponent { /** * Adds an initial block to be set on the first cluster state created. */ - synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { + public synchronized void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } @@ -181,14 +181,14 @@ public class ClusterService extends AbstractLifecycleComponent { /** * Remove an initial block to be set on the first cluster state created. */ - synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { + public synchronized void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { removeInitialStateBlock(block.id()); } /** * Remove an initial block to be set on the first cluster state created. 
*/ - synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException { + public synchronized void removeInitialStateBlock(int blockId) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } @@ -196,7 +196,7 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - synchronized protected void doStart() { + protected synchronized void doStart() { Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); Objects.requireNonNull(clusterState.nodes().getLocalNode(), "please set the local node before starting"); Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting"); @@ -208,7 +208,7 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - synchronized protected void doStop() { + protected synchronized void doStop() { for (NotifyTimeout onGoingTimeout : onGoingTimeouts) { onGoingTimeout.cancel(); try { @@ -229,14 +229,18 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - synchronized protected void doClose() { + protected synchronized void doClose() { } /** * The local node. */ public DiscoveryNode localNode() { - return clusterState.getNodes().getLocalNode(); + DiscoveryNode localNode = clusterState.getNodes().getLocalNode(); + if (localNode == null) { + throw new IllegalStateException("No local node found. Is the node started?"); + } + return localNode; } public OperationRouting operationRouting() { @@ -371,34 +375,61 @@ public class ClusterService extends AbstractLifecycleComponent { public void submitStateUpdateTask(final String source, final T task, final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor, - final ClusterStateTaskListener listener - ) { - innerSubmitStateUpdateTask(source, task, config, executor, safe(listener, logger)); + final ClusterStateTaskListener listener) { + submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor); } - private void innerSubmitStateUpdateTask(final String source, final T task, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor, - final SafeClusterStateTaskListener listener) { + /** + * Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together, + * potentially with more tasks of the same executor. 
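Seen from a caller, the batching contract documented here looks roughly like this (a sketch only: MyTask, myListener and myExecutor are hypothetical; submitStateUpdateTasks, ClusterStateTaskConfig.build and Priority.NORMAL are the entry points used in this patch):

// Two tasks that share one executor instance may be processed in a single batch;
// resubmitting the same task instance (identity, not equals) is rejected.
Map<MyTask, ClusterStateTaskListener> tasks = new IdentityHashMap<>();
tasks.put(new MyTask("a"), myListener);
tasks.put(new MyTask("b"), myListener);
clusterService.submitStateUpdateTasks("my-source", tasks,
        ClusterStateTaskConfig.build(Priority.NORMAL), myExecutor);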
+ * + * @param source the source of the cluster state update task + * @param tasks a map of update tasks and their corresponding listeners + * @param config the cluster state update task configuration + * @param executor the cluster state update task executor; tasks + * that share the same executor will be executed + * batches on this executor + * @param the type of the cluster state update task state + */ + public void submitStateUpdateTasks(final String source, + final Map tasks, final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor) { if (!lifecycle.started()) { return; } + if (tasks.isEmpty()) { + return; + } try { - final UpdateTask updateTask = new UpdateTask<>(source, task, config, executor, listener); + // convert to an identity map to check for dups based on update tasks semantics of using identity instead of equal + final IdentityHashMap tasksIdentity = new IdentityHashMap<>(tasks); + final List> updateTasks = tasksIdentity.entrySet().stream().map( + entry -> new UpdateTask<>(source, entry.getKey(), config, executor, safe(entry.getValue(), logger)) + ).collect(Collectors.toList()); synchronized (updateTasksPerExecutor) { - updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask); + List existingTasks = updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()); + for (@SuppressWarnings("unchecked") UpdateTask existing : existingTasks) { + if (tasksIdentity.containsKey(existing.task)) { + throw new IllegalArgumentException("task [" + existing.task + "] is already queued"); + } + } + existingTasks.addAll(updateTasks); } + final UpdateTask firstTask = updateTasks.get(0); + if (config.timeout() != null) { - updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> { - if (updateTask.processed.getAndSet(true) == false) { - logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout()); - listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); + updateTasksExecutor.execute(firstTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> { + for (UpdateTask task : updateTasks) { + if (task.processed.getAndSet(true) == false) { + logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout()); + task.listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); + } } })); } else { - updateTasksExecutor.execute(updateTask); + updateTasksExecutor.execute(firstTask); } } catch (EsRejectedExecutionException e) { // ignore cases where we are shutting down..., there is really nothing interesting @@ -455,13 +486,17 @@ public class ClusterService extends AbstractLifecycleComponent { } /** asserts that the current thread is the cluster state update thread */ - public boolean assertClusterStateThread() { + public static boolean assertClusterStateThread() { assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; return true; } - static abstract class SourcePrioritizedRunnable extends PrioritizedRunnable { + public ClusterName getClusterName() { + return clusterName; + } + + abstract static class SourcePrioritizedRunnable extends PrioritizedRunnable { protected final String source; public SourcePrioritizedRunnable(Priority priority, String source) { @@ -476,17 +511,17 @@ public class ClusterService extends 
AbstractLifecycleComponent { void runTasksForExecutor(ClusterStateTaskExecutor executor) { final ArrayList> toExecute = new ArrayList<>(); - final ArrayList sources = new ArrayList<>(); + final Map> processTasksBySource = new HashMap<>(); synchronized (updateTasksPerExecutor) { List pending = updateTasksPerExecutor.remove(executor); if (pending != null) { for (UpdateTask task : pending) { if (task.processed.getAndSet(true) == false) { - logger.trace("will process [{}]", task.source); + logger.trace("will process [{}[{}]]", task.source, task.task); toExecute.add(task); - sources.add(task.source); + processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task.task); } else { - logger.trace("skipping [{}], already processed", task.source); + logger.trace("skipping [{}[{}]], already processed", task.source, task.task); } } } @@ -494,15 +529,19 @@ public class ClusterService extends AbstractLifecycleComponent { if (toExecute.isEmpty()) { return; } - final String source = Strings.collectionToCommaDelimitedString(sources); + final String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> { + String tasks = executor.describeTasks(entry.getValue()); + return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]"; + }).reduce((s1, s2) -> s1 + ", " + s2).orElse(""); + if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, cluster_service not started", source); + logger.debug("processing [{}]: ignoring, cluster_service not started", tasksSummary); return; } - logger.debug("processing [{}]: execute", source); + logger.debug("processing [{}]: execute", tasksSummary); ClusterState previousClusterState = clusterState; if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) { - logger.debug("failing [{}]: local node is no longer master", source); + logger.debug("failing [{}]: local node is no longer master", tasksSummary); toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source)); return; } @@ -511,14 +550,14 @@ public class ClusterService extends AbstractLifecycleComponent { try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); batchResult = executor.execute(previousClusterState, inputs); - } catch (Throwable e) { + } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, - previousClusterState.version(), source, previousClusterState.nodes().prettyPrint(), + previousClusterState.version(), tasksSummary, previousClusterState.nodes().prettyPrint(), previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint()); } - warnAboutSlowTaskIfNeeded(executionTime, source); + warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); batchResult = ClusterStateTaskExecutor.BatchResult.builder() .failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e) .build(previousClusterState); @@ -561,8 +600,8 @@ public class ClusterService extends AbstractLifecycleComponent { task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); } TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - logger.debug("processing [{}]: took [{}] no change in 
cluster_state", source, executionTime); - warnAboutSlowTaskIfNeeded(executionTime, source); + logger.debug("processing [{}]: took [{}] no change in cluster_state", tasksSummary, executionTime); + warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); return; } @@ -604,18 +643,18 @@ public class ClusterService extends AbstractLifecycleComponent { newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED); if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", source, newClusterState.prettyPrint()); + logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState.prettyPrint()); } else if (logger.isDebugEnabled()) { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source); + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), tasksSummary); } - ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState); + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(tasksSummary, newClusterState, previousClusterState); // new cluster state, notify all listeners final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { String summary = nodesDelta.shortSummary(); if (summary.length() > 0) { - logger.info("{}, reason: {}", summary, source); + logger.info("{}, reason: {}", summary, tasksSummary); } } @@ -629,7 +668,7 @@ public class ClusterService extends AbstractLifecycleComponent { try { clusterStatePublisher.accept(clusterChangedEvent, ackListener); } catch (Discovery.FailedToCommitClusterStateException t) { - logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version()); + logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, tasksSummary, newClusterState.version()); proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t)); return; } @@ -671,8 +710,8 @@ public class ClusterService extends AbstractLifecycleComponent { if (newClusterState.nodes().isLocalNodeElectedMaster()) { try { ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null); - } catch (Throwable t) { - logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().getLocalNode()); + } catch (Exception e) { + logger.debug("error while processing ack for master node [{}]", e, newClusterState.nodes().getLocalNode()); } } @@ -681,19 +720,19 @@ public class ClusterService extends AbstractLifecycleComponent { } try { - executor.clusterStatePublished(newClusterState); + executor.clusterStatePublished(clusterChangedEvent); } catch (Exception e) { - logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source); + logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, tasksSummary); } TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, - newClusterState.version(), newClusterState.stateUUID()); - warnAboutSlowTaskIfNeeded(executionTime, source); - } catch (Throwable t) { + logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", tasksSummary, + executionTime, newClusterState.version(), 
newClusterState.stateUUID()); + warnAboutSlowTaskIfNeeded(executionTime, tasksSummary); + } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); - logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", t, executionTime, - newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.prettyPrint()); + logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", e, executionTime, + newClusterState.version(), newClusterState.stateUUID(), tasksSummary, newClusterState.prettyPrint()); // TODO: do we want to call updateTask.onFailure here? } @@ -720,11 +759,12 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { try { - listener.onFailure(source, t); - } catch (Exception e) { - logger.error("exception thrown by listener notifying of failure [{}] from [{}]", e, t, source); + listener.onFailure(source, e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error("exception thrown by listener notifying of failure from [{}]", inner, source); } } @@ -769,11 +809,12 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { try { - listener.onAllNodesAcked(t); - } catch (Exception e) { - logger.error("exception thrown by listener while notifying on all nodes acked [{}]", e, t); + listener.onAllNodesAcked(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error("exception thrown by listener while notifying on all nodes acked", inner); } } @@ -923,16 +964,16 @@ public class ClusterService extends AbstractLifecycleComponent { private static class DelegetingAckListener implements Discovery.AckListener { - final private List listeners; + private final List listeners; private DelegetingAckListener(List listeners) { this.listeners = listeners; } @Override - public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { for (Discovery.AckListener listener : listeners) { - listener.onNodeAck(node, t); + listener.onNodeAck(node, e); } } @@ -951,7 +992,7 @@ public class ClusterService extends AbstractLifecycleComponent { private final DiscoveryNodes nodes; private final long clusterStateVersion; private final Future ackTimeoutCallback; - private Throwable lastFailure; + private Exception lastFailure; AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { @@ -977,18 +1018,18 @@ public class ClusterService extends AbstractLifecycleComponent { } @Override - public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { if (!ackedTaskListener.mustAck(node)) { //we always wait for the master ack anyway if (!node.equals(nodes.getMasterNode())) { return; } } - if (t == null) { + if (e == null) { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); } else { - this.lastFailure = t; - logger.debug("ack received from node [{}], cluster_state update (version: {})", t, node, clusterStateVersion); + this.lastFailure = e; + logger.debug("ack received 
from node [{}], cluster_state update (version: {})", e, node, clusterStateVersion); } if (countDown.countDown()) { @@ -1010,4 +1051,8 @@ public class ClusterService extends AbstractLifecycleComponent { public ClusterSettings getClusterSettings() { return clusterSettings; } + + public Settings getSettings() { + return settings; + } } diff --git a/core/src/main/java/org/elasticsearch/common/Base64.java b/core/src/main/java/org/elasticsearch/common/Base64.java deleted file mode 100644 index fa499a55d4d..00000000000 --- a/core/src/main/java/org/elasticsearch/common/Base64.java +++ /dev/null @@ -1,1621 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.Locale; -import java.util.Objects; - -/** - *

- * Encodes and decodes to and from Base64 notation.
- *
- * Homepage: http://iharder.net/base64.
- *
- * Example:
- *
- * String encoded = Base64.encode( myByteArray );
- * byte[] myByteArray = Base64.decode( encoded );
- *
- * The options parameter, which appears in a few places, is used to pass
- * several pieces of information to the encoder. In the "higher level" methods such as
- * encodeBytes( bytes, options ) the options parameter can be used to indicate such
- * things as first gzipping the bytes before encoding them, not inserting linefeeds,
- * and encoding using the URL-safe and Ordered dialects.
- *
- * Note, according to RFC3548, Section 2.1, implementations should not add
- * line feeds unless explicitly told to do so. I've got Base64 set to this
- * behavior now, although earlier versions broke lines by default.
- *
- * The constants defined in Base64 can be OR-ed together to combine options, so you
- * might make a call like this:
- *
- * String encoded = Base64.encodeBytes( mybytes, Base64.GZIP | Base64.DO_BREAK_LINES );
- *
- * to compress the data before encoding it and then making the output have newline characters.
- *
- * Also...
- *
- * String encoded = Base64.encodeBytes( crazyString.getBytes() );
- *
- * Change Log:
- *
- * v2.3.7 - Fixed subtle bug when base 64 input stream contained the
- *   value 01111111, which is an invalid base 64 character but should not
- *   throw an ArrayIndexOutOfBoundsException either. Led to discovery of
- *   mishandling (or potential for better handling) of other bad input
- *   characters. You should now get an IOException if you try decoding
- *   something that has bad characters in it.
- * v2.3.6 - Fixed bug when breaking lines and the final byte of the encoded
- *   string ended in the last column; the buffer was not properly shrunk and
- *   contained an extra (null) byte that made it into the string.
- * v2.3.5 - Fixed bug in {@code #encodeFromFile} where estimated buffer size
- *   was wrong for files of size 31, 34, and 37 bytes.
- * v2.3.4 - Fixed bug when working with gzipped streams whereby flushing
- *   the Base64.OutputStream closed the Base64 encoding (by padding with equals
- *   signs) too soon. Also added an option to suppress the automatic decoding
- *   of gzipped streams. Also added experimental support for specifying a
- *   class loader when using the
- *   {@code #decodeToObject(java.lang.String, int, java.lang.ClassLoader)} method.
- * v2.3.3 - Changed default char encoding to US-ASCII which reduces the internal Java
- *   footprint with its CharEncoders and so forth. Fixed some javadocs that were
- *   inconsistent. Removed imports and specified things like java.io.IOException
- *   explicitly inline.
- * v2.3.2 - Reduced memory footprint! Finally refined the "guessing" of how big the
- *   final encoded data will be so that the code doesn't have to create two output
- *   arrays: an oversized initial one and then a final, exact-sized one. Big win
- *   when using the {@link #encodeBytesToBytes(byte[])} family of methods (and not
- *   using the gzip options which uses a different mechanism with streams and stuff).
- * v2.3.1 - Added {@link #encodeBytesToBytes(byte[], int, int, int)} and some
- *   similar helper methods to be more efficient with memory by not returning a
- *   String but just a byte array.
- * v2.3 - This is not a drop-in replacement! This is two years of comments
- *   and bug fixes queued up and finally executed. Thanks to everyone who sent
- *   me stuff, and I'm sorry I wasn't able to distribute your fixes to everyone else.
- *   Much bad coding was cleaned up including throwing exceptions where necessary
- *   instead of returning null values or something similar. Here are some changes
- *   that may affect you:
- *   - Does not break lines, by default. This is to keep in compliance with
- *     RFC3548.
- *   - Throws exceptions instead of returning null values. Because some operations
- *     (especially those that may permit the GZIP option) use IO streams, there
- *     is a possibility of an java.io.IOException being thrown. After some discussion
- *     and thought, I've changed the behavior of the methods to throw
- *     java.io.IOExceptions rather than return null if ever there's an error.
- *     I think this is more appropriate, though it will require some changes to
- *     your code. Sorry, it should have been done this way to begin with.
- *   - Removed all references to System.out, System.err, and the like.
- *     Shame on me. All I can say is sorry they were ever there.
- *   - Throws NullPointerExceptions and IllegalArgumentExceptions as needed
- *     such as when passed arrays are null or offsets are invalid.
- *   - Cleaned up as much javadoc as I could to avoid any javadoc warnings.
- *     This was especially annoying before for people who were thorough in their
- *     own projects and then had gobs of javadoc warnings on this file.
- * v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
- *   when using very small files (~< 40 bytes).
- * v2.2 - Added some helper methods for encoding/decoding directly from
- *   one file to the next. Also added a main() method to support command line
- *   encoding/decoding from one file to the next. Also added these Base64 dialects:
- *   1. The default is RFC3548 format.
- *   2. Calling Base64.setFormat(Base64.BASE64_FORMAT.URLSAFE_FORMAT) generates
- *      URL and file name friendly format as described in Section 4 of RFC3548.
- *      http://www.faqs.org/rfcs/rfc3548.html
- *   3. Calling Base64.setFormat(Base64.BASE64_FORMAT.ORDERED_FORMAT) generates
- *      URL and file name friendly format that preserves lexical ordering as
- *      described in http://www.faqs.org/qa/rfcc-1940.html
- *   Special thanks to Jim Kellerman at http://www.powerset.com/
- *   for contributing the new Base64 dialects.
- * v2.1 - Cleaned up javadoc comments and unused variables and methods. Added
- *   some convenience methods for reading and writing to and from files.
- * v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on systems
- *   with other encodings (like EBCDIC).
- * v2.0.1 - Fixed an error when decoding a single byte, that is, when the
- *   encoded data was a single byte.
- * v2.0 - I got rid of methods that used booleans to set options. Now everything
- *   is more consolidated and cleaner. The code now detects when data that's being
- *   decoded is gzip-compressed and will decompress it automatically. Generally
- *   things are cleaner. You'll probably have to change some method calls that you
- *   were making to support the new options format (ints that you "OR" together).
- * v1.5.1 - Fixed bug when decompressing and decoding to a byte[] using
- *   decode( String s, boolean gzipCompressed ). Added the ability to "suspend"
- *   encoding in the Output Stream so you can turn on and off the encoding if you
- *   need to embed base64 data in an otherwise "normal" stream (like an XML file).
- * v1.5 - Output stream passes on flush() command but doesn't do anything itself.
- *   This helps when using GZIP streams. Added the ability to GZip-compress
- *   objects before encoding them.
- * v1.4 - Added helper methods to read/write files.
- * v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.
- * v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input stream
- *   where last buffer being read, if not completely full, was not returned.
- * v1.3.4 - Fixed when "improperly padded stream" error was thrown at the wrong time.
- * v1.3.3 - Fixed I/O streams which were totally messed up.
- *

    - * I am placing this code in the Public Domain. Do with it as you will. - * This software comes with no guarantees or warranties but with - * plenty of well-wishing instead! - * Please visit http://iharder.net/base64 - * periodically to check for updates or to contribute improvements. - * - * @author Robert Harder - * @author rob@iharder.net - * @version 2.3.7 - */ -public final class Base64 { - -/* ******** P U B L I C F I E L D S ******** */ - - - /** - * No options specified. Value is zero. - */ - public final static int NO_OPTIONS = 0; - - /** - * Specify encoding in first bit. Value is one. - */ - public final static int ENCODE = 1; - - - /** - * Specify decoding in first bit. Value is zero. - */ - public final static int DECODE = 0; - - - /** - * Specify that data should be gzip-compressed in second bit. Value is two. - */ - public final static int GZIP = 2; - - /** - * Specify that gzipped data should not be automatically gunzipped. - */ - public final static int DONT_GUNZIP = 4; - - - /** - * Do break lines when encoding. Value is 8. - */ - public final static int DO_BREAK_LINES = 8; - - /** - * Encode using Base64-like encoding that is URL- and Filename-safe as described - * in Section 4 of RFC3548: - * http://www.faqs.org/rfcs/rfc3548.html. - * It is important to note that data encoded this way is not officially valid Base64, - * or at the very least should not be called Base64 without also specifying that is - * was encoded using the URL- and Filename-safe dialect. - */ - public final static int URL_SAFE = 16; - - - /** - * Encode using the special "ordered" dialect of Base64 described here: - * http://www.faqs.org/qa/rfcc-1940.html. - */ - public final static int ORDERED = 32; - - -/* ******** P R I V A T E F I E L D S ******** */ - - - /** - * Maximum line length (76) of Base64 output. - */ - private final static int MAX_LINE_LENGTH = 76; - - - /** - * The equals sign (=) as a byte. - */ - private final static byte EQUALS_SIGN = (byte) '='; - - - /** - * The new line character (\n) as a byte. - */ - private final static byte NEW_LINE = (byte) '\n'; - - - /** - * Preferred encoding. - */ - public final static Charset PREFERRED_ENCODING = Charset.forName("US-ASCII"); - - - private final static byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding - private final static byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding - - -/* ******** S T A N D A R D B A S E 6 4 A L P H A B E T ******** */ - - /** - * The 64 valid Base64 values. - */ - /* Host platform me be something funny like EBCDIC, so we hardcode these values. 
*/ - private final static byte[] _STANDARD_ALPHABET = { - (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G', - (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N', - (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U', - (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z', - (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g', - (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n', - (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u', - (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z', - (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5', - (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '+', (byte) '/' - }; - - - /** - * Translates a Base64 value to either its 6-bit reconstruction value - * or a negative number indicating some other meaning. - */ - private final static byte[] _STANDARD_DECODABET = { - -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8 - -5, -5, // Whitespace: Tab and Linefeed - -9, -9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26 - -9, -9, -9, -9, -9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42 - 62, // Plus sign at decimal 43 - -9, -9, -9, // Decimal 44 - 46 - 63, // Slash at decimal 47 - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine - -9, -9, -9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9, -9, -9, // Decimal 62 - 64 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N' - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z' - -9, -9, -9, -9, -9, -9, // Decimal 91 - 96 - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm' - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z' - -9, -9, -9, -9, -9 // Decimal 123 - 127 - , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255 - }; - - -/* ******** U R L S A F E B A S E 6 4 A L P H A B E T ******** */ - - /** - * Used in the URL- and Filename-safe dialect described in Section 4 of RFC3548: - * http://www.faqs.org/rfcs/rfc3548.html. - * Notice that the last two bytes become "hyphen" and "underscore" instead of "plus" and "slash." 
- */ - private final static byte[] _URL_SAFE_ALPHABET = { - (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G', - (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N', - (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U', - (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z', - (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g', - (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n', - (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u', - (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z', - (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', (byte) '5', - (byte) '6', (byte) '7', (byte) '8', (byte) '9', (byte) '-', (byte) '_' - }; - - /** - * Used in decoding URL- and Filename-safe dialects of Base64. - */ - private final static byte[] _URL_SAFE_DECODABET = { - -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8 - -5, -5, // Whitespace: Tab and Linefeed - -9, -9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26 - -9, -9, -9, -9, -9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42 - -9, // Plus sign at decimal 43 - -9, // Decimal 44 - 62, // Minus sign at decimal 45 - -9, // Decimal 46 - -9, // Slash at decimal 47 - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // Numbers zero through nine - -9, -9, -9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9, -9, -9, // Decimal 62 - 64 - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, // Letters 'A' through 'N' - 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // Letters 'O' through 'Z' - -9, -9, -9, -9, // Decimal 91 - 94 - 63, // Underscore at decimal 95 - -9, // Decimal 96 - 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, // Letters 'a' through 'm' - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // Letters 'n' through 'z' - -9, -9, -9, -9, -9 // Decimal 123 - 127 - , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255 - }; - - -/* ******** O R D E R E D B A S E 6 4 A L P H A B E T ******** */ - - /** - * I don't get the point of this technique, but someone requested it, - * and it is described here: - * http://www.faqs.org/qa/rfcc-1940.html. 
- */ - private final static byte[] _ORDERED_ALPHABET = { - (byte) '-', - (byte) '0', (byte) '1', (byte) '2', (byte) '3', (byte) '4', - (byte) '5', (byte) '6', (byte) '7', (byte) '8', (byte) '9', - (byte) 'A', (byte) 'B', (byte) 'C', (byte) 'D', (byte) 'E', (byte) 'F', (byte) 'G', - (byte) 'H', (byte) 'I', (byte) 'J', (byte) 'K', (byte) 'L', (byte) 'M', (byte) 'N', - (byte) 'O', (byte) 'P', (byte) 'Q', (byte) 'R', (byte) 'S', (byte) 'T', (byte) 'U', - (byte) 'V', (byte) 'W', (byte) 'X', (byte) 'Y', (byte) 'Z', - (byte) '_', - (byte) 'a', (byte) 'b', (byte) 'c', (byte) 'd', (byte) 'e', (byte) 'f', (byte) 'g', - (byte) 'h', (byte) 'i', (byte) 'j', (byte) 'k', (byte) 'l', (byte) 'm', (byte) 'n', - (byte) 'o', (byte) 'p', (byte) 'q', (byte) 'r', (byte) 's', (byte) 't', (byte) 'u', - (byte) 'v', (byte) 'w', (byte) 'x', (byte) 'y', (byte) 'z' - }; - - /** - * Used in decoding the "ordered" dialect of Base64. - */ - private final static byte[] _ORDERED_DECODABET = { - -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 0 - 8 - -5, -5, // Whitespace: Tab and Linefeed - -9, -9, // Decimal 11 - 12 - -5, // Whitespace: Carriage Return - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 14 - 26 - -9, -9, -9, -9, -9, // Decimal 27 - 31 - -5, // Whitespace: Space - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 33 - 42 - -9, // Plus sign at decimal 43 - -9, // Decimal 44 - 0, // Minus sign at decimal 45 - -9, // Decimal 46 - -9, // Slash at decimal 47 - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // Numbers zero through nine - -9, -9, -9, // Decimal 58 - 60 - -1, // Equals sign at decimal 61 - -9, -9, -9, // Decimal 62 - 64 - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, // Letters 'A' through 'M' - 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, // Letters 'N' through 'Z' - -9, -9, -9, -9, // Decimal 91 - 94 - 37, // Underscore at decimal 95 - -9, // Decimal 96 - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, // Letters 'a' through 'm' - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, // Letters 'n' through 'z' - -9, -9, -9, -9, -9 // Decimal 123 - 127 - , -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 128 - 139 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 140 - 152 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 153 - 165 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 166 - 178 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 179 - 191 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 192 - 204 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 205 - 217 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 218 - 230 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, // Decimal 231 - 243 - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, -9 // Decimal 244 - 255 - }; - - -/* ******** D E T E R M I N E W H I C H A L H A B E T ******** */ - - - /** - * Returns one of the _SOMETHING_ALPHABET byte arrays depending on - * the options specified. - * It's possible, though silly, to specify ORDERED and URLSAFE - * in which case one of them will be picked, though there is - * no guarantee as to which one will be picked. 
- */ - private final static byte[] getAlphabet(int options) { - if ((options & URL_SAFE) == URL_SAFE) { - return _URL_SAFE_ALPHABET; - } else if ((options & ORDERED) == ORDERED) { - return _ORDERED_ALPHABET; - } else { - return _STANDARD_ALPHABET; - } - } // end getAlphabet - - - /** - * Returns one of the _SOMETHING_DECODABET byte arrays depending on - * the options specified. - * It's possible, though silly, to specify ORDERED and URL_SAFE - * in which case one of them will be picked, though there is - * no guarantee as to which one will be picked. - */ - private final static byte[] getDecodabet(int options) { - if ((options & URL_SAFE) == URL_SAFE) { - return _URL_SAFE_DECODABET; - } else if ((options & ORDERED) == ORDERED) { - return _ORDERED_DECODABET; - } else { - return _STANDARD_DECODABET; - } - } // end getAlphabet - - - /** - * Defeats instantiation. - */ - private Base64() { - } - - -/* ******** E N C O D I N G M E T H O D S ******** */ - - - /** - * Encodes up to the first three bytes of array threeBytes - * and returns a four-byte array in Base64 notation. - * The actual number of significant bytes in your array is - * given by numSigBytes. - * The array threeBytes needs only be as big as - * numSigBytes. - * Code can reuse a byte array by passing a four-byte array as b4. - * - * @param b4 A reusable byte array to reduce array instantiation - * @param threeBytes the array to convert - * @param numSigBytes the number of significant bytes in your array - * @return four byte array in Base64 notation. - * @since 1.5.1 - */ - private static byte[] encode3to4(byte[] b4, byte[] threeBytes, int numSigBytes, int options) { - encode3to4(threeBytes, 0, numSigBytes, b4, 0, options); - return b4; - } // end encode3to4 - - - /** - *

- * Encodes up to three bytes of the array source and writes the
- * resulting four Base64 bytes to destination. The source and destination
- * arrays can be manipulated anywhere along their length by specifying
- * srcOffset and destOffset. This method does not check to make sure your
- * arrays are large enough to accommodate srcOffset + 3 for the source
- * array or destOffset + 4 for the destination array. The actual number
- * of significant bytes in your array is given by numSigBytes.
- *
- * This is the lowest level of the encoding methods with all possible parameters.

    - * - * @param source the array to convert - * @param srcOffset the index where conversion begins - * @param numSigBytes the number of significant bytes in your array - * @param destination the array to hold the conversion - * @param destOffset the index where output will be put - * @return the destination array - * @since 1.3 - */ - private static byte[] encode3to4( - byte[] source, int srcOffset, int numSigBytes, - byte[] destination, int destOffset, int options) { - - byte[] ALPHABET = getAlphabet(options); - - // 1 2 3 - // 01234567890123456789012345678901 Bit position - // --------000000001111111122222222 Array position from threeBytes - // --------| || || || | Six bit groups to index ALPHABET - // >>18 >>12 >> 6 >> 0 Right shift necessary - // 0x3f 0x3f 0x3f Additional AND - - // Create buffer with zero-padding if there are only one or two - // significant bytes passed in the array. - // We have to shift left 24 in order to flush out the 1's that appear - // when Java treats a value as negative that is cast from a byte to an int. - int inBuff = (numSigBytes > 0 ? ((source[srcOffset] << 24) >>> 8) : 0) - | (numSigBytes > 1 ? ((source[srcOffset + 1] << 24) >>> 16) : 0) - | (numSigBytes > 2 ? ((source[srcOffset + 2] << 24) >>> 24) : 0); - - switch (numSigBytes) { - case 3: - destination[destOffset] = ALPHABET[(inBuff >>> 18)]; - destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f]; - destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f]; - destination[destOffset + 3] = ALPHABET[(inBuff) & 0x3f]; - return destination; - - case 2: - destination[destOffset] = ALPHABET[(inBuff >>> 18)]; - destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f]; - destination[destOffset + 2] = ALPHABET[(inBuff >>> 6) & 0x3f]; - destination[destOffset + 3] = EQUALS_SIGN; - return destination; - - case 1: - destination[destOffset] = ALPHABET[(inBuff >>> 18)]; - destination[destOffset + 1] = ALPHABET[(inBuff >>> 12) & 0x3f]; - destination[destOffset + 2] = EQUALS_SIGN; - destination[destOffset + 3] = EQUALS_SIGN; - return destination; - - default: - return destination; - } // end switch - } // end encode3to4 - - - /** - * Performs Base64 encoding on the raw ByteBuffer, - * writing it to the encoded ByteBuffer. - * This is an experimental feature. Currently it does not - * pass along any options (such as {@link #DO_BREAK_LINES} - * or {@link #GZIP}. - * - * @param raw input buffer - * @param encoded output buffer - * @since 2.3 - */ - public static void encode(java.nio.ByteBuffer raw, java.nio.ByteBuffer encoded) { - byte[] raw3 = new byte[3]; - byte[] enc4 = new byte[4]; - - while (raw.hasRemaining()) { - int rem = Math.min(3, raw.remaining()); - raw.get(raw3, 0, rem); - Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS); - encoded.put(enc4); - } // end input remaining - } - - - /** - * Performs Base64 encoding on the raw ByteBuffer, - * writing it to the encoded CharBuffer. - * This is an experimental feature. Currently it does not - * pass along any options (such as {@link #DO_BREAK_LINES} - * or {@link #GZIP}. 
- * - * @param raw input buffer - * @param encoded output buffer - * @since 2.3 - */ - public static void encode(java.nio.ByteBuffer raw, java.nio.CharBuffer encoded) { - byte[] raw3 = new byte[3]; - byte[] enc4 = new byte[4]; - - while (raw.hasRemaining()) { - int rem = Math.min(3, raw.remaining()); - raw.get(raw3, 0, rem); - Base64.encode3to4(enc4, raw3, rem, Base64.NO_OPTIONS); - for (int i = 0; i < 4; i++) { - encoded.put((char) (enc4[i] & 0xFF)); - } - } // end input remaining - } - - - - /** - * Encodes a byte array into Base64 notation. - * Does not GZip-compress data. - * - * @param source The data to convert - * @return The data in Base64-encoded form - * @throws NullPointerException if source array is null - * @since 1.4 - */ - public static String encodeBytes(byte[] source) { - // Since we're not going to have the GZIP encoding turned on, - // we're not going to have an java.io.IOException thrown, so - // we should not force the user to have to catch it. - String encoded = null; - try { - encoded = encodeBytes(source, 0, source.length, NO_OPTIONS); - } catch (java.io.IOException ex) { - // not sure why this was an assertion before, running with assertions disabled would mean swallowing this exception - throw new IllegalStateException(ex); - } // end catch - assert encoded != null; - return encoded; - } // end encodeBytes - - - /** - * Encodes a byte array into Base64 notation. - *

- * Example options:
- *   GZIP: gzip-compresses object before encoding it.
- *   DO_BREAK_LINES: break lines at 76 characters
- *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- * Example: encodeBytes( myData, Base64.GZIP | Base64.DO_BREAK_LINES )
- *
- * As of v 2.3, if there is an error with the GZIP stream, the method will
- * throw an java.io.IOException. This is new to v2.3! In earlier versions,
- * it just returned a null value, but in retrospect that's a pretty poor
- * way to handle it.

    - * - * @param source The data to convert - * @param options Specified options - * @return The Base64-encoded data as a String - * @throws java.io.IOException if there is an error - * @throws NullPointerException if source array is null - * @see Base64#GZIP - * @see Base64#DO_BREAK_LINES - * @since 2.0 - */ - public static String encodeBytes(byte[] source, int options) throws java.io.IOException { - return encodeBytes(source, 0, source.length, options); - } // end encodeBytes - - /** - * Encodes a byte array into Base64 notation. - * Does not GZip-compress data. - *

- * As of v 2.3, if there is an error, the method will throw an
- * java.io.IOException. This is new to v2.3! In earlier versions, it just
- * returned a null value, but in retrospect that's a pretty poor way to handle it.

    - * - * @param source The data to convert - * @param off Offset in array where conversion should begin - * @param len Length of data to convert - * @return The Base64-encoded data as a String - * @throws NullPointerException if source array is null - * @throws IllegalArgumentException if source array, offset, or length are invalid - * @since 1.4 - */ - public static String encodeBytes(byte[] source, int off, int len) { - // Since we're not going to have the GZIP encoding turned on, - // we're not going to have an java.io.IOException thrown, so - // we should not force the user to have to catch it. - String encoded = null; - try { - encoded = encodeBytes(source, off, len, NO_OPTIONS); - } catch (java.io.IOException ex) { - throw new IllegalStateException(ex); - } // end catch - assert encoded != null; - return encoded; - } // end encodeBytes - - - /** - * Encodes a byte array into Base64 notation. - *

- * Example options:
- *   GZIP: gzip-compresses object before encoding it.
- *   DO_BREAK_LINES: break lines at 76 characters
- *     Note: Technically, this makes your encoding non-compliant.
- *
- * Example: encodeBytes( myData, Base64.GZIP ) or
- * Example: encodeBytes( myData, Base64.GZIP | Base64.DO_BREAK_LINES )
- *

    - * As of v 2.3, if there is an error with the GZIP stream, - * the method will throw an java.io.IOException. This is new to v2.3! - * In earlier versions, it just returned a null value, but - * in retrospect that's a pretty poor way to handle it. - * - * @param source The data to convert - * @param off Offset in array where conversion should begin - * @param len Length of data to convert - * @param options Specified options - * @return The Base64-encoded data as a String - * @throws java.io.IOException if there is an error - * @throws NullPointerException if source array is null - * @throws IllegalArgumentException if source array, offset, or length are invalid - * @see Base64#GZIP - * @see Base64#DO_BREAK_LINES - * @since 2.0 - */ - public static String encodeBytes(byte[] source, int off, int len, int options) throws java.io.IOException { - byte[] encoded = encodeBytesToBytes(source, off, len, options); - - // Return value according to relevant encoding. - return new String(encoded, PREFERRED_ENCODING); - - } // end encodeBytes - - - /** - * Similar to {@link #encodeBytes(byte[])} but returns - * a byte array instead of instantiating a String. This is more efficient - * if you're working with I/O streams and have large data sets to encode. - * - * @param source The data to convert - * @return The Base64-encoded data as a byte[] (of ASCII characters) - * @throws NullPointerException if source array is null - * @since 2.3.1 - */ - public static byte[] encodeBytesToBytes(byte[] source) { - byte[] encoded = null; - try { - encoded = encodeBytesToBytes(source, 0, source.length, Base64.NO_OPTIONS); - } catch (java.io.IOException ex) { - throw new IllegalStateException("IOExceptions only come from GZipping, which is turned off: ", ex); - } - return encoded; - } - - - /** - * Similar to {@link #encodeBytes(byte[], int, int, int)} but returns - * a byte array instead of instantiating a String. This is more efficient - * if you're working with I/O streams and have large data sets to encode. - * - * @param source The data to convert - * @param off Offset in array where conversion should begin - * @param len Length of data to convert - * @param options Specified options - * @return The Base64-encoded data as a String - * @throws java.io.IOException if there is an error - * @throws NullPointerException if source array is null - * @throws IllegalArgumentException if source array, offset, or length are invalid - * @see Base64#GZIP - * @see Base64#DO_BREAK_LINES - * @since 2.3.1 - */ - public static byte[] encodeBytesToBytes(byte[] source, int off, int len, int options) throws java.io.IOException { - Objects.requireNonNull(source, "Cannot serialize a null array."); - - if (off < 0) { - throw new IllegalArgumentException("Cannot have negative offset: " + off); - } // end if: off < 0 - - if (len < 0) { - throw new IllegalArgumentException("Cannot have length offset: " + len); - } // end if: len < 0 - - if (off + len > source.length) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Cannot have offset of %d and length of %d with array of length %d", off, len, source.length)); - } // end if: off < 0 - - // Compress? - if ((options & GZIP) != 0) { - return encodeCompressedBytes(source, off, len, options); - } // end if: compress - - // Else, don't compress. Better not to use streams at all then. 
- else { - return encodeNonCompressedBytes(source, off, len, options); - } // end else: don't compress - - } // end encodeBytesToBytes - - private static byte[] encodeNonCompressedBytes(byte[] source, int off, int len, int options) { - boolean breakLines = (options & DO_BREAK_LINES) != 0; - - //int len43 = len * 4 / 3; - //byte[] outBuff = new byte[ ( len43 ) // Main 4:3 - // + ( (len % 3) > 0 ? 4 : 0 ) // Account for padding - // + (breakLines ? ( len43 / MAX_LINE_LENGTH ) : 0) ]; // New lines - // Try to determine more precisely how big the array needs to be. - // If we get it right, we don't have to do an array copy, and - // we save a bunch of memory. - int encLen = (len / 3) * 4 + (len % 3 > 0 ? 4 : 0); // Bytes needed for actual encoding - if (breakLines) { - encLen += encLen / MAX_LINE_LENGTH; // Plus extra newline characters - } - byte[] outBuff = new byte[encLen]; - - - int d = 0; - int e = 0; - int len2 = len - 2; - int lineLength = 0; - for (; d < len2; d += 3, e += 4) { - encode3to4(source, d + off, 3, outBuff, e, options); - - lineLength += 4; - if (breakLines && lineLength >= MAX_LINE_LENGTH) { - outBuff[e + 4] = NEW_LINE; - e++; - lineLength = 0; - } // end if: end of line - } // en dfor: each piece of array - - if (d < len) { - encode3to4(source, d + off, len - d, outBuff, e, options); - e += 4; - } // end if: some padding needed - - - // Only resize array if we didn't guess it right. - if (e <= outBuff.length - 1) { - // If breaking lines and the last byte falls right at - // the line length (76 bytes per line), there will be - // one extra byte, and the array will need to be resized. - // Not too bad of an estimate on array size, I'd say. - byte[] finalOut = new byte[e]; - System.arraycopy(outBuff, 0, finalOut, 0, e); - //System.err.println("Having to resize array from " + outBuff.length + " to " + e ); - return finalOut; - } else { - //System.err.println("No need to resize array."); - return outBuff; - } - } - - private static byte[] encodeCompressedBytes(byte[] source, int off, int len, int options) throws IOException { - java.io.ByteArrayOutputStream baos = null; - java.util.zip.GZIPOutputStream gzos = null; - OutputStream b64os = null; - - try { - // GZip -> Base64 -> ByteArray - baos = new java.io.ByteArrayOutputStream(); - b64os = new OutputStream(baos, ENCODE | options); - gzos = new java.util.zip.GZIPOutputStream(b64os); - - gzos.write(source, off, len); - gzos.close(); - } // end try - catch (IOException e) { - // Catch it and then throw it immediately so that - // the finally{} block is called for cleanup. - throw e; - } // end catch - finally { - try { - gzos.close(); - } catch (Exception e) { - } - try { - b64os.close(); - } catch (Exception e) { - } - try { - baos.close(); - } catch (Exception e) { - } - } // end finally - - return baos.toByteArray(); - } - - -/* ******** D E C O D I N G M E T H O D S ******** */ - - - /** - * Decodes four bytes from array source - * and writes the resulting bytes (up to three of them) - * to destination. - * The source and destination arrays can be manipulated - * anywhere along their length by specifying - * srcOffset and destOffset. - * This method does not check to make sure your arrays - * are large enough to accommodate srcOffset + 4 for - * the source array or destOffset + 3 for - * the destination array. - * This method returns the actual number of bytes that - * were converted from the Base64 encoding. - *

- * This is the lowest level of the decoding methods with all possible parameters.

    - * - * @param source the array to convert - * @param srcOffset the index where conversion begins - * @param destination the array to hold the conversion - * @param destOffset the index where output will be put - * @param options alphabet type is pulled from this (standard, url-safe, ordered) - * @return the number of decoded bytes converted - * @throws NullPointerException if source or destination arrays are null - * @throws IllegalArgumentException if srcOffset or destOffset are invalid - * or there is not enough room in the array. - * @since 1.3 - */ - private static int decode4to3(byte[] source, int srcOffset, byte[] destination, int destOffset, int options) { - // Lots of error checking and exception throwing - Objects.requireNonNull(source, "Source array was null."); - Objects.requireNonNull(destination, "Destination array was null."); - if (srcOffset < 0 || srcOffset + 3 >= source.length) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Source array with length %d cannot have offset of %d and still process four bytes.", source.length, srcOffset)); - } // end if - if (destOffset < 0 || destOffset + 2 >= destination.length) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Destination array with length %d cannot have offset of %d and still store three bytes.", destination.length, destOffset)); - } // end if - - byte[] DECODABET = getDecodabet(options); - - - // Two ways to do the same thing. Don't know which way I like best. - //int outBuff = ( ( DECODABET[ source[ srcOffset ] ] << 24 ) >>> 6 ) - // | ( ( DECODABET[ source[ srcOffset + 1] ] << 24 ) >>> 12 ); - int outBuff = ((DECODABET[source[srcOffset]] & 0xFF) << 18) - | ((DECODABET[source[srcOffset + 1]] & 0xFF) << 12); - - destination[destOffset] = (byte) (outBuff >>> 16); - - // Example: Dk== - if (source[srcOffset + 2] == EQUALS_SIGN) { - return 1; - } - - outBuff |= ((DECODABET[source[srcOffset + 2]] & 0xFF) << 6); - destination[destOffset + 1] = (byte) (outBuff >>> 8); - - // Example: DkL= - if (source[srcOffset + 3] == EQUALS_SIGN) { - return 2; - } - - outBuff |= ((DECODABET[source[srcOffset + 3]] & 0xFF)); - destination[destOffset + 2] = (byte) (outBuff); - - // Example: DkLE - return 3; - } - - - /** - * Low-level access to decoding ASCII characters in - * the form of a byte array. Ignores GUNZIP option, if - * it's set. This is not generally a recommended method, - * although it is used internally as part of the decoding process. - * Special case: if len = 0, an empty array is returned. Still, - * if you need more speed and reduced memory footprint (and aren't - * gzipping), consider this method. - * - * @param source The Base64 encoded data - * @return decoded data - * @since 2.3.1 - */ - public static byte[] decode(byte[] source) - throws java.io.IOException { - byte[] decoded = null; -// try { - decoded = decode(source, 0, source.length, Base64.NO_OPTIONS); -// } catch( java.io.IOException ex ) { -// assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage(); -// } - return decoded; - } - - - /** - * Low-level access to decoding ASCII characters in - * the form of a byte array. Ignores GUNZIP option, if - * it's set. This is not generally a recommended method, - * although it is used internally as part of the decoding process. - * Special case: if len = 0, an empty array is returned. Still, - * if you need more speed and reduced memory footprint (and aren't - * gzipping), consider this method. 
- * - * @param source The Base64 encoded data - * @param off The offset of where to begin decoding - * @param len The length of characters to decode - * @param options Can specify options such as alphabet type to use - * @return decoded data - * @throws java.io.IOException If bogus characters exist in source data - * @since 1.3 - */ - public static byte[] decode(byte[] source, int off, int len, int options) throws java.io.IOException { - // Lots of error checking and exception throwing - Objects.requireNonNull(source, "Cannot decode null source array."); - if (off < 0 || off + len > source.length) { - throw new IllegalArgumentException(String.format(Locale.ROOT, - "Source array with length %d cannot have offset of %d and process %d bytes.", source.length, off, len)); - } // end if - - if (len == 0) { - return new byte[0]; - } else if (len < 4) { - throw new IllegalArgumentException( - "Base64-encoded string must have at least four characters, but length specified was " + len); - } // end if - - byte[] DECODABET = getDecodabet(options); - - int len34 = len * 3 / 4; // Estimate on array size - byte[] outBuff = new byte[len34]; // Upper limit on size of output - - int outBuffPosn = decode(source, off, len, options, DECODABET, outBuff); - - byte[] out = new byte[outBuffPosn]; - System.arraycopy(outBuff, 0, out, 0, outBuffPosn); - return out; - } // end decode - - private static int decode(byte[] source, int off, int len, int options, byte[] DECODABET, byte[] outBuff) throws IOException { - int outBuffPosn = 0; // Keep track of where we're writing - byte[] b4 = new byte[4]; // Four byte buffer from source, eliminating white space - int b4Posn = 0; // Keep track of four byte input buffer - for (int i = off; i < off + len; i++) { // Loop through source - - byte sbiDecode = DECODABET[source[i] & 0xFF]; - - // White space, Equals sign, or legit Base64 character - // Note the values such as -5 and -9 in the - // DECODABETs at the top of the file. - if (sbiDecode >= WHITE_SPACE_ENC) { - if (sbiDecode >= EQUALS_SIGN_ENC) { - b4[b4Posn++] = source[i]; // Save non-whitespace - if (b4Posn > 3) { // Time to decode? - outBuffPosn += decode4to3(b4, 0, outBuff, outBuffPosn, options); - b4Posn = 0; - - // If that was the equals sign, break out of 'for' loop - if (source[i] == EQUALS_SIGN) { - // check if the equals sign is somewhere in between - if (i+1 < len + off) { - throw new IOException(String.format(Locale.ROOT, - "Found equals sign at position %d of the base64 string, not at the end", i)); - } - break; - } // end if: equals sign - } // end if: quartet built - else { - if (source[i] == EQUALS_SIGN && len + off > i && source[i+1] != EQUALS_SIGN) { - throw new IOException(String.format(Locale.ROOT, - "Found equals sign at position %d of the base64 string, not at the end", i)); - } // enf if: equals sign and next character not as well - } // end else: - } // end if: equals sign or better - } // end if: white space, equals sign or better - else { - // There's a bad input character in the Base64 stream. - throw new IOException(String.format(Locale.ROOT, - "Bad Base64 input character decimal %d in array position %d", ((int) source[i]) & 0xFF, i)); - } // end else: - } // each input character - return outBuffPosn; - } - - - /** - * Decodes data from Base64 notation, automatically - * detecting gzip-compressed data and decompressing it. 
- * - * @param s the string to decode - * @return the decoded data - * @throws java.io.IOException If there is a problem - * @since 1.4 - */ - public static byte[] decode(String s) throws java.io.IOException { - return decode(s, NO_OPTIONS); - } - - - /** - * Decodes data from Base64 notation, automatically - * detecting gzip-compressed data and decompressing it. - * - * @param s the string to decode - * @param options encode options such as URL_SAFE - * @return the decoded data - * @throws java.io.IOException if there is an error - * @throws NullPointerException if s is null - * @since 1.4 - */ - public static byte[] decode(String s, int options) throws java.io.IOException { - - if (s == null) { - throw new NullPointerException("Input string was null."); - } // end if - - byte[] bytes = s.getBytes(PREFERRED_ENCODING); - // - - // Decode - bytes = decode(bytes, 0, bytes.length, options); - - // Check to see if it's gzip-compressed - // GZIP Magic Two-Byte Number: 0x8b1f (35615) - boolean dontGunzip = (options & DONT_GUNZIP) != 0; - if ((bytes != null) && (bytes.length >= 4) && (!dontGunzip)) { - - int head = ((int) bytes[0] & 0xff) | ((bytes[1] << 8) & 0xff00); - if (java.util.zip.GZIPInputStream.GZIP_MAGIC == head) { - java.io.ByteArrayInputStream bais = null; - java.util.zip.GZIPInputStream gzis = null; - java.io.ByteArrayOutputStream baos = null; - byte[] buffer = new byte[2048]; - int length = 0; - - try { - baos = new java.io.ByteArrayOutputStream(); - bais = new java.io.ByteArrayInputStream(bytes); - gzis = new java.util.zip.GZIPInputStream(bais); - - while ((length = gzis.read(buffer)) >= 0) { - baos.write(buffer, 0, length); - } // end while: reading input - - // No error? Get new bytes. - bytes = baos.toByteArray(); - - } // end try - catch (java.io.IOException e) { - // e.printStackTrace(); - // Just return originally-decoded bytes - } // end catch - finally { - try { - baos.close(); - } catch (Exception e) { - } - try { - gzis.close(); - } catch (Exception e) { - } - try { - bais.close(); - } catch (Exception e) { - } - } // end finally - - } // end if: gzipped - } // end if: bytes.length >= 2 - - return bytes; - } // end decode - - - - /* ******** I N N E R C L A S S I N P U T S T R E A M ******** */ - - - /** - * A {@link Base64.InputStream} will read data from another - * java.io.InputStream, given in the constructor, - * and encode/decode to/from Base64 notation on the fly. - * - * @see Base64 - * @since 1.3 - */ - public static class InputStream extends java.io.FilterInputStream { - - private boolean encode; // Encoding or decoding - private int position; // Current position in the buffer - private byte[] buffer; // Small buffer holding converted data - private int bufferLength; // Length of buffer (3 or 4) - private int numSigBytes; // Number of meaningful bytes in the buffer - private int lineLength; - private boolean breakLines; // Break lines at less than 80 characters - private int options; // Record options used to create the stream. - private byte[] decodabet; // Local copies to avoid extra method calls - - - /** - * Constructs a {@link Base64.InputStream} in DECODE mode. - * - * @param in the java.io.InputStream from which to read data. - * @since 1.3 - */ - public InputStream(java.io.InputStream in) { - this(in, DECODE); - } // end constructor - - - /** - * Constructs a {@link Base64.InputStream} in - * either ENCODE or DECODE mode. - *

- * Valid options:
- *   ENCODE or DECODE: Encode or Decode as data is read.
- *   DO_BREAK_LINES: break lines at 76 characters
- *     (only meaningful when encoding)
- *

    - * Example: new Base64.InputStream( in, Base64.DECODE ) - * - * @param in the java.io.InputStream from which to read data. - * @param options Specified options - * @see Base64#ENCODE - * @see Base64#DECODE - * @see Base64#DO_BREAK_LINES - * @since 2.0 - */ - public InputStream(java.io.InputStream in, int options) { - - super(in); - this.options = options; // Record for later - this.breakLines = (options & DO_BREAK_LINES) > 0; - this.encode = (options & ENCODE) > 0; - this.bufferLength = encode ? 4 : 3; - this.buffer = new byte[bufferLength]; - this.position = -1; - this.lineLength = 0; - this.decodabet = getDecodabet(options); - } // end constructor - - /** - * Reads enough of the input stream to convert - * to/from Base64 and returns the next byte. - * - * @return next byte - * @since 1.3 - */ - @Override - public int read() throws java.io.IOException { - - // Do we need to get data? - if (position < 0) { - if (encode) { - byte[] b3 = new byte[3]; - int numBinaryBytes = 0; - for (int i = 0; i < 3; i++) { - int b = in.read(); - - // If end of stream, b is -1. - if (b >= 0) { - b3[i] = (byte) b; - numBinaryBytes++; - } else { - break; // out of for loop - } // end else: end of stream - - } // end for: each needed input byte - - if (numBinaryBytes > 0) { - encode3to4(b3, 0, numBinaryBytes, buffer, 0, options); - position = 0; - numSigBytes = 4; - } // end if: got data - else { - return -1; // Must be end of stream - } // end else - } // end if: encoding - - // Else decoding - else { - byte[] b4 = new byte[4]; - int i = 0; - for (i = 0; i < 4; i++) { - // Read four "meaningful" bytes: - int b = 0; - do { - b = in.read(); - } - while (b >= 0 && decodabet[b & 0x7f] <= WHITE_SPACE_ENC); - - if (b < 0) { - break; // Reads a -1 if end of stream - } // end if: end of stream - - b4[i] = (byte) b; - } // end for: each needed input byte - - if (i == 4) { - numSigBytes = decode4to3(b4, 0, buffer, 0, options); - position = 0; - } // end if: got four characters - else if (i == 0) { - return -1; - } // end else if: also padded correctly - else { - // Must have broken out from above. - throw new java.io.IOException("Improperly padded Base64 input."); - } // end - - } // end else: decode - } // end else: get data - - // Got data? - if (position >= 0) { - // End of relevant data? - if ( /*!encode &&*/ position >= numSigBytes) { - return -1; - } // end if: got data - - if (encode && breakLines && lineLength >= MAX_LINE_LENGTH) { - lineLength = 0; - return '\n'; - } // end if - else { - lineLength++; // This isn't important when decoding - // but throwing an extra "if" seems - // just as wasteful. - - int b = buffer[position++]; - - if (position >= bufferLength) { - position = -1; - } // end if: end - - return b & 0xFF; // This is how you "cast" a byte that's - // intended to be unsigned. - } // end else - } // end if: position >= 0 - - // Else error - else { - throw new java.io.IOException("Error in Base64 code reading stream."); - } // end else - } // end read - - - /** - * Calls {@link #read()} repeatedly until the end of stream - * is reached or len bytes are read. - * Returns number of bytes read into array or -1 if - * end of stream is encountered. - * - * @param dest array to hold values - * @param off offset for array - * @param len max number of bytes to read into array - * @return bytes read into array or -1 if end of stream is encountered. 
- * @since 1.3 - */ - @Override - public int read(byte[] dest, int off, int len) - throws java.io.IOException { - int i; - int b; - for (i = 0; i < len; i++) { - b = read(); - - if (b >= 0) { - dest[off + i] = (byte) b; - } else if (i == 0) { - return -1; - } else { - break; // Out of 'for' loop - } // Out of 'for' loop - } // end for: each byte read - return i; - } // end read - - } // end inner class InputStream - - - /* ******** I N N E R C L A S S O U T P U T S T R E A M ******** */ - - - /** - * A {@link Base64.OutputStream} will write data to another - * java.io.OutputStream, given in the constructor, - * and encode/decode to/from Base64 notation on the fly. - * - * @see Base64 - * @since 1.3 - */ - public static class OutputStream extends java.io.FilterOutputStream { - - private boolean encode; - private int position; - private byte[] buffer; - private int bufferLength; - private int lineLength; - private boolean breakLines; - private byte[] b4; // Scratch used in a few places - private boolean suspendEncoding; - private int options; // Record for later - private byte[] decodabet; // Local copies to avoid extra method calls - - /** - * Constructs a {@link Base64.OutputStream} in ENCODE mode. - * - * @param out the java.io.OutputStream to which data will be written. - * @since 1.3 - */ - public OutputStream(java.io.OutputStream out) { - this(out, ENCODE); - } // end constructor - - - /** - * Constructs a {@link Base64.OutputStream} in - * either ENCODE or DECODE mode. - *

- * Valid options:
- *   ENCODE or DECODE: Encode or Decode as data is read.
- *   DO_BREAK_LINES: break lines at 76 characters
- *     (only meaningful when encoding)
- *

    - * Example: new Base64.OutputStream( out, Base64.ENCODE ) - * - * @param out the java.io.OutputStream to which data will be written. - * @param options Specified options. - * @see Base64#ENCODE - * @see Base64#DECODE - * @see Base64#DO_BREAK_LINES - * @since 1.3 - */ - public OutputStream(java.io.OutputStream out, int options) { - super(out); - this.breakLines = (options & DO_BREAK_LINES) != 0; - this.encode = (options & ENCODE) != 0; - this.bufferLength = encode ? 3 : 4; - this.buffer = new byte[bufferLength]; - this.position = 0; - this.lineLength = 0; - this.suspendEncoding = false; - this.b4 = new byte[4]; - this.options = options; - this.decodabet = getDecodabet(options); - } // end constructor - - - /** - * Writes the byte to the output stream after - * converting to/from Base64 notation. - * When encoding, bytes are buffered three - * at a time before the output stream actually - * gets a write() call. - * When decoding, bytes are buffered four - * at a time. - * - * @param theByte the byte to write - * @since 1.3 - */ - @Override - public void write(int theByte) - throws java.io.IOException { - // Encoding suspended? - if (suspendEncoding) { - this.out.write(theByte); - return; - } // end if: suspended - - // Encode? - if (encode) { - buffer[position++] = (byte) theByte; - if (position >= bufferLength) { // Enough to encode. - - this.out.write(encode3to4(b4, buffer, bufferLength, options)); - - lineLength += 4; - if (breakLines && lineLength >= MAX_LINE_LENGTH) { - this.out.write(NEW_LINE); - lineLength = 0; - } // end if: end of line - - position = 0; - } // end if: enough to output - } // end if: encoding - - // Else, Decoding - else { - // Meaningful Base64 character? - if (decodabet[theByte & 0x7f] > WHITE_SPACE_ENC) { - buffer[position++] = (byte) theByte; - if (position >= bufferLength) { // Enough to output. - - int len = Base64.decode4to3(buffer, 0, b4, 0, options); - out.write(b4, 0, len); - position = 0; - } // end if: enough to output - } // end if: meaningful base64 character - else if (decodabet[theByte & 0x7f] != WHITE_SPACE_ENC) { - throw new java.io.IOException("Invalid character in Base64 data."); - } // end else: not white space either - } // end else: decoding - } // end write - - - /** - * Calls {@link #write(int)} repeatedly until len - * bytes are written. - * - * @param theBytes array from which to read bytes - * @param off offset for array - * @param len max number of bytes to read into array - * @since 1.3 - */ - @Override - public void write(byte[] theBytes, int off, int len) - throws java.io.IOException { - // Encoding suspended? - if (suspendEncoding) { - this.out.write(theBytes, off, len); - return; - } // end if: suspended - - for (int i = 0; i < len; i++) { - write(theBytes[off + i]); - } // end for: each byte written - - } // end write - - - /** - * Method added by PHIL. [Thanks, PHIL. -Rob] - * This pads the buffer without closing the stream. - * - * @throws java.io.IOException if there's an error. - */ - public void flushBase64() throws java.io.IOException { - if (position > 0) { - if (encode) { - out.write(encode3to4(b4, buffer, position, options)); - position = 0; - } // end if: encoding - else { - throw new java.io.IOException("Base64 input not properly padded."); - } // end else: decoding - } // end if: buffer partially full - - } // end flush - - - /** - * Flushes and closes (I think, in the superclass) the stream. - * - * @since 1.3 - */ - @Override - public void close() throws java.io.IOException { - // 1. 
Ensure that pending characters are written - flushBase64(); - - // 2. Actually close the stream - // Base class both flushes and closes. - super.close(); - - buffer = null; - out = null; - } // end close - - - /** - * Suspends encoding of the stream. - * May be helpful if you need to embed a piece of - * base64-encoded data in a stream. - * - * @throws java.io.IOException if there's an error flushing - * @since 1.5.1 - */ - public void suspendEncoding() throws java.io.IOException { - flushBase64(); - this.suspendEncoding = true; - } // end suspendEncoding - - - /** - * Resumes encoding of the stream. - * May be helpful if you need to embed a piece of - * base64-encoded data in a stream. - * - * @since 1.5.1 - */ - public void resumeEncoding() { - this.suspendEncoding = false; - } // end resumeEncoding - - - } // end inner class OutputStream - - -} // end class Base64 diff --git a/core/src/main/java/org/elasticsearch/common/MacAddressProvider.java b/core/src/main/java/org/elasticsearch/common/MacAddressProvider.java index 52ebc283f3c..1c6d7b3945b 100644 --- a/core/src/main/java/org/elasticsearch/common/MacAddressProvider.java +++ b/core/src/main/java/org/elasticsearch/common/MacAddressProvider.java @@ -59,7 +59,7 @@ public class MacAddressProvider { byte[] address = null; try { address = getMacAddress(); - } catch (Throwable t) { + } catch (SocketException e) { // address will be set below } diff --git a/core/src/main/java/org/elasticsearch/common/NamedRegistry.java b/core/src/main/java/org/elasticsearch/common/NamedRegistry.java new file mode 100644 index 00000000000..c326da7a495 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/NamedRegistry.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Objects.requireNonNull; + +/** + * A registry from String to some class implementation. Used to ensure implementations are registered only once. + */ +public class NamedRegistry { + private final Map registry = new HashMap<>(); + private final String targetName; + + public NamedRegistry(String targetName) { + this.targetName = targetName; + } + + public Map getRegistry() { + return registry; + } + + public void register(String name, T t) { + requireNonNull(name, "name is required"); + requireNonNull(t, targetName + " is required"); + if (registry.putIfAbsent(name, t) != null) { + throw new IllegalArgumentException(targetName + " for name [" + name + "] already registered"); + } + } + + public

<P> void extractAndRegister(List<P> plugins, Function<P, Map<String, T>> lookup) {
+        for (P plugin : plugins) {
+            for (Map.Entry<String, T> entry : lookup.apply(plugin).entrySet()) {
+                register(entry.getKey(), entry.getValue());
+            }
+        }
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/ParseField.java b/core/src/main/java/org/elasticsearch/common/ParseField.java
index 6516f7952a7..c04bcb14dcb 100644
--- a/core/src/main/java/org/elasticsearch/common/ParseField.java
+++ b/core/src/main/java/org/elasticsearch/common/ParseField.java
@@ -107,4 +107,12 @@ public class ParseField {
     public String[] getDeprecatedNames() {
         return deprecatedNames;
     }
+
+    public static class CommonFields {
+        public static final ParseField FIELD = new ParseField("field");
+        public static final ParseField FIELDS = new ParseField("fields");
+        public static final ParseField FORMAT = new ParseField("format");
+        public static final ParseField MISSING = new ParseField("missing");
+        public static final ParseField TIME_ZONE = new ParseField("time_zone");
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
index 9a3c35f3527..9f5e5f34a1b 100644
--- a/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
+++ b/core/src/main/java/org/elasticsearch/common/RandomBasedUUIDGenerator.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common;
 
 import java.io.IOException;
+import java.util.Base64;
 import java.util.Random;
 
 class RandomBasedUUIDGenerator implements UUIDGenerator {
@@ -54,14 +55,6 @@ class RandomBasedUUIDGenerator implements UUIDGenerator {
          * We set only the MSB of the variant*/
         randomBytes[8] &= 0x3f; /* clear the 2 most significant bits */
         randomBytes[8] |= 0x80; /* set the variant (MSB is set)*/
-        try {
-            byte[] encoded = Base64.encodeBytesToBytes(randomBytes, 0, randomBytes.length, Base64.URL_SAFE);
-            // we know the bytes are 16, and not a multi of 3, so remove the 2 padding chars that are added
-            assert encoded[encoded.length - 1] == '=';
-            assert encoded[encoded.length - 2] == '=';
-            return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING);
-        } catch (IOException e) {
-            throw new IllegalStateException("should not be thrown");
-        }
+        return Base64.getUrlEncoder().withoutPadding().encodeToString(randomBytes);
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java
index ddcd3ea90f7..295d7a6bcda 100644
--- a/core/src/main/java/org/elasticsearch/common/Randomness.java
+++ b/core/src/main/java/org/elasticsearch/common/Randomness.java
@@ -54,7 +54,7 @@ public final class Randomness {
             Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
             maybeCurrentMethod = clazz.getMethod("current");
             maybeGetRandomMethod = clazz.getMethod("getRandom");
-        } catch (Throwable t) {
+        } catch (Exception e) {
             maybeCurrentMethod = null;
             maybeGetRandomMethod = null;
         }
diff --git a/core/src/main/java/org/elasticsearch/common/Strings.java b/core/src/main/java/org/elasticsearch/common/Strings.java
index 151c53e2007..63afe9a0323 100644
--- a/core/src/main/java/org/elasticsearch/common/Strings.java
+++ b/core/src/main/java/org/elasticsearch/common/Strings.java
@@ -37,8 +37,6 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Properties;
-import java.util.Random;
 import java.util.Set;
 import java.util.StringTokenizer;
 import java.util.TreeSet;
@@ -244,45 +242,6 @@
public class Strings { return hasText((CharSequence) str); } - /** - * Check whether the given CharSequence contains any whitespace characters. - * - * @param str the CharSequence to check (may be null) - * @return true if the CharSequence is not empty and - * contains at least 1 whitespace character - * @see java.lang.Character#isWhitespace - */ - public static boolean containsWhitespace(CharSequence str) { - if (!hasLength(str)) { - return false; - } - int strLen = str.length(); - for (int i = 0; i < strLen; i++) { - if (Character.isWhitespace(str.charAt(i))) { - return true; - } - } - return false; - } - - /** - * Trim leading whitespace from the given String. - * - * @param str the String to check - * @return the trimmed String - * @see java.lang.Character#isWhitespace - */ - public static String trimLeadingWhitespace(String str) { - if (!hasLength(str)) { - return str; - } - StringBuilder sb = new StringBuilder(str); - while (sb.length() > 0 && Character.isWhitespace(sb.charAt(0))) { - sb.deleteCharAt(0); - } - return sb.toString(); - } - /** * Trim all occurrences of the supplied leading character from the given String. * @@ -417,17 +376,6 @@ public class Strings { return (str != null ? "'" + str + "'" : null); } - /** - * Unqualify a string qualified by a separator character. For example, - * "this:name:is:qualified" returns "qualified" if using a ':' separator. - * - * @param qualifiedName the qualified name - * @param separator the separator - */ - public static String unqualify(String qualifiedName, char separator) { - return qualifiedName.substring(qualifiedName.lastIndexOf(separator) + 1); - } - /** * Capitalize a String, changing the first letter to * upper case as per {@link Character#toUpperCase(char)}. @@ -557,7 +505,8 @@ public class Strings { } public static String[] splitStringByCommaToArray(final String s) { - return splitStringToArray(s, ','); + if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY; + else return s.split(","); } public static Set splitStringToSet(final String s, final char c) { @@ -588,42 +537,6 @@ public class Strings { return result; } - public static String[] splitStringToArray(final CharSequence s, final char c) { - if (s == null || s.length() == 0) { - return Strings.EMPTY_ARRAY; - } - int count = 1; - for (int i = 0; i < s.length(); i++) { - if (s.charAt(i) == c) { - count++; - } - } - final String[] result = new String[count]; - final StringBuilder builder = new StringBuilder(); - int res = 0; - for (int i = 0; i < s.length(); i++) { - if (s.charAt(i) == c) { - if (builder.length() > 0) { - result[res++] = builder.toString(); - builder.setLength(0); - } - - } else { - builder.append(s.charAt(i)); - } - } - if (builder.length() > 0) { - result[res++] = builder.toString(); - } - if (res != count) { - // we have empty strings, copy over to a new array - String[] result1 = new String[res]; - System.arraycopy(result, 0, result1, 0, res); - return result1; - } - return result; - } - /** * Split a String at the first occurrence of the delimiter. * Does not include the delimiter in the result. @@ -647,41 +560,6 @@ public class Strings { return new String[]{beforeDelimiter, afterDelimiter}; } - /** - * Take an array Strings and split each element based on the given delimiter. - * A Properties instance is then generated, with the left of the - * delimiter providing the key, and the right of the delimiter providing the value. - *
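One subtlety in the splitStringByCommaToArray change above: String.split does not reproduce the removed splitStringToArray exactly. split keeps interior empty tokens and drops only trailing ones, while the old loop skipped every empty token. A small sketch of the difference (class name is illustrative):

    import java.util.Arrays;

    public class CommaSplitDemo {
        public static void main(String[] args) {
            // New behavior: interior empty tokens are kept ...
            System.out.println(Arrays.toString("a,,b".split(","))); // [a, , b]
            // ... but trailing empty tokens are removed.
            System.out.println(Arrays.toString("a,b,".split(","))); // [a, b]
            // The removed splitStringToArray dropped all empty tokens,
            // so "a,,b" previously yielded [a, b].
        }
    }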

    Will trim both the key and value before adding them to the - * Properties instance. - * - * @param array the array to process - * @param delimiter to split each element using (typically the equals symbol) - * @param charsToDelete one or more characters to remove from each element - * prior to attempting the split operation (typically the quotation mark - * symbol), or null if no removal should occur - * @return a Properties instance representing the array contents, - * or null if the array to process was null or empty - */ - public static Properties splitArrayElementsIntoProperties( - String[] array, String delimiter, String charsToDelete) { - - if (isEmpty(array)) { - return null; - } - Properties result = new Properties(); - for (String element : array) { - if (charsToDelete != null) { - element = deleteAny(element, charsToDelete); - } - String[] splittedElement = split(element, delimiter); - if (splittedElement == null) { - continue; - } - result.setProperty(splittedElement[0].trim(), splittedElement[1].trim()); - } - return result; - } - /** * Tokenize the given String into a String array via a StringTokenizer. * Trims tokens and omits empty tokens. diff --git a/core/src/main/java/org/elasticsearch/common/Table.java b/core/src/main/java/org/elasticsearch/common/Table.java index 0d4a827202d..ab0252b11dc 100644 --- a/core/src/main/java/org/elasticsearch/common/Table.java +++ b/core/src/main/java/org/elasticsearch/common/Table.java @@ -149,7 +149,7 @@ public class Table { // get the attributes of the header cell we are going to add mAttr.putAll(headers.get(currentCells.size()).attr); } - String[] sAttrs = Strings.splitStringToArray(attributes, ';'); + String[] sAttrs = attributes.split(";"); for (String sAttr : sAttrs) { if (sAttr.length() == 0) { continue; diff --git a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index d1a22a17cda..8d507ae7f22 100644 --- a/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -19,8 +19,7 @@ package org.elasticsearch.common; - -import java.io.IOException; +import java.util.Base64; import java.util.concurrent.atomic.AtomicInteger; /** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but @@ -80,15 +79,6 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length; - byte[] encoded; - try { - encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE); - } catch (IOException e) { - throw new IllegalStateException("should not be thrown", e); - } - - // We are a multiple of 3 bytes so we should not see any padding: - assert encoded[encoded.length - 1] != '='; - return new String(encoded, 0, encoded.length, Base64.PREFERRED_ENCODING); + return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes); } } diff --git a/core/src/main/java/org/elasticsearch/common/UUIDGenerator.java b/core/src/main/java/org/elasticsearch/common/UUIDGenerator.java index 0cdcfec1cf7..a74c5fbec30 100644 --- a/core/src/main/java/org/elasticsearch/common/UUIDGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/UUIDGenerator.java @@ -23,5 +23,5 @@ package org.elasticsearch.common; * Generates opaque unique strings. 
 */
 interface UUIDGenerator {
-    public String getBase64UUID();
+    String getBase64UUID();
 }
diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
index 5d00e36ddbd..46b7b6fc68f 100644
--- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
+++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java
@@ -27,60 +27,137 @@ import java.util.Collection;
 import java.util.Map;
 
 /**
- *
+ * An interface for managing a repository of blob entries, where each blob entry is just a named group of bytes.
  */
 public interface BlobContainer {
 
+    /**
+     * Gets the {@link BlobPath} that defines the implementation-specific paths to where the blobs are contained.
+     *
+     * @return  the BlobPath where the blobs are contained
+     */
     BlobPath path();
 
+    /**
+     * Tests whether a blob with the given blob name exists in the container.
+     *
+     * @param   blobName
+     *          The name of the blob whose existence is to be determined.
+     * @return  {@code true} if a blob exists in the {@link BlobContainer} with the given name, and {@code false} otherwise.
+     */
     boolean blobExists(String blobName);
 
     /**
-     * Creates a new InputStream for the given blob name
+     * Creates a new {@link InputStream} for the given blob name.
+     *
+     * @param   blobName
+     *          The name of the blob to get an {@link InputStream} for.
+     * @return  The {@code InputStream} to read the blob.
+     * @throws  IOException if the blob does not exist or cannot be read.
      */
     InputStream readBlob(String blobName) throws IOException;
 
     /**
-     * Reads blob content from the input stream and writes it to the blob store
+     * Reads blob content from the input stream and writes it to the container in a new blob with the given name.
+     * This method assumes the container does not already contain a blob of the same blobName. If a blob by the
+     * same name already exists, the operation will fail and an {@link IOException} will be thrown.
+     *
+     * @param   blobName
+     *          The name of the blob to write the contents of the input stream to.
+     * @param   inputStream
+     *          The input stream from which to retrieve the bytes to write to the blob.
+     * @param   blobSize
+     *          The size of the blob to be written, in bytes. It is implementation dependent whether
+     *          this value is used in writing the blob to the repository.
+     * @throws  IOException if the input stream could not be read, a blob by the same name already exists,
+     *          or the target blob could not be written to.
      */
     void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException;
 
     /**
-     * Writes bytes to the blob
+     * Writes the input bytes to a new blob in the container with the given name. This method assumes the
+     * container does not already contain a blob of the same blobName. If a blob by the same name already
+     * exists, the operation will fail and an {@link IOException} will be thrown.
+     *
+     * TODO: Remove this in favor of a single {@link #writeBlob(String, InputStream, long)} method.
+     *       See https://github.com/elastic/elasticsearch/issues/18528
+     *
+     * @param   blobName
+     *          The name of the blob to write the bytes to.
+     * @param   bytes
+     *          The bytes to write to the blob.
+     * @throws  IOException if a blob by the same name already exists, or the target blob could not be written to.
      */
     void writeBlob(String blobName, BytesReference bytes) throws IOException;
 
     /**
-     * Deletes a blob with giving name.
+     * Deletes a blob with the given name, if the blob exists. If the blob does not exist,
+     * this method throws an IOException.
      *
-     * If a blob exists but cannot be deleted an exception has to be thrown.
+     * @param   blobName
+     *          The name of the blob to delete.
+     * @throws  IOException if the blob does not exist, or if the blob exists but could not be deleted.
      */
     void deleteBlob(String blobName) throws IOException;
 
     /**
-     * Deletes blobs with giving names.
+     * Deletes blobs with the given names. If any subset of the names do not exist in the container, this method has no
+     * effect for those names, and will delete the blobs for those names that do exist. If any of the blobs fails
+     * to delete, the blobs that were successfully deleted before it remain deleted. An exception
+     * is thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
+     * all the failed deletes into a single IOException instead?)
      *
-     * If a blob exists but cannot be deleted an exception has to be thrown.
+     * TODO: remove, see https://github.com/elastic/elasticsearch/issues/18529
+     *
+     * @param   blobNames
+     *          The collection of blob names to delete from the container.
+     * @throws  IOException if any of the blobs in the collection exists but could not be deleted.
      */
     void deleteBlobs(Collection<String> blobNames) throws IOException;
 
     /**
-     * Deletes all blobs in the container that match the specified prefix.
+     * Deletes all blobs in the container that match the specified prefix. If any of the blobs fails to delete,
+     * the blobs that were successfully deleted before it remain deleted. An exception is
+     * thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
+     * all the failed deletes into a single IOException instead?)
+     *
+     * TODO: remove, see: https://github.com/elastic/elasticsearch/issues/18529
+     *
+     * @param   blobNamePrefix
+     *          The prefix to match against blob names in the container. Any blob whose name has the prefix will be deleted.
+     * @throws  IOException if any of the matching blobs could not be deleted.
      */
     void deleteBlobsByPrefix(String blobNamePrefix) throws IOException;
 
     /**
-     * Lists all blobs in the container
+     * Lists all blobs in the container.
+     *
+     * @return  A map of all the blobs in the container. The keys in the map are the names of the blobs and
+     *          the values are {@link BlobMetaData}, containing basic information about each blob.
+     * @throws  IOException if there were any failures in reading from the blob container.
      */
     Map<String, BlobMetaData> listBlobs() throws IOException;
 
     /**
-     * Lists all blobs in the container that match specified prefix
+     * Lists all blobs in the container that match the specified prefix.
+     *
+     * @param   blobNamePrefix
+     *          The prefix to match against blob names in the container.
+     * @return  A map of the matching blobs in the container. The keys in the map are the names of the blobs
+     *          and the values are {@link BlobMetaData}, containing basic information about each blob.
+     * @throws  IOException if there were any failures in reading from the blob container.
      */
     Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException;
 
     /**
-     * Atomically renames source blob into target blob
+     * Atomically renames the source blob into the target blob. If the source blob does not exist or the
+     * target blob already exists, an exception is thrown.
+     *
+     * @param   sourceBlobName
+     *          The blob to rename.
+     * @param   targetBlobName
+     *          The name of the blob after the renaming.
+ * @throws IOException if the source blob does not exist, the target blob already exists, + * or there were any failures in reading from the blob container. */ void move(String sourceBlobName, String targetBlobName) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java index 3f69e268034..da6c277aa2a 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobMetaData.java @@ -20,11 +20,17 @@ package org.elasticsearch.common.blobstore; /** - * + * An interface for providing basic metadata about a blob. */ public interface BlobMetaData { + /** + * Gets the name of the blob. + */ String name(); + /** + * Gets the size of the blob in bytes. + */ long length(); } diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 7636097e288..9092e13eb1b 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -19,17 +19,18 @@ package org.elasticsearch.common.blobstore; - import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; /** - * + * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. */ public class BlobPath implements Iterable { + private static final String SEPARATOR = "/"; + private final List paths; public BlobPath() { @@ -60,15 +61,12 @@ public class BlobPath implements Iterable { return new BlobPath(Collections.unmodifiableList(paths)); } - public String buildAsString(String separator) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < paths.size(); i++) { - sb.append(paths.get(i)); - if (i < (paths.size() - 1)) { - sb.append(separator); - } + public String buildAsString() { + String p = String.join(SEPARATOR, paths); + if (p.isEmpty()) { + return p; } - return sb.toString(); + return p + SEPARATOR; } @Override diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java index 9275b379158..e4cdb148a15 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java @@ -22,12 +22,18 @@ import java.io.Closeable; import java.io.IOException; /** - * + * An interface for storing blobs. */ public interface BlobStore extends Closeable { + /** + * Get a blob container instance for storing blobs at the given {@link BlobPath}. + */ BlobContainer blobContainer(BlobPath path); + /** + * Delete the blob store at the given {@link BlobPath}. + */ void delete(BlobPath path) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index c62166a23a3..822f8d1721a 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -41,7 +41,12 @@ import java.util.Map; import static java.util.Collections.unmodifiableMap; /** + * A file system based implementation of {@link org.elasticsearch.common.blobstore.BlobContainer}. 
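The rewritten BlobPath#buildAsString above hard-codes the '/' separator and, unlike the removed buildAsString(String), appends a trailing separator to every non-empty path. The joining logic in isolation, as a sketch with plain strings standing in for path components:

    import java.util.Arrays;
    import java.util.List;

    public class BlobPathJoinDemo {
        private static final String SEPARATOR = "/";

        // Mirrors the new buildAsString(): join with '/', then append a
        // trailing '/' unless the path is empty.
        static String buildAsString(List<String> paths) {
            String p = String.join(SEPARATOR, paths);
            return p.isEmpty() ? p : p + SEPARATOR;
        }

        public static void main(String[] args) {
            System.out.println(buildAsString(Arrays.asList("indices", "0"))); // indices/0/
            System.out.println(buildAsString(Arrays.asList()).isEmpty());     // true
        }
    }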
+ * All blobs in the container are stored on a file system, the location of which is specified by the {@link BlobPath}. * + * Note that the methods in this implementation of {@link org.elasticsearch.common.blobstore.BlobContainer} may + * additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager} + * does not permit read and/or write access to the underlying files. */ public class FsBlobContainer extends AbstractBlobContainer { diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java index 8f83bbf8098..9929de2674d 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/support/AbstractBlobContainer.java @@ -30,7 +30,7 @@ import java.util.Collection; import java.util.Map; /** - * + * A base abstract blob container that implements higher level container methods. */ public abstract class AbstractBlobContainer implements BlobContainer { @@ -55,11 +55,11 @@ public abstract class AbstractBlobContainer implements BlobContainer { @Override public void deleteBlobs(Collection blobNames) throws IOException { - for(String blob: blobNames) { + for (String blob: blobNames) { deleteBlob(blob); } } - + @Override public void writeBlob(String blobName, BytesReference bytes) throws IOException { try (InputStream stream = bytes.streamInput()) { diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 5cb690b7375..43c1df588b1 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -20,30 +20,22 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; import java.io.IOException; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; import java.nio.charset.StandardCharsets; import java.util.Arrays; -public class BytesArray implements BytesReference { +public final class BytesArray extends BytesReference { public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0); - - private byte[] bytes; - private int offset; - private int length; + private final byte[] bytes; + private final int offset; + private final int length; public BytesArray(String bytes) { - BytesRef bytesRef = new BytesRef(bytes); - this.bytes = bytesRef.bytes; - this.offset = bytesRef.offset; - this.length = bytesRef.length; + this(new BytesRef(bytes)); } public BytesArray(BytesRef bytesRef) { @@ -52,21 +44,15 @@ public class BytesArray implements BytesReference { public BytesArray(BytesRef bytesRef, boolean deepCopy) { if (deepCopy) { - BytesRef copy = BytesRef.deepCopyOf(bytesRef); - bytes = copy.bytes; - offset = copy.offset; - length = copy.length; - } else { - bytes = bytesRef.bytes; - offset = bytesRef.offset; - length = bytesRef.length; + bytesRef = BytesRef.deepCopyOf(bytesRef); } + bytes = bytesRef.bytes; + offset = bytesRef.offset; + length = bytesRef.length; } public BytesArray(byte[] bytes) { - this.bytes = bytes; - this.offset = 0; - this.length = bytes.length; + this(bytes, 0, bytes.length); } 
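BytesArray is now final with final fields, and the constructors above delegate to one another so that the deepCopy flag is the single place where bytes are copied. A short sketch of the wrap-versus-copy distinction (hypothetical usage, not part of the diff):

    import java.nio.charset.StandardCharsets;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.bytes.BytesArray;

    public class BytesArrayDemo {
        public static void main(String[] args) {
            byte[] data = "hello".getBytes(StandardCharsets.UTF_8);

            BytesArray shared = new BytesArray(new BytesRef(data));       // wraps the array
            BytesArray copied = new BytesArray(new BytesRef(data), true); // deep-copies it

            data[0] = 'H';
            System.out.println((char) shared.get(0)); // H - sees the caller's mutation
            System.out.println((char) copied.get(0)); // h - isolated copy
        }
    }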
public BytesArray(byte[] bytes, int offset, int length) { @@ -93,84 +79,22 @@ public class BytesArray implements BytesReference { return new BytesArray(bytes, offset + from, length); } - @Override - public StreamInput streamInput() { - return StreamInput.wrap(bytes, offset, length); - } - - @Override - public void writeTo(OutputStream os) throws IOException { - os.write(bytes, offset, length); - } - - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - Channels.writeToChannel(bytes, offset, length(), channel); - } - - @Override - public byte[] toBytes() { - if (offset == 0 && bytes.length == length) { - return bytes; - } - return Arrays.copyOfRange(bytes, offset, offset + length); - } - - @Override - public BytesArray toBytesArray() { - return this; - } - - @Override - public BytesArray copyBytesArray() { - return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length)); - } - - @Override - public ChannelBuffer toChannelBuffer() { - return ChannelBuffers.wrappedBuffer(bytes, offset, length); - } - - @Override - public boolean hasArray() { - return true; - } - - @Override public byte[] array() { return bytes; } - @Override - public int arrayOffset() { + public int offset() { return offset; } - @Override - public String toUtf8() { - if (length == 0) { - return ""; - } - return new String(bytes, offset, length, StandardCharsets.UTF_8); - } - @Override public BytesRef toBytesRef() { return new BytesRef(bytes, offset, length); } @Override - public BytesRef copyBytesRef() { - return new BytesRef(Arrays.copyOfRange(bytes, offset, offset + length)); + public long ramBytesUsed() { + return bytes.length; } - @Override - public int hashCode() { - return Helper.bytesHashCode(this); - } - - @Override - public boolean equals(Object obj) { - return Helper.bytesEqual(this, (BytesReference) obj); - } } diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index a72346f9ee8..f31ea2bbf82 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -18,148 +18,263 @@ */ package org.elasticsearch.common.bytes; +import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.StreamInput; -import org.jboss.netty.buffer.ChannelBuffer; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; +import java.util.function.ToIntBiFunction; /** * A reference to bytes. 
*/ -public interface BytesReference { +public abstract class BytesReference implements Accountable, Comparable { - public static class Helper { - - public static boolean bytesEqual(BytesReference a, BytesReference b) { - if (a == b) { - return true; - } - if (a.length() != b.length()) { - return false; - } - - return bytesEquals(a, b); - } - - // pkg-private for testing - static boolean bytesEquals(BytesReference a, BytesReference b) { - assert a.length() == b.length(); - for (int i = 0, end = a.length(); i < end; ++i) { - if (a.get(i) != b.get(i)) { - return false; - } - } - - return true; - } - - public static int bytesHashCode(BytesReference a) { - if (a.hasArray()) { - return hashCode(a.array(), a.arrayOffset(), a.length()); - } else { - return slowHashCode(a); - } - } - - // pkg-private for testing - static int hashCode(byte[] array, int offset, int length) { - int result = 1; - for (int i = offset, end = offset + length; i < end; ++i) { - result = 31 * result + array[i]; - } - return result; - } - - // pkg-private for testing - static int slowHashCode(BytesReference a) { - int result = 1; - for (int i = 0, end = a.length(); i < end; ++i) { - result = 31 * result + a.get(i); - } - return result; - } - } + private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it /** * Returns the byte at the specified index. Need to be between 0 and length. */ - byte get(int index); + public abstract byte get(int index); /** * The length. */ - int length(); + public abstract int length(); /** * Slice the bytes from the from index up to length. */ - BytesReference slice(int from, int length); + public abstract BytesReference slice(int from, int length); /** * A stream input of the bytes. */ - StreamInput streamInput(); + public StreamInput streamInput() throws IOException { + return new MarkSupportingStreamInputWrapper(this); + } /** * Writes the bytes directly to the output stream. */ - void writeTo(OutputStream os) throws IOException; + public void writeTo(OutputStream os) throws IOException { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + while ((ref = iterator.next()) != null) { + os.write(ref.bytes, ref.offset, ref.length); + } + } /** - * Writes the bytes directly to the channel. + * Interprets the referenced bytes as UTF8 bytes, returning the resulting string */ - void writeTo(GatheringByteChannel channel) throws IOException; - - /** - * Returns the bytes as a single byte array. - */ - byte[] toBytes(); - - /** - * Returns the bytes as a byte array, possibly sharing the underlying byte buffer. - */ - BytesArray toBytesArray(); - - /** - * Returns the bytes copied over as a byte array. - */ - BytesArray copyBytesArray(); - - /** - * Returns the bytes as a channel buffer. - */ - ChannelBuffer toChannelBuffer(); - - /** - * Is there an underlying byte array for this bytes reference. - */ - boolean hasArray(); - - /** - * The underlying byte array (if exists). - */ - byte[] array(); - - /** - * The offset into the underlying byte array. - */ - int arrayOffset(); - - /** - * Converts to a string based on utf8. - */ - String toUtf8(); + public String utf8ToString() { + return toBytesRef().utf8ToString(); + } /** * Converts to Lucene BytesRef. */ - BytesRef toBytesRef(); + public abstract BytesRef toBytesRef(); /** - * Converts to a copied Lucene BytesRef. + * Returns a BytesRefIterator for this BytesReference. This method allows + * access to the internal pages of this reference without copying them. Use with care! 
+ * @see BytesRefIterator */ - BytesRef copyBytesRef(); + public BytesRefIterator iterator() { + return new BytesRefIterator() { + BytesRef ref = length() == 0 ? null : toBytesRef(); + @Override + public BytesRef next() throws IOException { + BytesRef r = ref; + ref = null; // only return it once... + return r; + } + }; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof BytesReference) { + final BytesReference otherRef = (BytesReference) other; + if (length() != otherRef.length()) { + return false; + } + return compareIterators(this, otherRef, (a, b) -> + a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison + ) == 0; + } + return false; + } + + @Override + public int hashCode() { + if (hash == null) { + final BytesRefIterator iterator = iterator(); + BytesRef ref; + int result = 1; + try { + while ((ref = iterator.next()) != null) { + for (int i = 0; i < ref.length; i++) { + result = 31 * result + ref.bytes[ref.offset + i]; + } + } + } catch (IOException ex) { + throw new AssertionError("wont happen", ex); + } + return hash = result; + } else { + return hash.intValue(); + } + } + + /** + * Returns a compact array from the given BytesReference. The returned array won't be copied unless necessary. If you need + * to modify the returned array use BytesRef.deepCopyOf(reference.toBytesRef() instead + */ + public static byte[] toBytes(BytesReference reference) { + final BytesRef bytesRef = reference.toBytesRef(); + if (bytesRef.offset == 0 && bytesRef.length == bytesRef.bytes.length) { + return bytesRef.bytes; + } + return BytesRef.deepCopyOf(bytesRef).bytes; + } + + @Override + public int compareTo(final BytesReference other) { + return compareIterators(this, other, (a, b) -> a.compareTo(b)); + } + + /** + * Compares the two references using the given int function. + */ + private static int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction f) { + try { + // we use the iterators since it's a 0-copy comparison where possible! + final long lengthToCompare = Math.min(a.length(), b.length()); + final BytesRefIterator aIter = a.iterator(); + final BytesRefIterator bIter = b.iterator(); + BytesRef aRef = aIter.next(); + BytesRef bRef = bIter.next(); + if (aRef != null && bRef != null) { // do we have any data? + aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below + bRef = bRef.clone(); + if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing? 
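The page-wise BytesRefIterator above is what lets writeTo, equals and compareTo operate without flattening a multi-page reference into a single array. A sketch of the typical consumer loop (the checksum itself is an arbitrary example):

    import java.io.IOException;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefIterator;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.bytes.BytesReference;

    public class PageConsumerDemo {
        // Sums all bytes page by page without copying any of the underlying pages.
        static long checksum(BytesReference ref) throws IOException {
            long sum = 0;
            BytesRefIterator it = ref.iterator();
            BytesRef page;
            while ((page = it.next()) != null) { // null signals the end of the iteration
                for (int i = 0; i < page.length; i++) {
                    sum += page.bytes[page.offset + i] & 0xFF;
                }
            }
            return sum;
        }

        public static void main(String[] args) throws IOException {
            System.out.println(checksum(new BytesArray("abc"))); // 97 + 98 + 99 = 294
        }
    }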
+ return f.applyAsInt(aRef, bRef); + } else { + for (int i = 0; i < lengthToCompare;) { + if (aRef.length == 0) { + aRef = aIter.next().clone(); // must be non null otherwise we have a bug + } + if (bRef.length == 0) { + bRef = bIter.next().clone(); // must be non null otherwise we have a bug + } + final int aLength = aRef.length; + final int bLength = bRef.length; + final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene + aRef.length = bRef.length = length; + // now we move to the fast comparison - this is the hot part of the loop + int diff = f.applyAsInt(aRef, bRef); + aRef.length = aLength; + bRef.length = bLength; + + if (diff != 0) { + return diff; + } + advance(aRef, length); + advance(bRef, length); + i += length; + } + } + } + // One is a prefix of the other, or, they are equal: + return a.length() - b.length(); + } catch (IOException ex) { + throw new AssertionError("can not happen", ex); + } + } + + private static void advance(final BytesRef ref, final int length) { + assert ref.length >= length : " ref.length: " + ref.length + " length: " + length; + assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0) + : "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length; + ref.length -= length; + ref.offset += length; + } + + /** + * Instead of adding the complexity of {@link InputStream#reset()} etc to the actual impl + * this wrapper builds it on top of the BytesReferenceStreamInput which is much simpler + * that way. + */ + private static final class MarkSupportingStreamInputWrapper extends StreamInput { + private final BytesReference reference; + private BytesReferenceStreamInput input; + private int mark = 0; + + private MarkSupportingStreamInputWrapper(BytesReference reference) throws IOException { + this.reference = reference; + this.input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + } + + @Override + public byte readByte() throws IOException { + return input.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + input.readBytes(b, offset, len); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return input.read(b, off, len); + } + + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public int read() throws IOException { + return input.read(); + } + + @Override + public int available() throws IOException { + return input.available(); + } + + @Override + public void reset() throws IOException { + input = new BytesReferenceStreamInput(reference.iterator(), reference.length()); + input.skip(mark); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public void mark(int readLimit) { + // readLimit is optional it only guarantees that the stream remembers data upto this limit but it can remember more + // which we do in our case + this.mark = input.getOffset(); + } + + @Override + public long skip(long n) throws IOException { + return input.skip(n); + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java new file mode 100644 index 00000000000..4426ea53efa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java @@ -0,0 +1,136 @@ +/* + * 
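mark in the wrapper above records only a byte offset, and reset rebuilds the underlying BytesReferenceStreamInput and skips forward to that offset, which is why readLimit can safely be ignored. A sketch of the observable contract (hypothetical stream contents):

    import java.io.IOException;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.io.stream.StreamInput;

    public class MarkResetDemo {
        public static void main(String[] args) throws IOException {
            StreamInput in = new BytesArray(new byte[]{1, 2, 3, 4}).streamInput();
            in.readByte();               // consume 1
            in.mark(Integer.MAX_VALUE);  // remember offset 1; the limit is ignored
            in.readByte();               // consume 2
            in.readByte();               // consume 3
            in.reset();                  // rewind to offset 1
            System.out.println(in.readByte()); // prints 2 again
        }
    }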
Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.EOFException; +import java.io.IOException; + +/** + * A StreamInput that reads off a {@link BytesRefIterator}. This is used to provide + * generic stream access to {@link BytesReference} instances without materializing the + * underlying bytes reference. + */ +final class BytesReferenceStreamInput extends StreamInput { + private final BytesRefIterator iterator; + private int sliceOffset; + private BytesRef slice; + private final int length; // the total size of the stream + private int offset; // the current position of the stream + + public BytesReferenceStreamInput(BytesRefIterator iterator, final int length) throws IOException { + this.iterator = iterator; + this.slice = iterator.next(); + this.length = length; + this.offset = 0; + this.sliceOffset = 0; + } + + @Override + public byte readByte() throws IOException { + if (offset >= length) { + throw new EOFException(); + } + maybeNextSlice(); + byte b = slice.bytes[slice.offset + (sliceOffset++)]; + offset++; + return b; + } + + private void maybeNextSlice() throws IOException { + while (sliceOffset == slice.length) { + slice = iterator.next(); + sliceOffset = 0; + if (slice == null) { + throw new EOFException(); + } + } + } + + @Override + public void readBytes(byte[] b, int bOffset, int len) throws IOException { + if (offset + len > length) { + throw new IndexOutOfBoundsException("Cannot read " + len + " bytes from stream with length " + length + " at offset " + offset); + } + read(b, bOffset, len); + } + + @Override + public int read() throws IOException { + if (offset >= length) { + return -1; + } + return Byte.toUnsignedInt(readByte()); + } + + @Override + public int read(final byte[] b, final int bOffset, final int len) throws IOException { + if (offset >= length) { + return -1; + } + final int numBytesToCopy = Math.min(len, length - offset); + int remaining = numBytesToCopy; // copy the full length or the remaining part + int destOffset = bOffset; + while (remaining > 0) { + maybeNextSlice(); + final int currentLen = Math.min(remaining, slice.length - sliceOffset); + assert currentLen > 0 : "length has to be > 0 to make progress but was: " + currentLen; + System.arraycopy(slice.bytes, slice.offset + sliceOffset, b, destOffset, currentLen); + destOffset += currentLen; + remaining -= currentLen; + sliceOffset += currentLen; + offset += currentLen; + assert remaining >= 0 : "remaining: " + remaining; + } + return numBytesToCopy; + } + + @Override + public void close() throws IOException { + // do nothing + } + + @Override + public int available() 
throws IOException {
+        return length - offset;
+    }
+
+    @Override
+    public long skip(long n) throws IOException {
+        final int skip = (int) Math.min(Integer.MAX_VALUE, n);
+        final int numBytesSkipped = Math.min(skip, length - offset);
+        int remaining = numBytesSkipped;
+        while (remaining > 0) {
+            maybeNextSlice();
+            // sliceOffset is relative to slice.offset, so the bytes left in the
+            // current slice are slice.length - sliceOffset
+            int currentLen = Math.min(remaining, slice.length - sliceOffset);
+            remaining -= currentLen;
+            sliceOffset += currentLen;
+            offset += currentLen;
+            assert remaining >= 0 : "remaining: " + remaining;
+        }
+        return numBytesSkipped;
+    }
+
+    int getOffset() {
+        return offset;
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java
deleted file mode 100644
index 4d6c11214bb..00000000000
--- a/core/src/main/java/org/elasticsearch/common/bytes/ChannelBufferBytesReference.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.bytes;
-
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.io.Channels;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.transport.netty.ChannelBufferStreamInputFactory;
-import org.jboss.netty.buffer.ChannelBuffer;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.channels.GatheringByteChannel;
-import java.nio.charset.StandardCharsets;
-
-/**
- */
-public class ChannelBufferBytesReference implements BytesReference {
-
-    private final ChannelBuffer buffer;
-
-    public ChannelBufferBytesReference(ChannelBuffer buffer) {
-        this.buffer = buffer;
-    }
-
-    @Override
-    public byte get(int index) {
-        return buffer.getByte(buffer.readerIndex() + index);
-    }
-
-    @Override
-    public int length() {
-        return buffer.readableBytes();
-    }
-
-    @Override
-    public BytesReference slice(int from, int length) {
-        return new ChannelBufferBytesReference(buffer.slice(from, length));
-    }
-
-    @Override
-    public StreamInput streamInput() {
-        return ChannelBufferStreamInputFactory.create(buffer.duplicate());
-    }
-
-    @Override
-    public void writeTo(OutputStream os) throws IOException {
-        buffer.getBytes(buffer.readerIndex(), os, length());
-    }
-
-    @Override
-    public void writeTo(GatheringByteChannel channel) throws IOException {
-        Channels.writeToChannel(buffer, buffer.readerIndex(), length(), channel);
-    }
-
-    @Override
-    public byte[] toBytes() {
-        return copyBytesArray().toBytes();
-    }
-
-    @Override
-    public BytesArray toBytesArray() {
-        if (buffer.hasArray()) {
-            return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes());
-        }
-        return copyBytesArray();
-    }
-
-    @Override
-    public BytesArray copyBytesArray() {
-        
byte[] copy = new byte[buffer.readableBytes()]; - buffer.getBytes(buffer.readerIndex(), copy); - return new BytesArray(copy); - } - - @Override - public ChannelBuffer toChannelBuffer() { - return buffer.duplicate(); - } - - @Override - public boolean hasArray() { - return buffer.hasArray(); - } - - @Override - public byte[] array() { - return buffer.array(); - } - - @Override - public int arrayOffset() { - return buffer.arrayOffset() + buffer.readerIndex(); - } - - @Override - public String toUtf8() { - return buffer.toString(StandardCharsets.UTF_8); - } - - @Override - public BytesRef toBytesRef() { - if (buffer.hasArray()) { - return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.readerIndex(), buffer.readableBytes()); - } - byte[] copy = new byte[buffer.readableBytes()]; - buffer.getBytes(buffer.readerIndex(), copy); - return new BytesRef(copy); - } - - @Override - public BytesRef copyBytesRef() { - byte[] copy = new byte[buffer.readableBytes()]; - buffer.getBytes(buffer.readerIndex(), copy); - return new BytesRef(copy); - } - - @Override - public int hashCode() { - return Helper.bytesHashCode(this); - } - - @Override - public boolean equals(Object obj) { - return Helper.bytesEqual(this, (BytesReference) obj); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java new file mode 100644 index 00000000000..3538cba869c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.BytesRefIterator; +import org.apache.lucene.util.RamUsageEstimator; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * A composite {@link BytesReference} that allows joining multiple bytes references + * into one without copying. + * + * Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. + */ +public final class CompositeBytesReference extends BytesReference { + + private final BytesReference[] references; + private final int[] offsets; + private final int length; + private final long ramBytesUsed; + + public CompositeBytesReference(BytesReference... 
references) { + this.references = Objects.requireNonNull(references, "references must not be null"); + this.offsets = new int[references.length]; + long ramBytesUsed = 0; + int offset = 0; + for (int i = 0; i < references.length; i++) { + BytesReference reference = references[i]; + if (reference == null) { + throw new IllegalArgumentException("references must not be null"); + } + offsets[i] = offset; // we use the offsets to seek into the right BytesReference for random access and slicing + offset += reference.length(); + ramBytesUsed += reference.ramBytesUsed(); + } + this.ramBytesUsed = ramBytesUsed + + (Integer.BYTES * offsets.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) // offsets + + (references.length * RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) // references + + Integer.BYTES // length + + Long.BYTES; // ramBytesUsed + length = offset; + } + + + @Override + public byte get(int index) { + final int i = getOffsetIndex(index); + return references[i].get(index - offsets[i]); + } + + @Override + public int length() { + return length; + } + + @Override + public BytesReference slice(int from, int length) { + // for slices we only need to find the start and the end reference + // adjust them and pass on the references in between as they are fully contained + final int to = from + length; + final int limit = getOffsetIndex(from + length); + final int start = getOffsetIndex(from); + final BytesReference[] inSlice = new BytesReference[1 + (limit - start)]; + for (int i = 0, j = start; i < inSlice.length; i++) { + inSlice[i] = references[j++]; + } + int inSliceOffset = from - offsets[start]; + if (inSlice.length == 1) { + return inSlice[0].slice(inSliceOffset, length); + } + // now adjust slices in front and at the end + inSlice[0] = inSlice[0].slice(inSliceOffset, inSlice[0].length() - inSliceOffset); + inSlice[inSlice.length-1] = inSlice[inSlice.length-1].slice(0, to - offsets[limit]); + return new CompositeBytesReference(inSlice); + } + + private int getOffsetIndex(int offset) { + final int i = Arrays.binarySearch(offsets, offset); + return i < 0 ? 
(-(i + 1)) - 1 : i; + } + + @Override + public BytesRef toBytesRef() { + BytesRefBuilder builder = new BytesRefBuilder(); + builder.grow(length()); + BytesRef spare; + BytesRefIterator iterator = iterator(); + try { + while ((spare = iterator.next()) != null) { + builder.append(spare); + } + } catch (IOException ex) { + throw new AssertionError("won't happen", ex); // this is really an error since we don't do IO in our bytesreferences + } + return builder.toBytesRef(); + } + + @Override + public BytesRefIterator iterator() { + if (references.length > 0) { + return new BytesRefIterator() { + int index = 0; + private BytesRefIterator current = references[index++].iterator(); + @Override + public BytesRef next() throws IOException { + BytesRef next = current.next(); + if (next == null) { + while (index < references.length) { + current = references[index++].iterator(); + next = current.next(); + if (next != null) { + break; + } + } + } + return next; + } + }; + } else { + return () -> null; + } + + } + + @Override + public long ramBytesUsed() { + return ramBytesUsed; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 16ce91dc38f..ef1102326d6 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -20,26 +20,18 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.io.Channels; +import org.apache.lucene.util.BytesRefIterator; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.netty.NettyUtils; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; -import org.jboss.netty.buffer.ChannelBuffer; -import org.jboss.netty.buffer.ChannelBuffers; -import java.io.EOFException; import java.io.IOException; -import java.io.OutputStream; -import java.nio.channels.GatheringByteChannel; -import java.util.Arrays; /** * A page based bytes reference, internally holding the bytes in a paged * data structure. */ -public class PagedBytesReference implements BytesReference { +public class PagedBytesReference extends BytesReference { private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE; @@ -47,7 +39,6 @@ public class PagedBytesReference implements BytesReference { protected final ByteArray bytearray; private final int offset; private final int length; - private int hash = 0; public PagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) { this(bigarrays, bytearray, 0, length); @@ -75,206 +66,9 @@ public class PagedBytesReference implements BytesReference { if (from < 0 || (from + length) > length()) { throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); } - return new PagedBytesReference(bigarrays, bytearray, offset + from, length); } - @Override - public StreamInput streamInput() { - return new PagedBytesReferenceStreamInput(bytearray, offset, length); - } - - @Override - public void writeTo(OutputStream os) throws IOException { - // nothing to do - if (length == 0) { - return; - } - - BytesRef ref = new BytesRef(); - int written = 0; - - // are we a slice? 
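CompositeBytesReference above keeps the backing references untouched and records only cumulative offsets, so joining is proportional to the number of references rather than the number of bytes. A small usage sketch (the contents are illustrative):

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.bytes.BytesReference;
    import org.elasticsearch.common.bytes.CompositeBytesReference;

    public class CompositeDemo {
        public static void main(String[] args) {
            BytesReference header = new BytesArray("header:");
            BytesReference body = new BytesArray("payload");

            // No bytes are copied here; only the offset table is built.
            BytesReference joined = new CompositeBytesReference(header, body);

            System.out.println(joined.length());                   // 14
            System.out.println((char) joined.get(7));              // p - random access across the seam
            System.out.println(joined.slice(7, 7).utf8ToString()); // payload
        }
    }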
- if (offset != 0) { - // remaining size of page fragment at offset - int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE)); - bytearray.get(offset, fragmentSize, ref); - os.write(ref.bytes, ref.offset, fragmentSize); - written += fragmentSize; - } - - // handle remainder of pages + trailing fragment - while (written < length) { - int remaining = length - written; - int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining; - bytearray.get(offset + written, bulkSize, ref); - os.write(ref.bytes, ref.offset, bulkSize); - written += bulkSize; - } - } - - @Override - public void writeTo(GatheringByteChannel channel) throws IOException { - // nothing to do - if (length == 0) { - return; - } - - int currentLength = length; - int currentOffset = offset; - BytesRef ref = new BytesRef(); - - while (currentLength > 0) { - // try to align to the underlying pages while writing, so no new arrays will be created. - int fragmentSize = Math.min(currentLength, PAGE_SIZE - (currentOffset % PAGE_SIZE)); - boolean newArray = bytearray.get(currentOffset, fragmentSize, ref); - assert !newArray : "PagedBytesReference failed to align with underlying bytearray. offset [" + currentOffset + "], size [" + fragmentSize + "]"; - Channels.writeToChannel(ref.bytes, ref.offset, ref.length, channel); - currentLength -= ref.length; - currentOffset += ref.length; - } - - assert currentLength == 0; - } - - @Override - public byte[] toBytes() { - if (length == 0) { - return BytesRef.EMPTY_BYTES; - } - - BytesRef ref = new BytesRef(); - bytearray.get(offset, length, ref); - - // undo the single-page optimization by ByteArray.get(), otherwise - // a materialized stream will contain trailing garbage/zeros - byte[] result = ref.bytes; - if (result.length != length || ref.offset != 0) { - result = Arrays.copyOfRange(result, ref.offset, ref.offset + length); - } - - return result; - } - - @Override - public BytesArray toBytesArray() { - BytesRef ref = new BytesRef(); - bytearray.get(offset, length, ref); - return new BytesArray(ref); - } - - @Override - public BytesArray copyBytesArray() { - BytesRef ref = new BytesRef(); - boolean copied = bytearray.get(offset, length, ref); - - if (copied) { - // BigArray has materialized for us, no need to do it again - return new BytesArray(ref.bytes, ref.offset, ref.length); - } else { - // here we need to copy the bytes even when shared - byte[] copy = Arrays.copyOfRange(ref.bytes, ref.offset, ref.offset + ref.length); - return new BytesArray(copy); - } - } - - @Override - public ChannelBuffer toChannelBuffer() { - // nothing to do - if (length == 0) { - return ChannelBuffers.EMPTY_BUFFER; - } - - ChannelBuffer[] buffers; - ChannelBuffer currentBuffer = null; - BytesRef ref = new BytesRef(); - int pos = 0; - - // are we a slice? - if (offset != 0) { - // remaining size of page fragment at offset - int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE)); - bytearray.get(offset, fragmentSize, ref); - currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, fragmentSize); - pos += fragmentSize; - } - - // no need to create a composite buffer for a single page - if (pos == length && currentBuffer != null) { - return currentBuffer; - } - - // a slice > pagesize will likely require extra buffers for initial/trailing fragments - int numBuffers = countRequiredBuffers((currentBuffer != null ? 
1 : 0), length - pos); - - buffers = new ChannelBuffer[numBuffers]; - int bufferSlot = 0; - - if (currentBuffer != null) { - buffers[bufferSlot] = currentBuffer; - bufferSlot++; - } - - // handle remainder of pages + trailing fragment - while (pos < length) { - int remaining = length - pos; - int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining; - bytearray.get(offset + pos, bulkSize, ref); - currentBuffer = ChannelBuffers.wrappedBuffer(ref.bytes, ref.offset, bulkSize); - buffers[bufferSlot] = currentBuffer; - bufferSlot++; - pos += bulkSize; - } - - // this would indicate that our numBuffer calculation is off by one. - assert (numBuffers == bufferSlot); - - return ChannelBuffers.wrappedBuffer(NettyUtils.DEFAULT_GATHERING, buffers); - } - - @Override - public boolean hasArray() { - return (offset + length <= PAGE_SIZE); - } - - @Override - public byte[] array() { - if (hasArray()) { - if (length == 0) { - return BytesRef.EMPTY_BYTES; - } - - BytesRef ref = new BytesRef(); - bytearray.get(offset, length, ref); - return ref.bytes; - } - - throw new IllegalStateException("array not available"); - } - - @Override - public int arrayOffset() { - if (hasArray()) { - BytesRef ref = new BytesRef(); - bytearray.get(offset, length, ref); - return ref.offset; - } - - throw new IllegalStateException("array not available"); - } - - @Override - public String toUtf8() { - if (length() == 0) { - return ""; - } - - byte[] bytes = toBytes(); - final CharsRefBuilder ref = new CharsRefBuilder(); - ref.copyUTF8Bytes(bytes, offset, length); - return ref.toString(); - } - @Override public BytesRef toBytesRef() { BytesRef bref = new BytesRef(); @@ -284,171 +78,39 @@ public class PagedBytesReference implements BytesReference { } @Override - public BytesRef copyBytesRef() { - byte[] bytes = toBytes(); - return new BytesRef(bytes, offset, length); + public final BytesRefIterator iterator() { + final int offset = this.offset; + final int length = this.length; + // this iteration is page aligned to ensure we do NOT materialize the pages from the ByteArray + // we calculate the initial fragment size here to ensure that if this reference is a slice we are still page aligned + // across the entire iteration. The first page is smaller if our offset != 0 then we start in the middle of the page + // otherwise we iterate full pages until we reach the last chunk which also might end within a page. + final int initialFragmentSize = offset != 0 ? 
PAGE_SIZE - (offset % PAGE_SIZE) : PAGE_SIZE; + return new BytesRefIterator() { + int position = 0; + int nextFragmentSize = Math.min(length, initialFragmentSize); + // this BytesRef is reused across the iteration on purpose - BytesRefIterator interface was designed for this + final BytesRef slice = new BytesRef(); + + @Override + public BytesRef next() throws IOException { + if (nextFragmentSize != 0) { + final boolean materialized = bytearray.get(offset + position, nextFragmentSize, slice); + assert materialized == false : "iteration should be page aligned but array got materialized"; + position += nextFragmentSize; + final int remaining = length - position; + nextFragmentSize = Math.min(remaining, PAGE_SIZE); + return slice; + } else { + assert nextFragmentSize == 0 : "fragmentSize expected [0] but was: [" + nextFragmentSize + "]"; + return null; // we are done with this iteration + } + } + }; } @Override - public int hashCode() { - if (hash == 0) { - // TODO: delegate to BigArrays via: - // hash = bigarrays.hashCode(bytearray); - // and for slices: - // hash = bigarrays.hashCode(bytearray, offset, length); - int tmphash = 1; - for (int i = 0; i < length; i++) { - tmphash = 31 * tmphash + bytearray.get(offset + i); - } - hash = tmphash; - } - return hash; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null) { - return false; - } - - if (!(obj instanceof PagedBytesReference)) { - return BytesReference.Helper.bytesEqual(this, (BytesReference) obj); - } - - PagedBytesReference other = (PagedBytesReference) obj; - if (length != other.length) { - return false; - } - - // TODO: delegate to BigArrays via: - // return bigarrays.equals(bytearray, other.bytearray); - // and for slices: - // return bigarrays.equals(bytearray, start, other.bytearray, otherstart, len); - ByteArray otherArray = other.bytearray; - int otherOffset = other.offset; - for (int i = 0; i < length; i++) { - if (bytearray.get(offset + i) != otherArray.get(otherOffset + i)) { - return false; - } - } - return true; - } - - private int countRequiredBuffers(int initialCount, int numBytes) { - int numBuffers = initialCount; - // an "estimate" of how many pages remain - rounded down - int pages = numBytes / PAGE_SIZE; - // a remaining fragment < pagesize needs at least one buffer - numBuffers += (pages == 0) ? 1 : pages; - // a remainder that is not a multiple of pagesize also needs an extra buffer - numBuffers += (pages > 0 && numBytes % PAGE_SIZE > 0) ? 
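The initial-fragment computation above is what keeps the iteration page aligned even for slices: the first fragment only reaches the next page boundary, after which every fragment is a full page until the tail. The arithmetic in isolation (plain Java, no Elasticsearch types; the page size matches BigArrays.BYTE_PAGE_SIZE):

    public class PageAlignmentDemo {
        static final int PAGE_SIZE = 16384; // BigArrays.BYTE_PAGE_SIZE

        public static void main(String[] args) {
            int offset = 20000; // a slice starting inside the second page
            int length = 40000;

            int initialFragmentSize = offset != 0 ? PAGE_SIZE - (offset % PAGE_SIZE) : PAGE_SIZE;
            int position = 0;
            int next = Math.min(length, initialFragmentSize);
            while (next != 0) {
                // Each fragment ends exactly on a page boundary (or at the tail).
                System.out.println("fragment at " + (offset + position) + ", size " + next);
                position += next;
                next = Math.min(length - position, PAGE_SIZE);
            }
            // Prints fragments of size 12768, 16384 and 10848.
        }
    }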
diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java
index 603087a9213..2152aa226a8 100644
--- a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java
+++ b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java
@@ -28,7 +28,7 @@ import org.elasticsearch.common.util.ByteArray;
  * An extension to {@link PagedBytesReference} that requires releasing its content. This
  * class exists to make it explicit when a bytes reference needs to be released, and when not.
  */
-public class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {
+public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {
 
     public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
         super(bigarrays, bytearray, length);
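Making the class final does not change the usage contract: a ReleasablePagedBytesReference must still be released once its bytes are consumed. A sketch of that pattern under the assumption that Releasable extends Closeable; the stand-in class below is hypothetical so the snippet compiles without Elasticsearch on the classpath:

```java
import java.io.Closeable;

// Hypothetical stand-in for a Releasable bytes reference; with the real
// ReleasablePagedBytesReference, close() returns the backing pages to BigArrays.
final class PagedBuffer implements Closeable {
    @Override
    public void close() {
        System.out.println("pages released");
    }
}

final class ReleasableUsage {
    public static void main(String[] args) throws Exception {
        try (PagedBuffer bytes = new PagedBuffer()) {
            // consume the bytes only inside this scope; the reference must not
            // escape, because the pages are recycled when close() runs
        }
    }
}
```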
diff --git a/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java b/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java
index 3c0579c87e1..53167686736 100644
--- a/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java
+++ b/core/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java
@@ -75,7 +75,7 @@ public final class CopyOnWriteHashMap<K, V> extends AbstractMap<K, V> {
     /**
      * Abstraction of a node, implemented by both inner and leaf nodes.
      */
-    private static abstract class Node<K, V> {
+    private abstract static class Node<K, V> {
 
         /**
          * Recursively get the key with the given hash.
diff --git a/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java b/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java
index 5dc17bedb39..55fc67831e3 100644
--- a/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java
+++ b/core/src/main/java/org/elasticsearch/common/collect/HppcMaps.java
@@ -132,8 +132,8 @@ public final class HppcMaps {
         };
     }
 
-    public final static class Object {
-        public final static class Integer {
+    public static final class Object {
+        public static final class Integer {
             public static <V> ObjectIntHashMap<V> ensureNoNullKeys(int capacity, float loadFactor) {
                 return new ObjectIntHashMap<V>(capacity, loadFactor) {
                     @Override
diff --git a/core/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java b/core/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java
index 0b53614723b..43e3552909b 100644
--- a/core/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java
+++ b/core/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java
@@ -132,7 +132,7 @@ public final class ImmutableOpenIntMap<VType> implements Iterable<IntObjectCursor<VType>> {
diff --git a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
--- a/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
+++ b/core/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
-public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {
+public abstract class AbstractLifecycleComponent extends AbstractComponent implements LifecycleComponent {
 
     protected final Lifecycle lifecycle = new Lifecycle();
 
@@ -58,9 +58,9 @@ public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {
 
     @SuppressWarnings({"unchecked"})
     @Override
-    public T start() {
+    public void start() {
         if (!lifecycle.canMoveToStarted()) {
-            return (T) this;
+            return;
         }
         for (LifecycleListener listener : listeners) {
             listener.beforeStart();
@@ -70,16 +70,15 @@ public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {
         for (LifecycleListener listener : listeners) {
             listener.afterStart();
         }
-        return (T) this;
     }
 
     protected abstract void doStart();
 
     @SuppressWarnings({"unchecked"})
     @Override
-    public T stop() {
+    public void stop() {
         if (!lifecycle.canMoveToStopped()) {
-            return (T) this;
+            return;
         }
         for (LifecycleListener listener : listeners) {
             listener.beforeStop();
@@ -89,7 +88,6 @@ public abstract class AbstractLifecycleComponent<T> extends AbstractComponent implements LifecycleComponent<T> {
         for (LifecycleListener listener : listeners) {
             listener.afterStop();
         }
-        return (T) this;
     }
 
     protected abstract void doStop();
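Dropping the self-referential type parameter also drops the fluent return value: start() and stop() now return void, so call sites can no longer chain on them. A before/after sketch with a hypothetical component; the names are illustrative, not from the diff:

```java
// Hypothetical component following the new contract: start()/stop() return void
// and bail out early when the lifecycle cannot move to the requested state.
final class ExampleComponent {
    private boolean started;

    void start() {        // was: ExampleComponent start() { ...; return this; }
        if (started) {
            return;       // was: return (T) this;
        }
        started = true;
    }

    void stop() {
        if (!started) {
            return;
        }
        started = false;
    }

    public static void main(String[] args) {
        ExampleComponent component = new ExampleComponent();
        component.start(); // previously chainable; now a standalone statement
        component.stop();
    }
}
```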
diff --git a/core/src/main/java/org/elasticsearch/common/component/Lifecycle.java b/core/src/main/java/org/elasticsearch/common/component/Lifecycle.java
index 479496dd9b6..4f0ef4c6887 100644
--- a/core/src/main/java/org/elasticsearch/common/component/Lifecycle.java
+++ b/core/src/main/java/org/elasticsearch/common/component/Lifecycle.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.common.component;
 
-
 /**
  * Lifecycle state. Allows the following transitions:
  *